\n for s in soup.findAll('div', {\"class\": \"panel-body\"}):\n answer = \"\"\n # each paragraph is in
\n for p in s.findAll('p'):\n paragraph = p.findAll(text=True)\n if paragraph != []:\n if answer == \"\":\n answer += paragraph[0]\n else:\n answer = answer + \" \" + paragraph[0]\n # scraping youtube video url\n for url in p.find_all('iframe'):\n if answer == \"\":\n answer += url['src']\n else:\n answer = answer + \" \" + url['src']\n # scraping bullet points\n for li in s.findAll('li'):\n list_ = li.findAll(text=True)\n answer = answer + \" \" + list_[0]\n answers.append(answer)\n # for url in soup.find_all('iframe'):\n # print(url['src'])\n # for s in soup.findAll('p'):\n # answers.append(s.findAll(text=True))\n # answers.append(s.findAll(text=True)) for s in soup.findAll('p'))\n # print(answers)\n return (questions, answers)\n\n\n# write data in text files\ndef write_data_txt(scripted_data):\n questions, answers = scripted_data\n # create text file for questions\n questions_txt_file = open(\"questions.text\", \"w\")\n # create text file for answers\n answers_txt_file = open(\"answers.text\", \"w\")\n for question in questions:\n if question != []:\n # to get only question sentense, filters by ?\n if question[0][len(question[0])-1] == \"?\":\n questions_txt_file.write(question[0] + \"\\n\")\n questions_txt_file.close()\n for answer in answers:\n # if answer != []:\n answers_txt_file.write(answer + \"\\n\")\n answers_txt_file.close()\n\n\ndef main():\n scripted_data = loadWord()\n write_data_txt(scripted_data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CashBet_webscraping.py","file_name":"CashBet_webscraping.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"387149528","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 28 09:41:18 2021\n\n@author: padma carstens\n\"\"\"\n\nimport os\nfrom os.path import exists\nimport json\nfrom ldcoolp.curation import retrieve\n\n#Enter article id for published articles:this is the also last number in the \"cite\" on data.lib.vt.edu \narticle_id=1234\n#Enter your token below\ntoken='1234'\n#Enter published accession number from the spreadsheet\nPublishedAccessionNumber= \"P123\"\n#Enter requestor name\nRequestor=\"XYZ\"\n#Enter corresponding author name\nCorrespondingAuthor=\"XYZ\"\n#Enter version\nVersion=\"01\"\n#Enter published date in YYYYMMDD format \nDatePublished= \"20211025\" \n\n#Create Publication folder to store dataset\ndata_directory1=f\"{PublishedAccessionNumber}_v{Version}\"\ndata_directory2=f\"{PublishedAccessionNumber}_{Requestor}_{CorrespondingAuthor}_v{Version}_{DatePublished}\"\ndata_directory3=f\"DisseminatedContent\"\ndata_directory_path=os.path.join(data_directory1, data_directory2, data_directory3)\nmetadata_directory_path=f\"{PublishedAccessionNumber}_DownloadedFileMetadata_v{Version}\"\n#-----Download dataset for published article using LD-Cool-P and save it as publication meta data in json file format\npublicfigshare_url='https://api.figshare.com/v2/articles/'+str(article_id)\nfrom figshare.figshare import Figshare\nfs=Figshare(token=token,private=False)\nFileDownload=retrieve.download_files(article_id, fs, data_directory=data_directory_path, metadata_directory=metadata_directory_path)\n#-----get article details for published article using LD-Cool-P and save it as published metadata in json file format\njson_out_file1=f\"{data_directory_path}/{PublishedAccessionNumber}_DisseminatedMetadata.json\"\njson_response1=fs.get_article_details(article_id,version=None)\n\n\nif not os.path.exists(json_out_file1):\n with open(json_out_file1, 'w') as f:\n json.dump(json_response1,f,indent=4)\nelse:\n print(f\"File exists: {json_out_file1}\")\n if 
overwrite:\n print(\"Overwriting!\")\n with open(json_out_file1, 'w') as f:\n json.dump(json_response1,f,indent=4)\n \n#----------------------create VTCurationServicesActions folder to save provenance log and email correspondence\n \ndata_directory4=f\"VTCurationServicesActions\" \ndata_directory_path2=os.path.join(data_directory1,data_directory2,data_directory4)\nos.mkdir(data_directory_path2)\nprint(\"Directory '% s' created\" % data_directory4) \n","sub_path":"create_publication_bag.py","file_name":"create_publication_bag.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"551483469","text":"# -- coding: utf-8 --\n\n'''\n 代销公募基金基本信息和净值数据\n'''\n\nfrom fundSelect import fundPool\nimport pandas as pd\nfrom datetime import datetime,date\nimport numpy as np\nfrom PrintInfo import PrintInfo\nfrom GetHistoryData.GetProductData import GetProductData\n\nclass SetPortfolio:\n def __init__(self,assetIndex={},backDate=date.today().strftime('%Y-%m-%d')):\n self.dicProduct = fundPool.getFundPool()\n self.getInfoFlag = True\n self.backDate = backDate\n self.assetIndex = assetIndex #大类资产指数\n self.PrintInfoDemo = PrintInfo() # 日志信息模块\n\n #初步过滤基金池,并对基金池归类\n def firstSelect(self, fundInfoDf):\n def dateFormat(tempSe):\n tempList = [tempSe[k].strftime('%Y-%m-%d') for k in tempSe.index.tolist()]\n resutlt = pd.Series(tempList,index=tempSe.index)\n return resutlt\n\n #过滤掉成立日期小于指定日期的基金\n fundInfoDf['FUND_SETUPDATE'] = dateFormat(fundInfoDf['FUND_SETUPDATE'])\n fundDf = fundInfoDf.loc[fundInfoDf['FUND_SETUPDATE']<=self.backDate]\n\n #过滤掉定期开放的基金\n fundDf['nameFlag'] = [name.find(u'定期开放') for name in fundDf['FUND_FULLNAME'].tolist()]\n fundDf = fundDf[fundDf['nameFlag']==-1]\n fundDf.drop(labels=['nameFlag'],axis=1,inplace=True)\n\n #按照基金的二级分类,对基金池划分\n dicFundStyle = {}\n for typeName,tempDf in fundDf.groupby(['FUND_INVESTTYPE']):\n dicFundStyle[typeName] = tempDf\n return dicFundStyle\n\n #再次处理基金池,返回大类对应的产品和基金净值数据\n def secondSelect(self,dicFundDf,fundNetValueUpdateDf):\n dicResult = {}\n # if u'被动指数型基金' in dicFundDf:\n # tempETFDf = dicFundDf[u'被动指数型基金']\n\n dicResult['000016.SH'] =['110020.OF']\n dicResult['000300.SH'] = ['270010.OF']\n dicResult['000905.SH'] = ['162711.OF','110026.OF']\n dicResult['SPX.GI'] = ['270042.OF']\n dicResult['CBA00601.CS'] = ['001021.OF']\n # dicResult['AU9999.SGE'] = ['002610.OF']\n dicResult['AU9999.SGE'] = ['518800.OF']\n\n totalSelectList = []\n for key,value in dicResult.items():\n totalSelectList = totalSelectList+value\n resultDf = fundNetValueUpdateDf[totalSelectList]\n return dicResult,resultDf\n\n #整理净值数据\n 
def settleFundNetValue(self,fundInfoDf,fundNetValueDf):\n def fifteData(tempSe):\n startDate = fundInfoDf.ix[tempSe.name, 'FUND_SETUPDATE']\n tempSe[tempSe.indexOne pageOne looove\"\"\")\n self.assertEqual(page.parse_media(), [])\n\n def test_media_returns_all_images_attributes(self):\n html = u\"\"\"\n One page
\n Yéâh
\n
\n
\n
\n \n \"\"\"\n page = FlatPage(content=html)\n self.assertEqual(page.parse_media(), [\n {'url': '/media/image1.png', 'title': 'Image 1', 'alt': 'image-1', 'mimetype': ['image', 'png']},\n {'url': '/media/image2.jpg', 'title': '', 'alt': '', 'mimetype': ['image', 'jpeg']}\n ])\n\n def test_flatpages_is_a_link(self):\n html = u\"http://www.makina-corpus.com\"\n page = FlatPage(content=html)\n self.assertEqual(page.link, 'http://www.makina-corpus.com')\n","sub_path":"rando/flatpages/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"59020881","text":"import pandas as pd\nimport numpy as np\nfrom scipy import interp\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_curve, auc\nfrom matplotlib import pyplot as plt\n\n\ndef modeling(feature):\n feature_labels = pd.read_excel(\"../cache/feature_21D_hg18_0507.xls\", sheetname='y').columns[4:, ]\n feature_labels = [feature_label.split(\"E017-\")[1] for feature_label in feature_labels]\n labels = []\n for i in feature_labels:\n if str(i).split('_')[0] not in labels:\n labels.append(str(i).split('_')[0])\n df_y = pd.read_excel(feature, header=None, sheetname='y').fillna(0)\n df_n = pd.read_excel(feature, header=None, sheetname='n').fillna(0)\n df = df_y.append(df_n)\n # df = pd.read_csv(\"../cache/Xcov_Data.csv\", header=None).fillna(0)\n # index = [i for i in range(4, 25)] + [i for i in range(67, 88)] # index = [i for i in range(151, 172)]\n # index = [j for j in range(4 + 21 * i, 4 + 21 * (i + 1))]\n index = [i for i in range(4, df.shape[1])]\n data = df.iloc[:, index]\n data = np.matrix(data)\n target = df.iloc[:, 3]\n target = np.array(target)\n k_flods = StratifiedKFold(n_splits=10, shuffle=True)\n rf = RandomForestClassifier(n_estimators=500)\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 200)\n i = 0\n for train_index, test_index in k_flods.split(data, target):\n X_train, X_test = data[train_index], data[test_index]\n y_train, y_test = target[train_index], target[test_index]\n rf.fit(X_train, y_train)\n # 特征重要性\n importance = rf.feature_importances_\n # print(importance)\n # print(feature_labels)\n plt.bar(range(1, X_train.shape[1] + 1), importance, 0.5, align='center')\n plt.xlim(-1, 190)\n for i in np.arange(21.5, 180, 21):\n plt.vlines(i, 0, ymax=0.015, colors='m', linestyles=\"dashed\")\n plt.xticks(range(11, 189, 21), labels, fontsize=20)\n plt.yticks(fontsize=20)\n plt.xlabel('Features Type', fontsize=24)\n plt.ylabel('The 
Importance of Feature', fontsize=24)\n # plt.rc('xtick', labelsize=1)\n plt.tight_layout()\n plt.title(\"Feature importance\", fontsize=28)\n pixel = plt.gcf()\n pixel.set_size_inches(20, 12)\n pixel.savefig(\"../pic/feature_importance_0531.eps\", format='eps', dpi=1000)\n # plt.show()\n importances = importance.reshape(9, 21)\n plt.matshow(importances, cmap=plt.cm.hot)\n plt.colorbar().set_label(\"The importance of the features\", rotation=270, labelpad=20)\n plt.xticks(range(0, 21), range(-10, 11))\n plt.yticks(range(0, 9), labels)\n pixel = plt.gcf()\n pixel.savefig(\"../pic/feature_importance_0531_pixel.eps\", format='eps', dpi=1000)\n # plt.show()\n exit()\n y_pred_rf = rf.predict_proba(X_test)[:, 1]\n # print(y_pred_rf)\n # exit()\n fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)\n tprs.append(interp(mean_fpr, fpr_rf, tpr_rf))\n tprs[-1][0] = 0.0\n roc_auc = auc(fpr_rf, tpr_rf)\n aucs.append(roc_auc)\n # plt.plot(fpr_rf, tpr_rf, label='ALL Feature %d, AUC=%0.3f' % (i, roc_auc))\n i += 1\n plt.plot([0, 1], [0, 1], 'k--')\n mean_tpr = np.mean(tprs, axis=0)\n mean_fpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n plt.plot(mean_fpr, mean_tpr, label=\"Mean AUC=%0.3f $\\pm$ %0.3f\" % (mean_auc, std_auc))\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='red', alpha=.2,\n label='$\\pm$ 1 std. dev.')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n modeling(\"../cache/feature_21D_hg18_0506.xls\")\n","sub_path":"src_old/plot_roc_auc.py","file_name":"plot_roc_auc.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"536159879","text":"from flask import Flask, request, escape, render_template\nfrom werkzeug.exceptions import HTTPException, BadRequest\n\nimport random\n\n'''\nNotes on how to run locally:\n\nTo build docker in the local directory use the following and pass a name to call for run:\ndocker build --tag name . \n\nIf no name passed then note the pid and use that to run:\ndocker run -d -p 8080:8080 [name or id]\n\nStop the instance with:\ndocker stop [id - first 4]\n\nDeploy to registry\n'''\napp = Flask(__name__)\n\n@app.route('/', methods=['POST', 'GET'])\n@app.route('/', methods=['POST', 'GET'])\ndef say_hello(name='World'):\n\n color = \"%06x\" % random.randint(0, 0xFFFFFF)\n style = \"style=\\\"background-color:#\" + str(color) + \"\\\"\"\n combined_html = \"Hello \" + name + \"!!!
\"\n\n # Option to simply return hello\n # return 'Hello {}!'.format(escape(name))\n # Option passes the generated html to the page\n return combined_html\n\n@app.errorhandler(404)\ndef not_found(error):\n return \"\"\" 404
\"\"\"\n\n #Option to use template \n #return render_template('home.hml')\n\n@app.errorhandler(BadRequest)\ndef handle_bad_request(e):\n return 'bad request!', 400\n\n@app.errorhandler(HTTPException)\ndef handle_exception(e):\n \"\"\"Return JSON instead of HTML for HTTP errors.\"\"\"\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n response.content_type = \"application/json\"\n return response\n\nif __name__ == '__main__':\n # Use when running locally\n #app.run(host='0.0.0.0', use_reloader=True, debug=True)\n\n app.run(host='0.0.0.0',port=8080, use_reloader=True, debug=True, threaded=True)","sub_path":"python/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"207734417","text":"class Solution:\n def isRectangleCover(self, rectangles):\n area, corners = 0, set()\n a, c = lambda: (X - x) * (Y - y), lambda: {(x, y), (x, Y), (X, y), (X, Y)}\n for x, y, X, Y in rectangles:\n area += a()\n corners ^= c()\n x, y, X, Y = (f(z) for f, z in zip((min, min, max, max), zip(*rectangles)))\n return area == a() and corners == c()\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.isRectangleCover([[1, 1, 3, 3], [3, 1, 4, 2], [3, 2, 4, 4], [1, 3, 2, 4], [2, 3, 3, 4]]) is True)\n","sub_path":"Solutions/391. Perfect Rectangle/391.py","file_name":"391.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"634925643","text":"import RPi.GPIO as GPIO\nfrom flask import Flask\nfrom flask import jsonify, request\nimport time\n\napp = Flask(__name__)\n\n@app.route(\"/fetchSensorData\", methods=['POST'])\ndef fetchSensorData():\n\n GPIO.setmode(GPIO.BCM)\n TRIG= int(request.form['TRIG'])\n ECHO = int(request.form['ECHO'])\n\n GPIO.setup(TRIG, GPIO.OUT)\n GPIO.setup(ECHO, GPIO.IN)\n\n GPIO.output(TRIG, False)\n print('waiting for sensor')\n time.sleep(2)\n\n GPIO.output(TRIG, True)\n time.sleep(0.00001)\n GPIO.output(TRIG, False)\n\n pulse_start = 0\n pulse_end = 0\n while GPIO.input(ECHO) == 0:\n pulse_start = time.time()\n\n while(GPIO.input(ECHO) == 1):\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration*17150\n distance = round(distance,2)\n\n GPIO.cleanup()\n \n sensor_data = {\"distance\": distance}\n\n return jsonify(sensor_data)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=80, debug=True)\n","sub_path":"FlaskCode/sensor_server.py","file_name":"sensor_server.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"590934490","text":"# _*_ coding : utf-8 -*-\n\n\"\"\"\nFlowGraphのtreeファイルから共起頻度を計算\n入力: viob2ファイル\n 各行 : レシピ1stepの文章\n スペース区切りで word/iob2tag\n 出力保存先ディレクトリ\n出力: co_occurrence.pickle\n keywordsのリスト\n 各keywordsの頻度\n 共起頻度を表す行列(インデックスは上記のリスト順) \n\"\"\"\n\nimport os\nimport argparse\nimport glob\nimport itertools\nimport pickle\nimport codecs\nimport pandas as pd\nimport numpy as np\n\n\nRECIPE = 1715343\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_path', help=u'入力ファイル')\n parser.add_argument('synonym_path', help=u'料理オントロジーファイル')\n parser.add_argument('output_dir', help=u'出力ディレクトリ')\n parser.add_argument('-t', '--tags', help='使用するタグ(tag1,tag2,...)', type=str, default='F,T,D,Q,Ac,Af,Sf,St')\n params = parser.parse_args()\n\n return vars(params)\n\n\ndef read_synonym(synonym_path):\n synonyms = pd.read_csv(synonym_path, delimiter='\\t', header = None, encoding='utf-8')\n idx = list((synonyms.iloc[:, 0] != u'調理器具') & (synonyms.iloc[:, 0] != u'動作'))\n ontology = dict(zip(synonyms.iloc[idx, 2], synonyms.iloc[idx, 1]))\n return ontology\n\n\ndef parse_keywords(line, ontology, used_tags):\n keywords_by_step = []\n for token in line:\n token = token.split('/')\n if len(token) == 2:\n word = token[0]\n iob2_format = token[1].split('-')\n if len(iob2_format) == 2:\n recipe_tag = iob2_format[0]\n iob2_tag = iob2_format[1]\n if recipe_tag in used_tags:\n if iob2_tag == 'B':\n keywords_by_step.append(word)\n elif iob2_tag == 'I':\n keywords_by_step[-1] += word \n # ontologyに入っているもののみ\n keywords_by_step = [ontology[keyword] for keyword in keywords_by_step if keyword in ontology]\n keywords_by_step = list(set(keywords_by_step)) # 重複を除く\n return keywords_by_step\n\n\ndef extract_keywords(line, recipe_no, keywords, tmp_keywords, occur, ontology, used_tags):\n keywords_by_step = []\n line = line.split()\n if len(line) == 0: #レシピ終了\n for kwd in tmp_keywords:\n occur[keywords.index(kwd)] += 1\n tmp_keywords = []\n recipe_no += 1\n else:\n 
keywords_by_step = parse_keywords(line, ontology, used_tags)\n for kwd in keywords_by_step: # 重複を除く\n if kwd not in keywords: # 初めて登場したレシピ用語\n keywords.append(kwd)\n tmp_keywords.append(kwd)\n occur.append(0)\n elif kwd not in tmp_keywords: # レシピ中で初めて登場したレシピ用語\n tmp_keywords.append(kwd)\n return recipe_no, keywords, tmp_keywords, occur\n\n\ndef count_cooccurrence(line, recipe_no, keywords, tmp_keywords, cooccur, ontology, used_tags):\n keywords_by_step = []\n line = line.split()\n if len(line) == 0:\n for kwd1, kwd2 in itertools.combinations(tmp_keywords, 2):\n if kwd1 in keywords and kwd2 in keywords: \n idx1 = keywords.index(kwd1)\n idx2 = keywords.index(kwd2)\n cooccur[idx1, idx2] += 1\n cooccur[idx2, idx1] += 1\n tmp_keywords = []\n recipe_no += 1\n else:\n keywords_by_step = parse_keywords(line, ontology, used_tags)\n for kwd in keywords_by_step:\n if kwd not in tmp_keywords:\n tmp_keywords.append(kwd) # レシピ中で初めて登場したレシピ用語\n return recipe_no, cooccur, tmp_keywords\n\n\ndef main(params):\n input_path = params['input_path']\n synonym_path = params['synonym_path']\n output_dir = params['output_dir']\n used_tags = params['tags']\n\n used_tags = used_tags.split(',')\n print(used_tags)\n\n tag_str = ''\n for t in used_tags:\n tag_str += t\n\n ontology = read_synonym(synonym_path)\n\n # レシピに出現するキーワードを全て取得\n output_file = os.path.join(output_dir, 'viob2_keywords_%s.pickle' % tag_str)\n if os.path.exists(output_file):\n with open(output_file, 'rb') as fin:\n keywords, occur, recipe_no = pickle.load(fin)\n else:\n keywords = []\n occur = []\n tmp_keywords = []\n recipe_no = 0\n with codecs.open(input_path, 'r', 'utf-8') as fin:\n for line in fin:\n line = line.strip()\n if recipe_no % 1000 == 0:\n print(\"extract keywords... 
%d \\r\"%recipe_no, end='') \n recipe_no, keywords, tmp_keywords, occur =\\\n extract_keywords(line, recipe_no, keywords, tmp_keywords, occur, ontology, used_tags)\n with open(output_file, 'wb') as fout:\n pickle.dump((keywords, occur, recipe_no), fout, protocol=0)\n\n\n # 行列を拡張していくのはコストが高いのでキーワード数を数えてから共起回数を数える\n i = 0\n tmp_keywords = []\n cooccur = np.zeros((len(keywords), len(keywords)))\n with codecs.open(input_path, 'r', 'utf-8') as fin:\n for line in fin:\n line = line.strip()\n if i % 1000 == 0:\n print(\"count co-occurrence... %d/%d \\r\"%(i, recipe_no), end='') \n i, cooccur, tmp_keywords =\\\n count_cooccurrence(line, i, keywords, tmp_keywords, cooccur, ontology, used_tags)\n\n print (\"keywords:%d \"%len(keywords))\n print (keywords[0:10])\n print (occur[0:10])\n print (\"cooccur \", cooccur.shape)\n print (cooccur[0:10, 0:10])\n\n with open(os.path.join(output_dir, 'viob2_cooccurence_%s.pickle' % tag_str), 'wb') as fout:\n pickle.dump((keywords, occur, cooccur, recipe_no), fout, protocol=0)\n\n\nif __name__ == '__main__':\n params = parse()\n main(params)\n\n","sub_path":"tools/grouping/python3/count_co-occurrence_viob2.py","file_name":"count_co-occurrence_viob2.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"600187710","text":"import requests\nfrom lxml import etree\nimport sys\nimport logging\nfrom texttospeech import TextToSpeech\n \n \nclass PSIReader:\n \n __apiURL = 'http://www.nea.gov.sg/api/WebAPI/?dataset=psi_update&keyref=781CF461BB6606ADEA01E0CAF8B35274602B7580279AFE8F'\n __proxies = {\"http\": 'http://host:port'}\n __xpathTemplate = \"/channel/item/region/id[text()='%s']/../record/reading[@type='NPSI']/@value\"\n \n \n def __init__(self):\n #print __name__\n global logger\n logger = logging.getLogger('PSIReader')\n hdlr = logging.FileHandler('myapp.log')\n formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.setLevel(logging.INFO)\n \n def getPSIUpdates(self):\n \n try:\n r = requests.get(PSIReader.__apiURL, \n verify=False)\n #print r.status_code\n #print r.headers\n xmlvalue = r.content\n r.close()\n return xmlvalue\n except:\n logger.error('Error', exc_info=True)\n \n @staticmethod \n def processXML(xmlvalue, regionCode, location):\n \n try:\n root = etree.fromstring(xmlvalue)\n val = root.xpath(PSIReader.__xpathTemplate % regionCode)\n psival = int(val[0])\n \n message = ''\n if psival >=0 and psival <=50:\n message = 'Good'\n elif psival >=51 and psival <=100:\n message = 'Moderate. You can do Normal activities. Stay Healthy'\n elif psival >=101 and psival <=200:\n message = 'Unhealthy. Take Mask. Drink Lot of Water and minimised outdoor activities'\n elif psival >=200 and psival <=300:\n message = 'Very unhealthy. Take Mask and Stay indoor. Drink Lot of Water'\n elif psival >=300:\n message = 'Hazardous. Take Mask and Stay indoor. 
Drink Lot of Water'\n else:\n return 'Looks like P S I value is not available from source'\n except:\n logger.error(\"Error: %s\" % sys.exc_info()[0], exc_info=True)\n return 'I am sorry, looks like P S I value is not available from source'\n \n return '%s P S I Value is %s and is %s' % (location, psival,message)\n\n def getPSIMessage(self, regionCode, location):\n xmlValue = self.getPSIUpdates()\n #print xmlValue\n text = self.processXML(xmlValue, regionCode, location)\n #print 'final message %s' % text\n return text\n \nif __name__==\"__main__\":\n \n psiReader = PSIReader()\n xmlValue = psiReader.getPSIUpdates()\n text = psiReader.processXML(xmlValue, 'rNO', 'Singapore North')\n tts = TextToSpeech()\n audioFile = 'psiblue.wav'\n tts.bluemixTTS(text, audioFile)\n #tts.googleTTS(\"I am done for the day google\", audioFile)\n tts.play(audioFile)\n #logger.info(xmlValue)\n print \n #print psiReader.processXML(xmlValue, 'rWE134', 'Singapore West')\n #print processXML(xmlvalue, 'rCE', 'Singapore Central')\n #print processXML(xmlvalue, 'rWE', 'Singapore West')\n #print processXML(xmlvalue, 'rEA', 'Singapore East')\n #print processXML(xmlvalue, 'rSO', 'Singapore South')\n","sub_path":"api/psireader.py","file_name":"psireader.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"581942394","text":"# Merton Jump Diffusion Model, 1976\n# Yves Hilpisch - Python for Finance p. 285 ff.\n# Stochastic Differential Equation on page 285\n# Euler Discretization Scheme page 286\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass JumpDiffusion():\n \"\"\"\n SDE: \n dS_t = (r - r_j)S_t dt + sigma S_t dZ_t + J_t S_t dN_t\n where\n S_t index level at date t\n r constant riskless rhot rate\n r_j defined as lambda * (e^{mu_j + delta**2/2 - 1}) drift correctionf or jump tomaintain risk neutrality\n sigma constant volatility of S\n Z_t Standard Brownian motion\n J_t Jump at date ti with distribution:\n log(1+J_t) approx. N(log(1+mu_j) - delta**2/2, delta**2)\n where N is the cumulative distribution function fo a standard normal random variable\n \"\"\"\n def __init__(self):\n self.S0 = 100.0\n self.r = 0.05\n self.sigma = 0.2\n self.lamb = 0.75\n self.mu = -0.6\n self.delta = 0.25\n self.T = 1.0\n\n def _simulate(self):\n # We need three sets of independent random numbers in order to \n # simulate the jump diffusion\n # Input: tdat, r, startvalue, days, sigma)\n \n M = 10 # Maturity # Default: 50\n I = 1 # Number of Paths # Default: 10000\n dt = self.T/M\n rj = self.lamb * (np.exp(self.mu + 0.5 * self.delta**2) - 1)\n S = np.zeros((M+1, I))\n S[0] = self.S0\n sn1 = np.random.standard_normal((M+1, I))\n sn2 = np.random.standard_normal((M+1, I))\n poi = np.random.poisson(self.lamb * dt, (M+1, I))\n for t in range(1, M+1, 1):\n S[t] = S[t-1] * (np.exp((self.r - rj - 0.5 * self.sigma**2) * dt + self.sigma * np.sqrt(dt) * sn1[t]) + (np.exp(self.mu + self.delta * sn2[t]) - 1) * poi[t])\n S[t] = np.maximum(S[t], 0)\n return S\n\n def _run(self, plot = True):\n S = self._simulate()\n \n if plot:\n # Histogram\n plt.hist(S[-1], bins = 50)\n plt.xlabel('value')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.show()\n\n # Paths\n plt.plot(S[:, :10], lw = 1.5)\n plt.xlabel('time')\n plt.ylabel('index level')\n plt.grid(True)\n plt.show()\n\nif 
__name__ == '__main__':\n jd = JumpDiffusion()\n jd._run()\n\n","sub_path":"BitcoinPricingKernels/src/jump_diffusion.py","file_name":"jump_diffusion.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"507129051","text":"from tictactoe_env import TicTacToe\nimport pdb \nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport random\n\ndef tabular_epsilon_greedy_policy(Q, eps, state):\n action = 0\n rand = random.uniform(0, 1)\n if rand <= eps :\n action = random.randrange(1, 9)\n else :\n action = np.argmax(Q[state])\n return action\n\nclass QLearning(object):\n def __init__(self, num_states, num_actions, alpha=0.5, gamma=0.9):\n self.Q = {}\n self.alpha = alpha\n self.gamma = gamma\n\n def update(self, state, action, reward, next_state, done):\n self.Q[state][action]+= self.alpha * (reward + (self.gamma * np.max(self.Q[next_state])) - self.Q[state][action])\n \ndef main():\n env = TicTacToe()\n done = False\n state = env.reset()\n epsilon_greedy_policy = QLearning(19683, 9) #3^9=19683\n num_episodes = 100\n eps = 0.005\n epsilon_greedy_rewards = []\n epsilon_greedy_start_qs = []\n saved_Q_table = []\n Qs = []\n Qdict=epsilon_greedy_policy.Q\n count=0\n for i in range(num_episodes):\n state = env.reset()\n done = False\n ep_rewards = 0\n list_states=[''.join(list(state.flatten().astype(str)))]\n list_actions=[]\n list_rewards=[]\n while done == False:\n currentstateval=''.join(list(state.flatten().astype(str)))\n if currentstateval not in Qdict.keys():\n Qdict[currentstateval]=np.zeros((9))\n action = tabular_epsilon_greedy_policy(epsilon_greedy_policy.Q, eps, currentstateval)\n #action = int(input(\"Choose where to place (1 to 9): \"))\n if state[int((action)/3)][(action)%3]==0: #to eliminate output of an action to a cell that is already occupied.\n next_state,reward,done = env.step(action+1)\n list_actions.append(action)\n list_rewards.append(reward)\n stateval=''.join(list(next_state.flatten().astype(str)))\n list_states.append(stateval)\n ep_rewards += reward\n if stateval not in Qdict.keys():\n Qdict[stateval]=np.zeros((9))\n state = next_state\n state = next_state\n epsilon_greedy_rewards.append(ep_rewards)\n for 
i in range(len(list_actions)) :\n action=list_actions[i]\n state=list_states[i]\n reward=list_rewards[i]\n next_state=list_states[i+1]\n epsilon_greedy_policy.update(state, action, reward,next_state,done)\n t = np.arange(0, num_episodes)\n plt.plot(t, epsilon_greedy_rewards)\n plt.xlabel('Episode Number')\n plt.ylabel('Starting State Q Value (Best Action)')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Self Learning/Model Training.py","file_name":"Model Training.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"613061410","text":"import inspect\nimport time\nimport pytest\nfrom unittest.mock import Mock, call\nfrom unittest import mock\n\n\nimport mlflow\nfrom mlflow.utils import gorilla\nfrom mlflow.tracking.client import MlflowClient\nfrom mlflow.utils.autologging_utils import (\n log_fn_args_as_params,\n wrap_patch,\n resolve_input_example_and_signature,\n batch_metrics_logger,\n BatchMetricsLogger,\n)\n\n# Example function signature we are testing on\n# def fn(arg1, default1=1, default2=2):\n# pass\n\n\ntwo_default_test_args = [\n ([\"arg1\", \"default1\"], {\"default2\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {}),\n ([\"arg1\", \"default1\", \"default2\"], {}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {}),\n ([\"arg1\"], {\"default1\": 42, \"default2\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {}),\n (\n [],\n {\"arg1\": 42, \"default1\": 42, \"default2\": 42},\n [\"arg1\", \"default1\", \"default2\"],\n [1, 2],\n {},\n ),\n ([\"user_arg\"], {\"default1\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default2\": 2}),\n ([\"user_arg\"], {\"default2\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default1\": 1}),\n ([], {\"arg1\": 42, \"default1\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default2\": 2}),\n ([\"arg1\", \"default1\"], {}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default2\": 2}),\n ([\"arg1\"], {}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default1\": 1, \"default2\": 2}),\n ([], {\"arg1\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default1\": 1, \"default2\": 2}),\n]\n\n\n# Test function signature for the following tests\n# def fn_default_default(default1=1, default2=2, default3=3):\n# pass\n\n\nthree_default_test_args = [\n (\n [],\n {},\n [\"default1\", \"default2\", \"default3\"],\n [1, 2, 3],\n {\"default1\": 1, \"default2\": 2, \"default3\": 3},\n ),\n (\n [],\n {\"default2\": 42},\n [\"default1\", \"default2\", \"default3\"],\n [1, 2, 
3],\n {\"default1\": 1, \"default3\": 3},\n ),\n]\n\n\n@pytest.fixture\ndef start_run():\n mlflow.start_run()\n yield\n mlflow.end_run()\n\n\ndef dummy_fn(arg1, arg2=\"value2\", arg3=\"value3\"): # pylint: disable=W0613\n pass\n\n\nlog_test_args = [\n ([], {\"arg1\": \"value_x\", \"arg2\": \"value_y\"}, [\"value_x\", \"value_y\", \"value3\"]),\n ([\"value_x\"], {\"arg2\": \"value_y\"}, [\"value_x\", \"value_y\", \"value3\"]),\n ([\"value_x\"], {\"arg3\": \"value_z\"}, [\"value_x\", \"value2\", \"value_z\"]),\n ([\"value_x\", \"value_y\"], {}, [\"value_x\", \"value_y\", \"value3\"]),\n ([\"value_x\", \"value_y\", \"value_z\"], {}, [\"value_x\", \"value_y\", \"value_z\"]),\n (\n [],\n {\"arg1\": \"value_x\", \"arg2\": \"value_y\", \"arg3\": \"value_z\"},\n [\"value_x\", \"value_y\", \"value_z\"],\n ),\n]\n\n\n@pytest.mark.large\n@pytest.mark.parametrize(\"args,kwargs,expected\", log_test_args)\ndef test_log_fn_args_as_params(args, kwargs, expected, start_run): # pylint: disable=W0613\n log_fn_args_as_params(dummy_fn, args, kwargs)\n client = mlflow.tracking.MlflowClient()\n params = client.get_run(mlflow.active_run().info.run_id).data.params\n for arg, value in zip([\"arg1\", \"arg2\", \"arg3\"], expected):\n assert arg in params\n assert params[arg] == value\n\n\n@pytest.mark.large\ndef test_log_fn_args_as_params_ignores_unwanted_parameters(start_run): # pylint: disable=W0613\n args, kwargs, unlogged = (\"arg1\", {\"arg2\": \"value\"}, [\"arg1\", \"arg2\", \"arg3\"])\n log_fn_args_as_params(dummy_fn, args, kwargs, unlogged)\n client = mlflow.tracking.MlflowClient()\n params = client.get_run(mlflow.active_run().info.run_id).data.params\n assert len(params.keys()) == 0\n\n\ndef get_func_attrs(f):\n assert callable(f)\n\n return (f.__name__, f.__doc__, f.__module__, inspect.signature(f))\n\n\n@pytest.mark.large\ndef test_wrap_patch_with_class():\n class Math:\n def add(self, a, b):\n \"\"\"add\"\"\"\n return a + b\n\n def new_add(self, *args, **kwargs):\n \"\"\"new 
add\"\"\"\n orig = gorilla.get_original_attribute(self, \"add\")\n return 2 * orig(*args, **kwargs)\n\n before = get_func_attrs(Math.add)\n wrap_patch(Math, Math.add.__name__, new_add)\n after = get_func_attrs(Math.add)\n\n assert after == before\n assert Math().add(1, 2) == 6\n\n\n@pytest.mark.large\ndef test_wrap_patch_with_module():\n def new_log_param(key, value):\n \"\"\"new mlflow.log_param\"\"\"\n return (key, value)\n\n before = get_func_attrs(mlflow.log_param)\n wrap_patch(mlflow, mlflow.log_param.__name__, new_log_param)\n after = get_func_attrs(mlflow.log_param)\n\n assert after == before\n assert mlflow.log_param(\"foo\", \"bar\") == (\"foo\", \"bar\")\n\n\n@pytest.fixture()\ndef logger():\n return Mock()\n\n\ndef get_input_example():\n return \"data\"\n\n\ndef infer_model_signature(_):\n return \"signature\"\n\n\ndef test_if_getting_input_example_fails(logger):\n error_msg = \"NoneType has no whatever\"\n\n def throws():\n raise Exception(error_msg)\n\n input_example, signature = resolve_input_example_and_signature(\n throws, infer_model_signature, True, True, logger\n )\n\n assert input_example is None\n assert signature is None\n calls = [\n call(\"Failed to gather input example: \" + error_msg),\n call(\n \"Failed to infer model signature: \"\n + \"could not sample data to infer model signature: \"\n + error_msg\n ),\n ]\n assert logger.warning.has_calls(calls)\n\n\ndef test_if_model_signature_inference_fails(logger):\n error_msg = \"stack overflow\"\n\n def throws(_):\n raise Exception(error_msg)\n\n input_example, signature = resolve_input_example_and_signature(\n get_input_example, throws, True, True, logger\n )\n\n assert input_example == \"data\"\n assert signature is None\n logger.warning.assert_called_with(\"Failed to infer model signature: \" + error_msg)\n\n\ndef test_happy_path_works(logger):\n input_example, signature = resolve_input_example_and_signature(\n get_input_example, infer_model_signature, True, True, logger\n )\n\n assert 
input_example == \"data\"\n assert signature == \"signature\"\n logger.warning.assert_not_called()\n\n\ndef test_avoids_collecting_input_example_if_not_needed(logger):\n # We create a get_input_example that modifies the value of x\n # If get_input_example was not invoked, x should not have been modified.\n\n x = {\"data\": 0}\n\n def modifies():\n x[\"data\"] = 1\n\n resolve_input_example_and_signature(modifies, infer_model_signature, False, False, logger)\n\n assert x[\"data\"] == 0\n logger.warning.assert_not_called()\n\n\ndef test_avoids_inferring_signature_if_not_needed(logger):\n # We create an infer_model_signature that modifies the value of x\n # If infer_model_signature was not invoked, x should not have been modified.\n\n x = {\"data\": 0}\n\n def modifies(_):\n x[\"data\"] = 1\n\n resolve_input_example_and_signature(get_input_example, modifies, True, False, logger)\n\n assert x[\"data\"] == 0\n logger.warning.assert_not_called()\n\n\ndef test_batch_metrics_logger_logs_all_metrics(start_run,): # pylint: disable=unused-argument\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n for i in range(100):\n metrics_logger.record_metrics({hex(i): i}, i)\n\n metrics_on_run = mlflow.tracking.MlflowClient().get_run(run_id).data.metrics\n\n for i in range(100):\n assert hex(i) in metrics_on_run\n assert metrics_on_run[hex(i)] == i\n\n\ndef test_batch_metrics_logger_flush_logs_to_mlflow(start_run): # pylint: disable=unused-argument\n run_id = mlflow.active_run().info.run_id\n\n # Need to patch _should_flush() to return False, so that we can manually flush the logger\n with mock.patch(\n \"mlflow.utils.autologging_utils.BatchMetricsLogger._should_flush\", return_value=False\n ):\n metrics_logger = BatchMetricsLogger(run_id)\n metrics_logger.record_metrics({\"my_metric\": 10}, 5)\n\n # Recorded metrics should not be logged to mlflow run before flushing BatchMetricsLogger\n metrics_on_run = 
mlflow.tracking.MlflowClient().get_run(run_id).data.metrics\n assert \"my_metric\" not in metrics_on_run\n\n metrics_logger.flush()\n\n # Recorded metric should be logged to mlflow run after flushing BatchMetricsLogger\n metrics_on_run = mlflow.tracking.MlflowClient().get_run(run_id).data.metrics\n assert \"my_metric\" in metrics_on_run\n assert metrics_on_run[\"my_metric\"] == 10\n\n\ndef test_batch_metrics_logger_runs_training_and_logging_in_correct_ratio(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock:\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({\"x\": 1}, step=0) # data doesn't matter\n\n # first metrics should be logged immediately to record a previous timestamp and\n # batch log time\n log_batch_mock.assert_called_once()\n\n metrics_logger.total_log_batch_time = 1\n metrics_logger.total_training_time = 1\n\n log_batch_mock.reset_mock() # resets the 'calls' of this mock\n\n # the above 'training' took 1 second. 
So with target training-to-logging time ratio of\n # 10:1, 9 more 'training' should happen without sending the batch and then after the\n # 10th training the batch should be sent.\n for i in range(2, 11):\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_not_called()\n metrics_logger.total_training_time = i\n\n # at this point, average log batch time is 1, and total training time is 9\n # thus the next record_metrics call should send the batch.\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_called_once()\n\n # update log_batch time to reflect the 'mocked' training time\n metrics_logger.total_log_batch_time = 2\n\n log_batch_mock.reset_mock() # reset the recorded calls\n\n for i in range(12, 21):\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_not_called()\n metrics_logger.total_training_time = i\n\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_called_once()\n\n\ndef test_batch_metrics_logger_chunks_metrics_when_batch_logging(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock:\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({hex(x): x for x in range(5000)}, step=0)\n run_id = mlflow.active_run().info.run_id\n\n for call_idx, call in enumerate(log_batch_mock.call_args_list):\n _, kwargs = call\n\n assert kwargs[\"run_id\"] == run_id\n assert len(kwargs[\"metrics\"]) == 1000\n for metric_idx, metric in enumerate(kwargs[\"metrics\"]):\n assert metric.key == hex(call_idx * 1000 + metric_idx)\n assert metric.value == call_idx * 1000 + metric_idx\n assert metric.step == 0\n\n\ndef test_batch_metrics_logger_records_time_correctly(start_run,): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\", wraps=lambda *args, **kwargs: time.sleep(1)):\n run_id = 
mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n assert metrics_logger.total_log_batch_time >= 1\n\n time.sleep(2)\n\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n assert metrics_logger.total_training_time >= 2\n\n\ndef test_batch_metrics_logger_logs_timestamps_as_int_milliseconds(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock, mock.patch(\n \"time.time\", return_value=123.45678901234567890\n ):\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n _, kwargs = log_batch_mock.call_args\n\n logged_metric = kwargs[\"metrics\"][0]\n\n assert logged_metric.timestamp == 123456\n\n\ndef test_batch_metrics_logger_continues_if_log_batch_fails(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock:\n log_batch_mock.side_effect = [Exception(\"asdf\"), None]\n\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n # this call should fail to record since log_batch raised exception\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n metrics_logger.record_metrics({\"y\": 2}, step=1)\n\n # even though the first call to log_batch failed, the BatchMetricsLogger should continue\n # logging subsequent batches\n last_call = log_batch_mock.call_args_list[-1]\n\n _, kwargs = last_call\n\n assert kwargs[\"run_id\"] == run_id\n assert len(kwargs[\"metrics\"]) == 1\n metric = kwargs[\"metrics\"][0]\n assert metric.key == \"y\"\n assert metric.value == 2\n assert metric.step == 
1\n","sub_path":"tests/utils/test_autologging_utils.py","file_name":"test_autologging_utils.py","file_ext":"py","file_size_in_byte":13521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"322687784","text":"# ----------------------------------------------------------\r\n# Introdução a Programação de Computadores - IPC\r\n# Universidade do Estado do Amazonas - UEA\r\n# Prof. Jucimar Jr\r\n# Hugo Thadeu Silva Cardoso 1715310013\r\n# Luiz Paulo Machado 1515200542\r\n# Ian Gabriel Costa Machado 1215120276\r\n# André Luis Laborda Neves 1515070006\r\n# Gabriel de Queiroz Souza 1715310044\r\n# João Vitor De Cordeiro B Gonçalves 1515140036\r\n# Rodrigo Duarte de Souza 1115140049\r\n\r\n# --------------------------\r\n\r\n\r\n#ENTRADA DOS DADOS#####\r\nhoras = int(input(' Digite a hora: '))\r\nminutos = int(input(' Digite os minutos: '))\r\n\r\n#PROCESSAMENTO######\r\n\r\n# LETRA A #\r\nhmin = horas * 60\r\nprint('Conversão de horas para minutos =', hmin)\r\n\r\n#LETRA B#\r\ntminutos = hmin + minutos\r\nprint('Total em minutos =', tminutos)\r\n\r\n#LETRA C#\r\nsegundos = tminutos * 60\r\nprint('Horas em segundos =', segundos)\r\n\r\n\r\n","sub_path":"lista1.5/lista1.5_questao25.py","file_name":"lista1.5_questao25.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"559930729","text":"#!/usr/bin/python3\n# hello_tkinter.py by Barron Stone\n# This is an exercise file from Python GUI Devlopment with Tkinter on lynda.com\n\n#python 2 uses Tkinter\nfrom tkinter import *\n\nroot = Tk()\nLabel (root, text = \"HELLO PANDA!\").pack()\nroot.mainloop()\n\n","sub_path":"Python/Lynda Videos/Python GUI Development with Tkinter/Chapter 01/05_hello_tkinter.py","file_name":"05_hello_tkinter.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"363911469","text":"''' To be run in the main computer. It compiles the model and saves it as an .h5 file\n'''\n\nfrom keras import backend as K\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D\nimport numpy as np\nfrom keras.utils import plot_model\nimport pydot\n\n# Dimensions of our images.\nimg_width, img_height = 150, 150\ntrain_data_dir = 'training_data'\nvalidation_data_dir = 'testing_data'\nnb_train_samples = 500\nnb_validation_samples = 200\nepochs = 25\nbatch_size = 32\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n# This is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n rescale = 1. / 255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\n# This is the augmentation configuration we will use for testing: only rescaling\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size = (img_width, img_height),\n batch_size = batch_size,\n class_mode = 'binary')\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size = (img_width, img_height),\n batch_size = batch_size,\n class_mode = 'binary')\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch = nb_train_samples // batch_size,\n epochs = epochs,\n validation_data = validation_generator,\n validation_steps = nb_validation_samples // batch_size)\n\nmodel.save('not-laptop-on-mac-25.h5')\nplot_model(model, to_file='model.png')\n","sub_path":"Assignment_2_Deep_Learning/streaming_classifier/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"182246906","text":"#!/usr/bin/python3\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nfilename1 = \"out-warmup/128kb-1ke-32th.json\" \nfilename2 = \"out-warmup/128kb-1ke-128th.json\"\nfilename3 = \"out-warmup/128kb-1ke-256th.json\"\nfilename4 = \"out-warmup/128kb-1ke-512th.json\"\nfilename5 = \"out-warmup/128kb-1ke-1024th.json\"\n\nwith open(filename1) as f1:\n data1 = json.load(f1)\nwith open(filename2) as f2:\n data2 = json.load(f2)\nwith open(filename3) as f3:\n data3 = json.load(f3)\nwith open(filename4) as f4:\n data4 = json.load(f4)\nwith open(filename5) as f5:\n data5 = json.load(f5)\n\ndef getData(data):\n meanv= np.array(data['mean'])\n minerr = meanv-np.array(data['min'])\n maxerr = np.array(data['max'])-meanv\n return (meanv, minerr, maxerr)\n\ndef plotData(data, marker, color, ax):\n l = ax.errorbar(data1['nof_blocks'], data[0], yerr=[data[1], data[2]], fmt=color+marker+'--',linewidth=1,elinewidth=1,ecolor=color, capsize=5, capthick=0.5)\n ax.set_xticks(range(1,33,2))\n ax.grid(True, which=\"both\")\n ax.set_xlabel('Nof Blocks')\n ax.set_ylabel('Cycles/Elem')\n return l\n\nfig, ax = plt.subplots(3,2)\nl1 = plotData(getData(data1),'*','r',ax[0,0])\nl2 = plotData(getData(data2),'+','m',ax[1,0])\nl3 = plotData(getData(data3),'x','k',ax[1,1])\nl4 = plotData(getData(data4),'|','b',ax[2,0])\nl5 = plotData(getData(data5),'v','g',ax[2,1])\nax[0, 1].axis('off')\n\n#fig.legend((l1, l2, l3, l4, l5), (\\\n# '128kB, 32 threads',\\\n# '128kB, 128 threadst',\\\n# '128kB, 256 threadst',\\\n# '128kB, 512 threads',\\\n# '128kB, 1024 threads'\\\n# ), 'best')\n\nbox = ax[0,1].get_position()\n#ax[0,0].set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax[0,1].legend((l1, l2, l3 ,l4, l5), (\\\n '128kB, 32 threads',\\\n '128kB, 128 threadst',\\\n '128kB, 256 threadst',\\\n '128kB, 512 threads',\\\n '128kB, 1024 threads'\\\n ),loc='center')\nfig.suptitle('Coallocent sequential walk - multiple blocks')\nplt.xlabel('Nof 
Blocks')\nplt.ylabel('Cycles/Elem')\nplt.show()\n","sub_path":"experiments/cpu-inter-sequential-walk-multiblock/plotwup.py","file_name":"plotwup.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"336796228","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport os\nimport datetime\nimport time\n\nimport yaml\n\nCURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef reproducible_datetime():\n\n build_date = datetime.datetime.utcfromtimestamp(\n int(os.environ.get(\"SOURCE_DATE_EPOCH\", time.time()))\n )\n return build_date.isoformat().replace(\"T\", \" AT \")[:22]\n\n\ndef type_to_ctype(typename):\n is_const = False\n if \"Const[\" in typename:\n is_const = True\n typename = typename[len(\"Const[\") : -1]\n count = 0\n while \"List[\" in typename:\n count += 1\n typename = typename[len(\"List[\") : -1]\n typename = typename + \"*\" * count\n if is_const:\n typename = \"const \" + typename\n return typename\n\n\ndef include_kernels_h(specification):\n print(\"Generating include/awkward/kernels.h...\")\n\n with open(\n os.path.join(CURRENT_DIR, \"..\", \"include\", \"awkward\", \"kernels.h\"), \"w\"\n ) as header:\n header.write(\n \"\"\"// AUTO GENERATED ON {0}\n// DO NOT EDIT BY HAND!\n//\n// To regenerate file, run\n//\n// python dev/generate-kernel-signatures.py\n//\n// (It is usually run as part of pip install . 
or localbuild.py.)\n\n#ifndef AWKWARD_KERNELS_H_\n#define AWKWARD_KERNELS_H_\n\n#include \"awkward/common.h\"\n\nextern \"C\" {{\n\n\"\"\".format(\n reproducible_datetime()\n )\n )\n for spec in specification[\"kernels\"]:\n for childfunc in spec[\"specializations\"]:\n header.write(\" \" * 2 + \"EXPORT_SYMBOL ERROR\\n\")\n header.write(\" \" * 2 + childfunc[\"name\"] + \"(\\n\")\n for i, arg in enumerate(childfunc[\"args\"]):\n header.write(\n \" \" * 4 + type_to_ctype(arg[\"type\"]) + \" \" + arg[\"name\"]\n )\n if i == (len(childfunc[\"args\"]) - 1):\n header.write(\");\\n\")\n else:\n header.write(\",\\n\")\n header.write(\"\\n\")\n header.write(\n \"\"\"}\n\n#endif // AWKWARD_KERNELS_H_\n\"\"\"\n )\n\n print(\"Done with include/awkward/kernels.h.\")\n\n\ntype_to_dtype = {\n \"bool\": \"bool_\",\n \"int8\": \"int8\",\n \"uint8\": \"uint8\",\n \"int16\": \"int16\",\n \"uint16\": \"uint16\",\n \"int32\": \"int32\",\n \"uint32\": \"uint32\",\n \"int64\": \"int64\",\n \"uint64\": \"uint64\",\n \"float\": \"float32\",\n \"double\": \"float64\",\n}\n\n\ndef type_to_pytype(typename, special):\n if \"Const[\" in typename:\n typename = typename[len(\"Const[\") : -1]\n count = 0\n while \"List[\" in typename:\n count += 1\n typename = typename[len(\"List[\") : -1]\n if typename.endswith(\"_t\"):\n typename = typename[:-2]\n if count != 0:\n special.append(type_to_dtype[typename])\n return (\"POINTER(\" * count) + (\"c_\" + typename) + (\")\" * count)\n\n\ndef kernel_signatures_py(specification):\n print(\"Generating src/awkward/_kernel_signatures.py...\")\n\n with open(\n os.path.join(CURRENT_DIR, \"..\", \"src\", \"awkward\", \"_kernel_signatures.py\"),\n \"w\",\n ) as file:\n file.write(\n \"\"\"# AUTO GENERATED ON {0}\n# DO NOT EDIT BY HAND!\n#\n# To regenerate file, run\n#\n# python dev/generate-kernel-signatures.py\n#\n# (It is usually run as part of pip install . 
or localbuild.py.)\n\n# fmt: off\n\nfrom ctypes import (\n POINTER,\n Structure,\n c_bool,\n c_int8,\n c_uint8,\n c_int16,\n c_uint16,\n c_int32,\n c_uint32,\n c_int64,\n c_uint64,\n c_float,\n c_double,\n c_char_p,\n)\n\nimport numpy as np\n\nfrom numpy import (\n bool_,\n int8,\n uint8,\n int16,\n uint16,\n int32,\n uint32,\n int64,\n uint64,\n float32,\n float64,\n)\n\nclass ERROR(Structure):\n _fields_ = [\n (\"str\", c_char_p),\n (\"filename\", c_char_p),\n (\"id\", c_int64),\n (\"attempt\", c_int64),\n (\"pass_through\", c_bool),\n ]\n\n\ndef by_signature(lib):\n out = {{}}\n\"\"\".format(\n reproducible_datetime()\n )\n )\n\n for spec in specification[\"kernels\"]:\n for childfunc in spec[\"specializations\"]:\n special = [repr(spec[\"name\"])]\n arglist = [\n type_to_pytype(x[\"type\"], special) for x in childfunc[\"args\"]\n ]\n file.write(\n \"\"\"\n f = lib.{0}\n f.argtypes = [{1}]\n f.restype = ERROR\n out[{2}] = f\n\"\"\".format(\n childfunc[\"name\"], \", \".join(arglist), \", \".join(special)\n )\n )\n\n file.write(\n \"\"\"\n return out\n\"\"\"\n )\n\n print(\"Done with src/awkward/_kernel_signatures.py...\")\n\n\nif __name__ == \"__main__\":\n with open(os.path.join(CURRENT_DIR, \"..\", \"kernel-specification.yml\")) as specfile:\n specification = yaml.safe_load(specfile)\n include_kernels_h(specification)\n kernel_signatures_py(specification)\n","sub_path":"dev/generate-kernel-signatures.py","file_name":"generate-kernel-signatures.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"597280122","text":"import time\nimport os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\n\n############### 예약 제외일자 설정하기 ##################################################\n# (week, 요일) 1=일, 2=월, 3=화, 4=수, 5=목, 6=금, 7=토) \n# 2번째주 수요일을 제외할 경우: except_date = [(2, 4)]\n# 2번째주 수요일과 3번째주 토요일을 제외할 경우: except_date = [(2, 4), (3, 7)]\n\nexcept_date = [] \n#######################################################################################\n\nopen_flag = False\ndef ecolian_action():\n global open_flag\n\n options = webdriver.ChromeOptions()\n options.add_argument('window-size=240,200')\n \n driver = webdriver.Chrome(executable_path='chromedriver_linux64/chromedriver', options=options)\n driver.implicitly_wait(2)\n\n driver.get(url='https://jc.ecolian.or.kr/asp/ecolian/login.asp')\n\n id_input = driver.find_element_by_id('txtId')\n id_input. send_keys('maranta')\n\n pass_input = driver.find_element_by_id('txtPwd')\n pass_input.send_keys('xxxxx')\n\n login_button = driver.find_element_by_id('btnLogin')\n login_button.send_keys(Keys.RETURN)\n \n sleep(1)\n\n reserve_link = driver.find_element_by_xpath('//*[@id=\"navi\"]/li[4]/a/img')\n reserve_link.click()\n\n next_monath_link = driver.find_element_by_xpath('//*[@id=\"contents\"]/p/a[2]/img')\n next_monath_link.click()\n\n for i in range(2, 8):\n except_cnt = 0\n for j in range(1, 8):\n date_xpath = '//*[@id=\"contents\"]/table/tbody/tr[' + str(i) + ']/td[' + str(j) + ']/p'\n try:\n date_link = driver.find_element_by_xpath(date_xpath)\n print(i, j, date_link.text)\n if(date_link.text == '[예약가능]' and (i-1, j) not in except_date):\n date_link.click()\n comfirm_link = driver.find_element_by_xpath('//*[@id=\"rspop_01\"]/div[1]/div[3]/div/a[1]')\n comfirm_link.click()\n time_link = driver.find_element_by_xpath('//*[@id=\"contents\"]/table/tbody/tr[2]/td[2]/span')\n time_link.click()\n alert_box = driver.switch_to_alert()\n alert_box.accept()\n \n duration = 
1 # seconds\n freq = 440 # Hz\n os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n open_flag = True\n except:\n except_cnt += 1\n print(i, j, 'No Schedule')\n # except가 7번 발생하면 예약화면이 아니므로(로그인 실패 등) 종료\n if(except_cnt >= 7):\n break\n if(not open_flag):\n driver.close() \n\ni = 0\nstarttime = time.time()\nwhile True:\n i += 1\n if(not open_flag):\n print(\"close\", i, open_flag)\n ecolian_action()\n else:\n print(\"open\", i, open_flag)\n\n time.sleep(5 - ((time.time() - starttime) % 5))\n","sub_path":"ecolian_reservation_jechon_next_month.py","file_name":"ecolian_reservation_jechon_next_month.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"409483230","text":"from Game import Unit\r\nfrom Game import Player\r\nfrom Interfacer.Texty import Texty\r\nfrom Game import BattleHandler\r\nfrom Interfacer.Savvy import Savvy\r\nfrom Game.CommandFactory import CommandFactory\r\n\r\n\r\n \r\n \r\ndef analysisPhase(player):\r\n texty = Texty() \r\n while (True):\r\n enemy = Unit.Unit(\"Enemy\", 0) \r\n player.analyze()\r\n enemy.analyze() \r\n texty.showPlayer(\"Do you wish to fight this enemy?\")\r\n read = texty.readOneKeyFromPlayer() \r\n read = read.lower()\r\n if (read == \"y\"): \r\n texty.showPlayer(\"You approach the enemy and fight!\")\r\n return enemy \r\n else: \r\n texty.showPlayer(\"You bravely flee from your foe!\")\r\n \r\n \r\n \r\n \r\n#MAIN\r\n\r\ntexty = Texty()\r\ntexty.setUp(\"network\")\r\ntexty.getPlayerConnection()\r\ntexty.showPlayer(\"Hello, and welcome to the Enrichment Center. The Device Has Been Modified.\")\r\nplayGame = True\r\ngameOverFlag = False\r\ncf = CommandFactory()\r\nsavvy = Savvy()\r\nflagLoadedMidfight = False\r\nif (savvy.isMidFight()):\r\n texty.showPlayer(\"Do you want to resume the fight?\")\r\n read = texty.readOneKeyFromPlayer()\r\n if (read == \"y\"):\r\n cf.load()\r\n \r\n flagLoadedMidfight = True\r\nif (not flagLoadedMidfight):\r\n texty.showPlayer(\"Do you want to load a game?\")\r\n read = texty.readOneKeyFromPlayer()\r\n if (read == \"y\"):\r\n texty.showPlayer (\"Please write the name of the player\")\r\n read = texty.readOneKeyFromPlayer()\r\n playerchar = savvy.loadPickle(read) \r\n else : playerchar = cf.create(\"newPlayer\", [\"Player\", \"Warrior\"])\r\n \r\nplayerchar = cf.getNextCommand().execute()\r\n\r\n \r\nwhile (playGame):\r\n \r\n if (cf.needsInput()):\r\n enemy = analysisPhase(playerchar) \r\n combat = cf.create(\"fight\", [playerchar, enemy])\r\n wonCombat, playerchar = cf.getNextCommand().execute()\r\n if (not wonCombat): \r\n texty.showPlayer(\"You lose! 
Play again?\")\r\n playerchar = Player.Player(\"Player\", \"test\")\r\n else:\r\n texty.showPlayer(\"You win! Save and quit?\")\r\n read = texty.readOneKeyFromConsole()\r\n if (read == \"y\"):\r\n texty.showPlayer(\"(J)son or (P)ickle?\")\r\n read = texty.readOneKeyFromPlayer() \r\n if (read == \"j\"):\r\n savvy.saveJson(playerchar)\r\n texty.showPlayer(\"Saved.\")\r\n elif (read == \"p\"):\r\n savvy.savePickle(playerchar)\r\n texty.showPlayer(\"Saved.\")\r\n else:\r\n texty.showPlayer(\"Not a valid option, defaulting to pickle\") \r\n savvy.savePickle(playerchar)\r\n texty.showPlayer(\"Saved.\")","sub_path":"Test/Game/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"43392200","text":"import unittest\nimport sys\nfrom sqlalchemy import false\n\n# pip3 install google-api-python-client-py3\n\n\nfrom unittest import TestCase\n\nclass test_google_photo(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n\n def test_getPeoples(self):\n from os.path import join, dirname\n\n from googleapiclient.discovery import build\n from httplib2 import Http\n from oauth2client import file, client, tools\n SCOPES = 'https://www.googleapis.com/auth/photoslibrary.readonly'\n\n store = file.Storage(join(dirname(__file__), 'token-for-google.json'))\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(join(dirname(__file__), 'client_id.json', SCOPES))\n creds = tools.run_flow(flow, store)\n google_photos = build('photoslibrary', 'v1', http=creds.authorize(Http()))\n\n day, month, year = ('0', '6', '2019') # Day or month may be 0 => full month resp. year\n date_filter = [{\"day\": day, \"month\": month, \"year\": year}] # No leading zeroes for day an month!\n nextpagetoken = 'Dummy'\n while nextpagetoken != '':\n nextpagetoken = '' if nextpagetoken == 'Dummy' else nextpagetoken\n results = google_photos.mediaItems().search(\n body={\"filters\": {\"dateFilter\": {\"dates\": [{\"day\": day, \"month\": month, \"year\": year}]}},\n \"pageSize\": 10, \"pageToken\": nextpagetoken}).execute()\n # The default number of media items to return at a time is 25. 
The maximum pageSize is 100.\n items = results.get('mediaItems', [])\n nextpagetoken = results.get('nextPageToken', '')\n for item in items:\n print(f\"{item['filename']} {item['mimeType']} '{item.get('description', '- -')}'\"\n f\" {item['mediaMetadata']['creationTime']}\\nURL: {item['productUrl']}\")\n\n\n\n def test_list_albums(self):\n #from __future__ import print_function\n from apiclient.discovery import build\n from httplib2 import Http\n from oauth2client import file, client, tools\n\n # Setup the Photo v1 API\n SCOPES = 'https://www.googleapis.com/auth/photoslibrary.readonly'\n\n # see https://console.cloud.google.com/apis/credentials?pli=1 to generate credentials.json file\n store = file.Storage('credentials.json')\n print(store)\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n # flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('photoslibrary', 'v1', http=creds.authorize(Http()))\n\n # Call the Photo v1 API\n results = service.albums().list(\n pageSize=10, fields=\"nextPageToken,albums(id,title)\").execute()\n items = results.get('albums', [])\n if not items:\n print('No albums found.')\n else:\n print('Albums:')\n for item in items:\n print('{0} ({1})'.format(item['title'].encode('utf8'), item['id']))","sub_path":"tests/test_google_api/test_google_photo.py","file_name":"test_google_photo.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"109758512","text":"# Testing TSAdjoint and matrix-free Jacobian\n\nimport sys, petsc4py\npetsc4py.init(sys.argv)\n\nfrom petsc4py import PETSc\n\nclass VDP(object):\n n = 2\n comm = PETSc.COMM_SELF\n def __init__(self, mu_=1.0e3,mf_=False):\n self.mu_ = mu_\n self.mf_ = mf_\n if self.mf_:\n self.J_ = PETSc.Mat().createDense([self.n,self.n], comm=self.comm)\n self.J_.setUp()\n self.Jp_ = PETSc.Mat().createDense([self.n,1], comm=self.comm)\n self.Jp_.setUp()\n def initialCondition(self, u):\n mu = self.mu_\n u[0] = 2.0\n u[1] = -2.0/3.0 + 10.0/(81.0*mu) - 292.0/(2187.0*mu*mu)\n u.assemble()\n def evalFunction(self, ts, t, u, f):\n mu = self.mu_\n f[0] = u[1]\n f[1] = mu*((1.-u[0]*u[0])*u[1]-u[0])\n f.assemble()\n def evalJacobian(self, ts, t, u, A, B):\n if not self.mf_:\n J = A\n else :\n J = self.J_\n mu = self.mu_\n J[0,0] = 0\n J[1,0] = -mu*(2.0*u[1]*u[0]+1.)\n J[0,1] = 1.0\n J[1,1] = mu*(1.0-u[0]*u[0])\n J.assemble()\n if A != B: B.assemble()\n return True # same nonzero pattern\n def evalJacobianP(self, ts, t, u, C):\n if not self.mf_:\n Jp = C\n else:\n Jp = self.Jp_\n Jp[0,0] = 0\n Jp[1,0] = (1.-u[0]*u[0])*u[1]-u[0]\n Jp.assemble()\n return True\n def evalIFunction(self, ts, t, u, udot, f):\n mu = self.mu_\n f[0] = udot[0]-u[1]\n f[1] = udot[1]-mu*((1.-u[0]*u[0])*u[1]-u[0])\n f.assemble()\n def evalIJacobian(self, ts, t, u, udot, shift, A, B):\n if not self.mf_:\n J = A\n else :\n J = self.J_\n mu = self.mu_\n J[0,0] = shift\n J[1,0] = mu*(2.0*u[1]*u[0]+1.)\n J[0,1] = -1.0\n J[1,1] = shift-mu*(1.0-u[0]*u[0])\n J.assemble()\n if A != B: B.assemble()\n return True # same nonzero pattern\n def evalIJacobianP(self, ts, t, u, udot, shift, C):\n if not self.mf_:\n Jp = C\n else:\n Jp = self.Jp_\n Jp[0,0] = 0\n Jp[1,0] = u[0]-(1.-u[0]*u[0])*u[1]\n Jp.assemble()\n return True\n\nclass JacShell:\n def __init__(self, ode):\n self.ode_ = ode\n def mult(self, A, x, y):\n \"y <- A * x\"\n self.ode_.J_.mult(x,y)\n def multTranspose(self, A, x, y):\n \"y <- A' * 
x\"\n self.ode_.J_.multTranspose(x, y)\n\nclass JacPShell:\n def __init__(self, ode):\n self.ode_ = ode\n def multTranspose(self, A, x, y):\n \"y <- A' * x\"\n self.ode_.Jp_.multTranspose(x, y)\nOptDB = PETSc.Options()\n\nmu_ = OptDB.getScalar('mu', 1.0e3)\nmf_ = OptDB.getBool('mf', False)\n\nimplicitform_ = OptDB.getBool('implicitform', False)\n\node = VDP(mu_,mf_)\n\nif not mf_:\n J = PETSc.Mat().createDense([ode.n,ode.n], comm=ode.comm)\n J.setUp()\n Jp = PETSc.Mat().createDense([ode.n,1], comm=ode.comm)\n Jp.setUp()\nelse:\n J = PETSc.Mat().create()\n J.setSizes([ode.n,ode.n])\n J.setType('python')\n shell = JacShell(ode)\n J.setPythonContext(shell)\n J.setUp()\n J.assemble()\n Jp = PETSc.Mat().create()\n Jp.setSizes([ode.n,1])\n Jp.setType('python')\n shell = JacPShell(ode)\n Jp.setPythonContext(shell)\n Jp.setUp()\n Jp.assemble()\n\nu = PETSc.Vec().createSeq(ode.n, comm=ode.comm)\nf = u.duplicate()\nadj_u = []\nadj_u.append(PETSc.Vec().createSeq(ode.n, comm=ode.comm))\nadj_u.append(PETSc.Vec().createSeq(ode.n, comm=ode.comm))\nadj_p = []\nadj_p.append(PETSc.Vec().createSeq(1, comm=ode.comm))\nadj_p.append(PETSc.Vec().createSeq(1, comm=ode.comm))\n\nts = PETSc.TS().create(comm=ode.comm)\nts.setProblemType(ts.ProblemType.NONLINEAR)\n\nif implicitform_:\n ts.setType(ts.Type.CN)\n ts.setIFunction(ode.evalIFunction, f)\n ts.setIJacobian(ode.evalIJacobian, J)\n ts.setIJacobianP(ode.evalIJacobianP, Jp)\nelse:\n ts.setType(ts.Type.RK)\n ts.setRHSFunction(ode.evalFunction, f)\n ts.setRHSJacobian(ode.evalJacobian, J)\n ts.setRHSJacobianP(ode.evalJacobianP, Jp)\n\nts.setSaveTrajectory()\nts.setTime(0.0)\nts.setTimeStep(0.001)\nts.setMaxTime(0.5)\nts.setMaxSteps(1000)\nts.setExactFinalTime(PETSc.TS.ExactFinalTime.MATCHSTEP)\n\nts.setFromOptions()\node.initialCondition(u)\nts.solve(u)\n\nadj_u[0][0] = 1\nadj_u[0][1] = 0\nadj_u[0].assemble()\nadj_u[1][0] = 0\nadj_u[1][1] = 1\nadj_u[1].assemble()\nadj_p[0][0] = 0\nadj_p[0].assemble()\nadj_p[1][0] = 
0\nadj_p[1].assemble()\n\nts.setCostGradients(adj_u,adj_p)\n\nts.adjointSolve()\n\nadj_u[0].view()\nadj_u[1].view()\nadj_p[0].view()\nadj_p[1].view()\n\ndef compute_derp(du,dp):\n print(du[1]*(-10.0/(81.0*mu_*mu_)+2.0*292.0/(2187.0*mu_*mu_*mu_))+dp[0])\n\ncompute_derp(adj_u[0],adj_p[0])\ncompute_derp(adj_u[1],adj_p[1])\n\ndel ode, J, Jp, u, f, ts, adj_u, adj_p\n","sub_path":"src/binding/petsc4py/demo/ode/vanderpol.py","file_name":"vanderpol.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"285341354","text":"#Mengurutkan data dari kecil kebesar\n\nprint(\"~\"*39)\n\numur_adik = input(\"Umur adik : \")\numur_kakak = input(\"Umur kakak : \")\numur_ayah = input(\"Umur ayah : \")\numur_ibu = input(\"Umur ibu : \")\n\ndata = [umur_adik, umur_kakak, umur_ayah, umur_ibu]\n\n#Fungsi .sort() untuk mengurutkan bilangan kecil ke besar\ndata.sort()\n\nprint(\"Maka urutan umur dari termuda hingga tertua adalah :\",data)","sub_path":"Mengurutkandata.py","file_name":"Mengurutkandata.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"246203105","text":"if __name__ == '__main__':\n import matplotlib\n # Agg backend runs without a display\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.io\nimport cv2\nfrom scipy.ndimage.measurements import label\nROOT_DIR = os.path.abspath(\"../\")\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\n\nclass CellDataset(utils.Dataset):\n\n def load_trainingcells(self, dataset_dir):\n \"\"\"\n dataset_dir: The directory of training images.\n \"\"\"\n # Add classes. We have one class.\n # Naming the dataset cell, and the class cell\n self.add_class(\"cell\", 1, \"cell\")\n image_ids = next(os.walk(dataset_dir))[2]\n #To suffle the images just to make sure that any bais due to\n #generation does not kick in.\n image_ids = list(set(image_ids))\n\n # Add images\n for image_id in image_ids:\n\n self.add_image(\n \"cell\",\n image_id=image_id,\n path=os.path.join(dataset_dir,image_id))\n\n def load_validationcells(self,dataset_dir):\n #dataset_dir: The directory of validation images.\n self.add_class(\"cell\",1,\"cell\")\n image_ids=next(os.walk(dataset_dir))[2]\n image_ids=list(set(image_ids))\n for image_id in image_ids:\n self.add_image(\"cell\",image_id=image_id,path=os.path.join(dataset_dir,image_id))\n\n def train_validate_split(self,dataset_dir,ratio=0.7):\n #If the validation set needs to be a part of the training data\n #We need to randomly select out of it ,by the ratio=training/validation\n image_ids=next(os.walk(dataset_dir))[2]\n splitratio=int((1-ratio)*len(image_ids))\n train_ids=np.random.choice(image_ids,splitratio)\n for i in range(len(train_ids)):\n train_ids[i]=str(os.path.join(dataset_dir,train_ids[i]))\n #returns the array of images to train\n return train_ids\n\n def train_validate_loadtrain(self,dataset_dir,train_ids):\n 
self.add_class(\"cell\", 1, \"cell\")\n for image_id in train_ids:\n self.add_image(\"cell\",image_id=image_id,path=image_id)\n\n def train_validate_loadval(self,dataset_dir,train_ids):\n self.add_class(\"cell\",1,\"cell\")\n image_ids=next(os.walk(dataset_dir))[2]\n val_ids=list(set(image_ids)-set(train_ids))\n for image_id in val_ids:\n self.add_image(\"cell\",image_id=image_id,path=image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n info = self.image_info[image_id]\n image_name=info['path'].split(\"/\")[-1]\n # Get mask directory from image path\n #mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n mask_dir = '/Mask_RCNN/data/annotated'\n\n #Get the exact full mask for that image\n for f in next(os.walk(mask_dir))[2]:\n if f.endswith(str(image_name)):\n full_mask = cv2.imread(os.path.join(mask_dir, f),0)\n\n #extract indivial masks of cells from the full mask\n lb = label(full_mask)\n\n msks = []\n for key in range(1,lb[1]+1):\n\n x = lb[1]+1\n #print(x)\n #print(key)\n\n\n newim=np.zeros(full_mask.shape)\n for i in range(full_mask.shape[0]):\n for j in range(full_mask.shape[1]):\n if lb[0][i][j]==key:\n newim[i][j]=1\n msks.append(newim)\n #msks=np.astype(np.bool)\n #print (msks)\n\n # Combine these masks of indiviual cells\n mask = np.stack(msks, axis=-1)\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"cell\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n","sub_path":"mrcnn/deepcell_dataset.py","file_name":"deepcell_dataset.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"340922901","text":"from gi import require_version\nrequire_version( \"Gtk\", \"3.0\" )\nfrom gi.repository import Gtk, Gdk, GdkPixbuf, GLib \n\nclass ColorDialog( Gtk.Dialog ):\n\n def __init__( self, parent ):\n Gtk.Dialog.__init__(self, \"Pick a Color\", parent, 0, \n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, \n Gtk.STOCK_OK, Gtk.ResponseType.OK))\n self.set_default_size( 150, 100 )\n\n box = self.get_content_area()\n box.set_border_width( 10 )\n self.colorchooser = Gtk.ColorChooserWidget(show_editor=True)\n self.colorchooser.set_use_alpha( False )\n box.add( self.colorchooser )\n self.show_all()\n","sub_path":"src/gui/colorpicker.py","file_name":"colorpicker.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"503805140","text":"'''\n'''\n\nfrom __future__ import division\nimport pypyodbc as odb\nimport pandas.io.sql as psql\nimport re, sys, os, time\nimport numpy as np\nimport pandas as pd\nimport sklearn as sk\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nfrom sqlalchemy import create_engine\n\n\nengine = create_engine(\"mssql+pypyodbc://odin:odin168!@10.132.44.78:3000/ODIN?driver=SQL Server\")\npg_engine = create_engine('postgresql://postgres:1Bxpia2a456789@localhost:5432/scm')\n\n\ndef read_scp_folder_today(path, date_delta):\n\tfile_list = list(os.listdir(path))\n\t#for i in range(len(file_list)):\n\t#\tfile_list[i]= file_list[i]\n\tfile_list_2= list(range(len(file_list)))\n\tfor i in range(1,len(file_list)):\n\t\tfile_list_2[i]= [path+file_list[i], file_list[i].replace('.','-').split('-')[1], file_list[i].replace('.','-').split('-')[2][:6], file_list[i].replace('.','-').split('-')[2][:10]]\n\tdf_files = pd.DataFrame( file_list_2[1:], columns = ['path', 'site', 'update_time', 'time_stamp'])\n\ttime_ref = str(datetime.today().date()-timedelta(date_delta))[2:].replace('-','')\n\tdf_files_2 = df_files.sort_values('update_time',ascending=False)\n\tdf_files_2 = df_files_2[ df_files_2['update_time']==time_ref ]\n\tdf_files_2.index = range(1,len(df_files_2)+1)\n\treturn df_files_2\n\n\ndef read_scp_document_v2(obj1):\n\t'''\n\tinput data : obj1 = path(str) target to file level\n\toutput data : json-like list \n\t'''\n\twith open (obj1,\"r\") as myfile:data=myfile.read().replace('\\n', '').replace('REC|','#wa-REC|')\n\tdata = [ s for s in [ s.split('|') for s in data.split('#wa-') ] ]\n\t#data[0] = data[0][1:]\n\treturn data\n\n\n# Check the data structure\ndef check_size_distribution(obj):\n\t'''\n\tcheck the data length => show df\n\tin-put : scp FB01 (json-like list) ex : read_scp_document_v2(path)\n\tout-put : df with distribution\n\t'''\t\n\ttemp = []\n\tfor i in range(len(obj)):temp += [ len(obj[i]) ] # count the size of each 
sub-list\n\ttemp = pd.DataFrame({'count':temp}) # json-like to table\n\ttemp = pd.DataFrame({'index':list(temp['count'].value_counts().index),'count':list(temp['count'].value_counts())})\n\treturn temp\n\ndef check_dq(obj):\t\n\t'''\n\tcheck the data length => lf there are some length is not majority => show data\n\tin-put : scp FB01 (json-like list) ex : read_scp_document_v2(path)\n\tout-put : json-like list with singular points\n\t'''\t\n\ttemp = []\n\tfor i in range(len(obj)):temp += [ len(obj[i]) ] # count the size of each sub-list\n\ttemp = pd.DataFrame({'count':temp}) # json-like to table\n\ttemp = pd.DataFrame({'index':list(temp['count'].value_counts().index),'count':list(temp['count'].value_counts())})\n\tsingular=[]\n\tfor i in range(len(temp)): \n\t\tif temp['count'][i] / sum(temp['count']) < 0.05: \n\t\t\tsingular += [ temp['index'][i] ]\n\ttemp = []\n\tfor i in range(len(obj)):\n\t\tfor j in range(len(singular)):\n\t\t\tif len(obj[i])== singular[j]:temp+=[ obj[i] ]\n\treturn temp\n\n\ndef trans_to_table(obj):\n\t'''\n\tGoal : transform from json-like list to read_scp_document_v2(path)\n\tin-put : scp FB01 (json-like list) ex : read_scp_document_v2(path)\n\t-> create table columns with the max_len of list\n\t-> exame the data length and make it the same length to have same table columns\n\t-> use df.loc[i] to add each list into a row\n\t'''\n\tmax_len = check_size_distribution(obj)['index'].max()\n\tmain_column = list(range(max_len))\n\tdf = pd.DataFrame(np.random.randn(0, max_len), columns=main_column)\n\tfor i in range(len(obj)):\n\t\tif len(obj[i]) Config:\n with open(CONFIG_FILE, 'r') as file:\n data = json.loads(file.read())\n return Config(**data)\n\n def __set_driver(self):\n if not self.is_test:\n self.driver = webdriver.Remote(\n command_executor='http://127.0.0.1:4444/wd/hub',\n desired_capabilities=DesiredCapabilities.CHROME)\n else:\n is_find = False\n for driver in self.config.drivers:\n try:\n self.driver = 
webdriver.Chrome(f'{self.config.base_path}/drivers/{driver}')\n is_find = True\n break\n except WebDriverException:\n continue\n\n assert is_find, 'Driver not found!'\n","sub_path":"core/BaseCore.py","file_name":"BaseCore.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"313007343","text":"import struct\nfrom classes import hardware\nfrom classes import common\n\nclass NRFCommands():\n\tdef __init__(self):\n\t\tself.OPCODE_RX_DATA\t\t\t= 100\n\t\tself.OPCODE_TX_DATA\t\t\t= 101\n\t\tself.OPCODE_SET_ADDRESS \t= 102\n\t\tself.OPCODE_GET_ADDRESS \t= 103\n\t\tself.OPCODE_GET_NODEMAP \t= 104\n\t\tself.OPCODE_ADD_NODE_INDEX \t= 105\n\t\tself.OPCODE_DEL_NODE_INDEX \t= 106\n\t\tself.OPCODE_GET_NODE_INFO \t= 107\n\t\tself.OPCODE_GET_NODES_MAP \t= 108\n\t\tself.OPCODE_GET_NODES_LIST\t= 109\n\t\tself.OPCODE_SET_NODES_DATA\t= 110\n\t\tself.OPCODE_GET_NODES_DATA\t= 111\n\t\n\t'''\n\t{UART PACKET}\n\t-------------\n\t[MAGIC_NUMBER] \t\t(2 Bytes)\n\t[Direction] \t\t(1 Byte)\n\t[Opcode]\t\t\t(1 Byte)\n\t[Content Length] \t(1 Byte)\n\t[Payload]\t\t\t(57 Bytes)\n\t[MAGIC_NUMBER] \t\t(2 Bytes)\n\n\t{NRF PACKET}\n\t------------\n\t[NodeID] \t\t\t(1 Byte)\n\t[Opcode] \t\t\t(1 Byte)\n\t[Size] \t\t\t\t(1 Byte)\n\t[Payload]\t\t\t(12 Bytes)\n\t[CRC] \t\t\t\t(1 Byte)\n\t'''\n\n\tdef SetNodeDataCommand(self, index, data):\n\t\treturn struct.pack(\"BBBBBIBB\", 0xDE, 0xAD, 0x1, self.OPCODE_SET_NODES_DATA, index, data, 0xAD, 0xDE)\n\t\n\tdef GetNodeDataCommand(self, index):\n\t\treturn struct.pack(\"BBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODES_DATA, index, 0xAD, 0xDE)\n\t\n\tdef GetNodeListCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODES_LIST, 0xAD, 0xDE)\n\t\n\tdef ReadRemoteCommand(self, node_id, msg):\n\t\ts_msg = ''.join(chr(x) for x in msg)\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t [MN] [DIR] [OP] [LEN] [ID] [OP] [LEN] [P] [MN]\n\t\treturn struct.pack(\"BBBBBB{0}sBB\".format(len(msg)), 0xDE, 0xAD, 0x1, self.OPCODE_RX_DATA, 1, node_id, s_msg.encode(), 0xAD, 0xDE)\n\t\n\tdef WriteRemoteCommand(self, node_id):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_TX_DATA, 1, node_id, 0xAD, 0xDE)\n\n\tdef GetNodeMapCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, 
self.OPCODE_GET_NODEMAP, 0xAD, 0xDE)\n\t\n\tdef AddNodeIndexCommand(self, index):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_ADD_NODE_INDEX, 1, index, 0xAD, 0xDE)\n\t\n\tdef DelNodeIndexCommand(self, index):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_DEL_NODE_INDEX, 1, index, 0xAD, 0xDE)\n\n\tdef SetAddressCommand(self, address):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_SET_ADDRESS, 1, address, 0xAD, 0xDE)\n\t\n\tdef GetAddressCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_ADDRESS, 0xAD, 0xDE)\n\t\n\tdef GetNodeInfoCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODE_INFO, 0xAD, 0xDE)\n\t\n\tdef GetNodesMapCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODES_MAP, 0xAD, 0xDE)\n\nclass NRF(hardware.HardwareLayer):\n\tdef __init__(self):\n\t\thardware.HardwareLayer.__init__(self)\n\t\tself.Commands = NRFCommands()\n\n\t\tself.NodeTypeMap = {\n\t\t\t0x2: \"GATEWAY\",\n\t\t\t0x3: \"NODE\"\n\t\t}\n\t\n\tdef GetDeviceType(self, port):\n\t\tdev_type = self.HW.GetDeviceType(port)\n\t\treturn {\n\t\t\t'device_type': dev_type\n\t\t}\n\t\n\tdef GetDeviceAdditional(self, port):\n\t\tadditional = self.HW.GetDeviceAdditional(port)\n\t\tif (len(additional) > 1):\n\t\t\treturn {\n\t\t\t\t'type': self.NodeTypeMap[additional[0]],\n\t\t\t\t'index': additional[1]\n\t\t\t}\n\t\treturn None\n\t\n\tdef SetNodeAddress(self, port, address):\n\t\tpacket = self.Commands.SetAddressCommand(address)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_SET_ADDRESS:\n\t\t\t\treturn {\n\t\t\t\t\t'index': packet[3]\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef 
GetNodeAddress(self, port):\n\t\tpacket = self.Commands.GetAddressCommand()\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_GET_ADDRESS:\n\t\t\t\treturn {\n\t\t\t\t\t'index': packet[3]\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodeInfo(self, port):\n\t\tpacket = self.Commands.GetNodeInfoCommand()\n\t\tinfo = self.HW.Send(port, packet)\n\n\t\tif info is None:\n\t\t\treturn None\n\t\t\n\t\tif len(info) > 3:\n\t\t\tif info[1] == self.Commands.OPCODE_GET_NODE_INFO:\n\t\t\t\treturn {\n\t\t\t\t\t'info': info\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodeList(self, port):\n\t\tpacket = self.Commands.GetNodeListCommand()\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_GET_NODES_LIST:\n\t\t\t\tinfo = {\n\t\t\t\t\t'list': []\n\t\t\t\t}\n\t\t\t\tdata = packet[3:]\n\t\t\t\tfor idx, item in enumerate(data[::2]):\n\t\t\t\t\tnode = data[idx*2:idx*2+2]\n\t\t\t\t\tinfo[\"list\"].append({\n\t\t\t\t\t\t\"device_id\": node[0],\n\t\t\t\t\t\t\"status\": node[1]\n\t\t\t\t\t})\n\t\t\t\treturn info\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodesMap(self, port):\n\t\tpacket = self.Commands.GetNodesMapCommand()\n\t\tmap = self.HW.Send(port, packet)\n\n\t\tif map is None:\n\t\t\treturn None\n\t\t\n\t\tif len(map) > 3:\n\t\t\tif map[1] == self.Commands.OPCODE_GET_NODES_MAP:\n\t\t\t\treturn 
{\n\t\t\t\t\t'info': map\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef AddNodeIndex(self, port, index):\n\t\tpacket = self.Commands.AddNodeIndexCommand(index)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_ADD_NODE_INDEX:\n\t\t\t\tupdated_index = packet[3]\n\t\t\t\tstatus = False\n\t\t\t\tif index == updated_index:\n\t\t\t\t\tstatus = True\n\t\t\t\treturn {\n\t\t\t\t\t'status': status,\n\t\t\t\t\t'info': packet\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef DelNodeIndex(self, port, index):\n\t\tpacket = self.Commands.DelNodeIndexCommand(index)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_DEL_NODE_INDEX:\n\t\t\t\tupdated_index = packet[3]\n\t\t\t\tstatus = False\n\t\t\t\tif index == updated_index:\n\t\t\t\t\tstatus = True\n\t\t\t\treturn {\n\t\t\t\t\t'status': status,\n\t\t\t\t\t'device_id': updated_index\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetRemoteNodeInfo(self, port, index):\n\t\tpayload = [self.Commands.OPCODE_GET_NODE_INFO, 0]\n\t\tpacket = self.Commands.ReadRemoteCommand(index, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif len(packet) > 18:\n\t\t\t\t\tif packet[4] == 
self.Commands.OPCODE_GET_NODE_INFO:\n\t\t\t\t\t\tnrf_packet = packet[3:]\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t'packet': nrf_packet\n\t\t\t\t\t\t}\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"(ERROR) Incorrect answer. {0}\".format(packet))\n\t\t\t\t\t\treturn None\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Packet length incorrect.\")\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetRemoteNodeData(self, port, node_id, sensor_index):\n\t\tpayload = [self.Commands.OPCODE_GET_NODES_DATA, 1, sensor_index]\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_GET_NODES_DATA:\n\t\t\t\t\tnrf_packet = packet[3:]\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': nrf_packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. 
{0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef SetRemoteNodeData(self, port, node_id, sensor_index, sensor_value):\n\t\tarr_value = common.IntToBytes(sensor_value, 4)\n\t\t# arr_value = value.to_bytes(4, 'big')\n\t\tpayload = [self.Commands.OPCODE_SET_NODES_DATA, 5, sensor_index] + arr_value\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif packet is not None and len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_SET_NODES_DATA:\n\t\t\t\t\tnrf_packet = packet[3:]\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': nrf_packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. {0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef SetRemoteNodeAddress(self, port, node_id, address):\n\t\t# OPCODE : LEN : PAYLOAD\n\t\tpayload = [self.Commands.OPCODE_SET_ADDRESS, 1, address]\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif packet is not None and len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_SET_ADDRESS:\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. 
{0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetRemoteNodeAddress(self, port, node_id):\n\t\tpayload = [self.Commands.OPCODE_GET_ADDRESS]\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif packet is not None and len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_GET_ADDRESS:\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. {0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n","sub_path":"2020/poc/python/classes/nrf.py","file_name":"nrf.py","file_ext":"py","file_size_in_byte":10310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"630370655","text":"# coding=utf-8\n# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport argparse\nimport platform\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom .data_loader import DataGenerator_read_data\nfrom .models import Actor\nfrom .rewards import get_Reward\nfrom .helpers.tf_utils import set_seed\nfrom .helpers.lambda_utils import BIC_lambdas\nfrom .helpers.analyze_utils import convert_graph_int_to_adj_mat, \\\n graph_prunned_by_coef, graph_prunned_by_coef_2nd\n\nfrom castle.common import BaseLearner, Tensor\nfrom castle.metrics import MetricsDAG\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n\n\nclass RL(BaseLearner):\n \"\"\"\n RL Algorithm.\n A classic causal discovery algorithm based on conditional independence tests.\n\n Parameters\n ----------\n encoder_type: str\n type of encoder used\n hidden_dim: int\n actor LSTM num_neurons\n num_heads: int\n actor input embedding\n num_stacks: int\n actor LSTM num_neurons\n residual: bool\n whether to use residual for gat encoder\n decoder_type: str\n type of decoder used\n decoder_activation: str\n activation for decoder\n decoder_hidden_dim: int\n hidden dimension for decoder\n use_bias: bool\n Whether to add bias term when calculating decoder logits\n use_bias_constant: bool\n Whether to add bias term as CONSTANT when calculating 
decoder logits\n bias_initial_value: float\n Initial value for bias term when calculating decoder logits\n batch_size: int\n batch size for training\n input_dimension: int\n dimension of reshaped vector\n normalize: bool\n whether the inputdata shall be normalized\n transpose: bool\n whether the true graph needs transposed\n score_type: str\n score functions\n reg_type: str\n regressor type (in combination wth score_type)\n lambda_iter_num: int\n how often to update lambdas\n lambda_flag_default: bool\n with set lambda parameters; true with default strategy and ignore input bounds\n score_bd_tight: bool\n if bound is tight, then simply use a fixed value, rather than the adaptive one\n lambda1_update: float\n increasing additive lambda1\n lambda2_update: float\n increasing multiplying lambda2\n score_lower: float\n lower bound on lambda1\n score_upper: float\n upper bound on lambda1\n lambda2_lower: float\n lower bound on lambda2\n lambda2_upper: float\n upper bound on lambda2\n seed: int\n seed\n nb_epoch: int\n nb epoch\n lr1_start: float\n actor learning rate\n lr1_decay_step: int\n lr1 decay step\n lr1_decay_rate: float\n lr1 decay rate\n alpha: float\n update factor moving average baseline\n init_baseline: float\n initial baseline - REINFORCE\n temperature: float\n pointer_net initial temperature\n C: float\n pointer_net tan clipping\n l1_graph_reg: float\n L1 graph regularization to encourage sparsity\n inference_mode: bool\n switch to inference mode when model is trained\n verbose: bool\n print detailed logging or not\n\n Attributes\n ----------\n causal_matrix : numpy.ndarray\n Learned causal structure matrix\n\n References\n ----------\n https://arxiv.org/abs/1906.04477\n\n Examples\n --------\n >>> from castle.algorithms import RL\n >>> from castle.datasets import load_dataset\n >>> from castle.common import GraphDAG\n >>> from castle.metrics import MetricsDAG\n >>> true_dag, X = load_dataset(name='iid_test')\n >>> n = RL(lambda_flag_default=True)\n >>> 
n.learn(X, dag=true_dag)\n >>> GraphDAG(n.causal_matrix, true_dag)\n >>> met = MetricsDAG(n.causal_matrix, true_dag)\n >>> print(met.metrics)\n \"\"\"\n \n def __init__(self, encoder_type='TransformerEncoder', \n hidden_dim=64, \n num_heads=16, \n num_stacks=6, \n residual=False, \n decoder_type='SingleLayerDecoder', \n decoder_activation='tanh', \n decoder_hidden_dim=16, \n use_bias=False, \n use_bias_constant=False, \n bias_initial_value=False, \n batch_size=64, \n input_dimension=64, \n normalize=False, \n transpose=False, \n score_type='BIC', \n reg_type='LR', \n lambda_iter_num=1000, \n lambda_flag_default=False, \n score_bd_tight=False, \n lambda1_update=1.0, \n lambda2_update=10, \n score_lower=0.0, \n score_upper=0.0, \n lambda2_lower=-1.0, \n lambda2_upper=-1.0, \n seed=8, \n nb_epoch=20000, \n lr1_start=0.001,\n lr1_decay_step=5000, \n lr1_decay_rate=0.96, \n alpha=0.99, \n init_baseline=-1.0, \n temperature=3.0, \n C=10.0, \n l1_graph_reg=0.0, \n inference_mode=True, \n verbose=False):\n\n super().__init__()\n\n parser = argparse.ArgumentParser(description='Configuration')\n self.config = parser.parse_args(args=[])\n self.config.encoder_type = encoder_type\n self.config.hidden_dim = hidden_dim\n self.config.num_heads = num_heads\n self.config.num_stacks = num_stacks\n self.config.residual = residual\n self.config.decoder_type = decoder_type\n self.config.decoder_activation = decoder_activation\n self.config.decoder_hidden_dim = decoder_hidden_dim\n self.config.use_bias = use_bias\n self.config.use_bias_constant = use_bias_constant\n self.config.bias_initial_value = bias_initial_value\n self.config.batch_size = batch_size\n self.config.input_dimension = input_dimension\n self.config.normalize = normalize\n self.config.transpose = transpose\n self.config.score_type = score_type\n self.config.reg_type = reg_type\n self.config.lambda_iter_num = lambda_iter_num\n self.config.lambda_flag_default = lambda_flag_default\n self.config.score_bd_tight = 
score_bd_tight\n self.config.lambda1_update = lambda1_update\n self.config.lambda2_update = lambda2_update\n self.config.score_lower = score_lower\n self.config.score_upper = score_upper\n self.config.lambda2_lower = lambda2_lower\n self.config.lambda2_upper = lambda2_upper\n self.config.seed = seed\n self.config.nb_epoch = nb_epoch\n self.config.lr1_start = lr1_start\n self.config.lr1_decay_step = lr1_decay_step\n self.config.lr1_decay_rate = lr1_decay_rate\n self.config.alpha = alpha\n self.config.init_baseline = init_baseline\n self.config.temperature = temperature\n self.config.C = C\n self.config.l1_graph_reg = l1_graph_reg\n self.config.inference_mode = inference_mode\n self.config.verbose = verbose\n\n def learn(self, data, dag=None):\n \"\"\"\n Set up and run the RL algorithm.\n\n Parameters\n ----------\n data: castle.Tensor or numpy.ndarray\n The castle.Tensor or numpy.ndarray format data you want to learn.\n \"\"\"\n config = self.config\n if dag is not None:\n config.dag = dag\n\n if isinstance(data, np.ndarray):\n X = data\n elif isinstance(data, Tensor):\n X = data.data\n else:\n raise TypeError('The type of data must be '\n 'Tensor or numpy.ndarray, but got {}'\n .format(type(data)))\n \n config.data_size = X.shape[0]\n config.max_length = X.shape[1]\n\n causal_matrix = self._rl(X, config)\n self.causal_matrix = causal_matrix\n\n def _rl(self, X, config):\n # Reproducibility\n set_seed(config.seed)\n\n logging.info('Python version is {}'.format(platform.python_version()))\n\n # input data\n if hasattr(config, 'dag'):\n training_set = DataGenerator_read_data(\n X, config.dag, config.normalize, config.transpose)\n else:\n training_set = DataGenerator_read_data(\n X, None, config.normalize, config.transpose)\n\n # set penalty weights\n score_type = config.score_type\n reg_type = config.reg_type\n\n if config.lambda_flag_default: \n sl, su, strue = BIC_lambdas(training_set.inputdata, None, None, None, reg_type, score_type)\n lambda1 = 0\n lambda1_upper = 
5\n lambda1_update_add = 1\n lambda2 = 1/(10**(np.round(config.max_length/3)))\n lambda2_upper = 0.01\n lambda2_update_mul = 10\n lambda_iter_num = config.lambda_iter_num\n\n # test initialized score\n logging.info('Original sl: {}, su: {}, strue: {}'.format(sl, su, strue))\n logging.info('Transfomed sl: {}, su: {}, lambda2: {}, true: {}'.format(sl, su, lambda2,\n (strue-sl)/(su-sl)*lambda1_upper)) \n else:\n # test choices for the case with mannualy provided bounds\n # not fully tested\n sl = config.score_lower\n su = config.score_upper\n if config.score_bd_tight:\n lambda1 = 2\n lambda1_upper = 2\n else:\n lambda1 = 0\n lambda1_upper = 5\n lambda1_update_add = 1\n lambda2 = 1/(10**(np.round(config.max_length/3)))\n lambda2_upper = 0.01\n lambda2_update_mul = config.lambda2_update\n lambda_iter_num = config.lambda_iter_num\n\n # actor\n actor = Actor(config)\n callreward = get_Reward(actor.batch_size, config.max_length, \n actor.input_dimension, training_set.inputdata,\n sl, su, lambda1_upper, score_type, reg_type, \n config.l1_graph_reg, False)\n logging.info('Finished creating training dataset, actor model and reward class')\n\n logging.info('Starting session...')\n sess_config = tf.ConfigProto(log_device_placement=False)\n sess_config.gpu_options.allow_growth = True\n\n with tf.Session(config=sess_config) as sess:\n # Run initialize op\n sess.run(tf.global_variables_initializer())\n\n # Test tensor shape\n logging.info('Shape of actor.input: {}'.format(sess.run(tf.shape(actor.input_))))\n\n # Initialize useful variables\n rewards_avg_baseline = []\n rewards_batches = []\n reward_max_per_batch = []\n \n lambda1s = []\n lambda2s = []\n \n graphss = []\n probsss = []\n max_rewards = []\n max_reward = float('-inf')\n max_reward_score_cyc = (lambda1_upper+1, 0)\n\n logging.info('Starting training.')\n \n for i in (range(1, config.nb_epoch + 1)):\n\n if config.verbose:\n logging.info('Start training for {}-th epoch'.format(i))\n\n input_batch = 
training_set.train_batch(actor.batch_size, actor.max_length, actor.input_dimension)\n graphs_feed = sess.run(actor.graphs, feed_dict={actor.input_: input_batch})\n reward_feed = callreward.cal_rewards(graphs_feed, lambda1, lambda2)\n\n # max reward, max reward per batch\n max_reward = -callreward.update_scores([max_reward_score_cyc], lambda1, lambda2)[0]\n max_reward_batch = float('inf')\n max_reward_batch_score_cyc = (0, 0)\n\n for reward_, score_, cyc_ in reward_feed:\n if reward_ < max_reward_batch:\n max_reward_batch = reward_\n max_reward_batch_score_cyc = (score_, cyc_)\n \n max_reward_batch = -max_reward_batch\n\n if max_reward < max_reward_batch:\n max_reward = max_reward_batch\n max_reward_score_cyc = max_reward_batch_score_cyc\n\n # for average reward per batch\n reward_batch_score_cyc = np.mean(reward_feed[:,1:], axis=0)\n\n if config.verbose:\n logging.info('Finish calculating reward for current batch of graph')\n\n # Get feed dict\n feed = {actor.input_: input_batch, actor.reward_: -reward_feed[:,0], actor.graphs_:graphs_feed}\n\n summary, base_op, score_test, probs, graph_batch, reward_batch, \\\n reward_avg_baseline, train_step1, train_step2 = sess.run( \\\n [actor.merged, actor.base_op, actor.test_scores, \\\n actor.log_softmax, actor.graph_batch, actor.reward_batch, \\\n actor.avg_baseline, actor.train_step1, actor.train_step2], \\\n feed_dict=feed)\n\n if config.verbose:\n logging.info('Finish updating actor and critic network using reward calculated')\n \n lambda1s.append(lambda1)\n lambda2s.append(lambda2)\n\n rewards_avg_baseline.append(reward_avg_baseline)\n rewards_batches.append(reward_batch_score_cyc)\n reward_max_per_batch.append(max_reward_batch_score_cyc)\n\n graphss.append(graph_batch)\n probsss.append(probs)\n max_rewards.append(max_reward_score_cyc)\n\n # logging\n if i == 1 or i % 500 == 0:\n logging.info('[iter {}] reward_batch: {:.4}, max_reward: {:.4}, max_reward_batch: {:.4}'.format(i,\n reward_batch, max_reward, 
max_reward_batch))\n\n # update lambda1, lamda2\n if i == 1 or i % lambda_iter_num == 0:\n ls_kv = callreward.update_all_scores(lambda1, lambda2)\n\n graph_int, score_min, cyc_min = np.int32(ls_kv[0][0]), ls_kv[0][1][1], ls_kv[0][1][-1]\n\n if cyc_min < 1e-5:\n lambda1_upper = score_min\n lambda1 = min(lambda1+lambda1_update_add, lambda1_upper)\n lambda2 = min(lambda2*lambda2_update_mul, lambda2_upper)\n logging.info('[iter {}] lambda1 {:.4}, upper {:.4}, lambda2 {:.4}, upper {:.4}, score_min {:.4}, cyc_min {:.4}'.format(i,\n lambda1*1.0, lambda1_upper*1.0, lambda2*1.0, lambda2_upper*1.0, score_min*1.0, cyc_min*1.0))\n\n graph_batch = convert_graph_int_to_adj_mat(graph_int)\n\n if reg_type == 'LR':\n graph_batch_pruned = np.array(graph_prunned_by_coef(graph_batch, training_set.inputdata))\n elif reg_type == 'QR':\n graph_batch_pruned = np.array(graph_prunned_by_coef_2nd(graph_batch, training_set.inputdata))\n\n if hasattr(config, 'dag'):\n met = MetricsDAG(graph_batch.T, training_set.true_graph)\n met2 = MetricsDAG(graph_batch_pruned.T, training_set.true_graph)\n acc_est = met.metrics\n acc_est2 = met2.metrics\n\n fdr, tpr, fpr, shd, nnz = \\\n acc_est['fdr'], acc_est['tpr'], acc_est['fpr'], \\\n acc_est['shd'], acc_est['nnz']\n fdr2, tpr2, fpr2, shd2, nnz2 = \\\n acc_est2['fdr'], acc_est2['tpr'], acc_est2['fpr'], \\\n acc_est2['shd'], acc_est2['nnz']\n \n logging.info('before pruning: fdr {}, tpr {}, fpr {}, shd {}, nnz {}'.format(fdr, tpr, fpr, shd, nnz))\n logging.info('after pruning: fdr {}, tpr {}, fpr {}, shd {}, nnz {}'.format(fdr2, tpr2, fpr2, shd2, nnz2))\n\n plt.figure(1)\n plt.plot(rewards_batches, label='reward per batch')\n plt.plot(max_rewards, label='max reward')\n plt.legend()\n plt.savefig('reward_batch_average.png')\n plt.close()\n \n logging.info('Training COMPLETED !')\n\n return 
graph_batch_pruned.T\n","sub_path":"gcastle/castle/algorithms/gradient/rl/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":17224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"514172145","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\n\nfrom .models import UserMessage\nimport re\n# Create your views here.\n\ndef getform(request):\n # return render(request,'message_form.html')\n\n #UserMessage默认的数据管理器是objects\n #方法all()将所有数据返回成一个queryset类型\n\n full_path = request.get_full_path()\n r = re.search('\\?(\\w)=(\\d{4})&(\\w)=(\\d{4})&(\\w{2})=(\\d)&(\\w)=(\\d)&(\\w{2})=(\\d{7})',full_path).groups()\n sn = ''\n for i in r:\n sn += i\n sn = sn.upper()\n # print(sn)\n message = UserMessage.objects.get(serial_num=sn)\n return render(request,'message_form.html',{'my_message': message})","sub_path":"apps/message/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"535041892","text":"# general imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom pathlib import Path\n\n# DragonPHY imports\nfrom dragonphy import *\n\nTHIS_DIR = Path(__file__).parent.resolve()\nBUILD_DIR = THIS_DIR / 'build'\n\ndef test_sim(dump_waveforms):\n deps = get_deps_cpu_sim(impl_file=THIS_DIR / 'test.sv')\n print(deps)\n\n def qwrap(s):\n return f'\"{s}\"'\n\n defines = {\n 'TI_ADC_TXT': qwrap(BUILD_DIR / 'ti_adc.txt'),\n 'RX_INPUT_TXT': qwrap(BUILD_DIR / 'rx_input.txt'),\n 'WIDTH_TXT': qwrap(BUILD_DIR / 'width.txt')\n }\n\n DragonTester(\n ext_srcs=deps,\n directory=BUILD_DIR,\n defines=defines,\n dump_waveforms=dump_waveforms\n ).run()\n\n x = np.loadtxt(BUILD_DIR / 'rx_input.txt', dtype=float)\n\n y = np.loadtxt(BUILD_DIR / 'ti_adc.txt', dtype=int, delimiter=',')\n y = y.flatten()\n\n widths = np.loadtxt(BUILD_DIR / 'width.txt', dtype=float, delimiter=',')\n widths = widths.flatten()\n\n # make sure that length of y is an integer multiple of length of x\n assert len(y) % len(x) == 0, \\\n 'Number of ADC codes must be an integer multiple of the number of input samples.'\n\n # repeat input as necessary\n num_repeat = len(y) // len(x)\n x = np.repeat(x, num_repeat)\n\n assert len(x) == len(y), \\\n 'Lengths of x and y should match at this point.'\n\n plot_data(x, y, widths)\n check_data(x, y)\n\ndef plot_data(x, y, widths):\n plt.plot(x, y, '*')\n plt.xlabel('Differential input voltage')\n plt.ylabel('ADC Code')\n plt.savefig(BUILD_DIR / 'dc.eps')\n plt.cla()\n plt.clf()\n\n plt.plot(x, widths*1e12, '*')\n plt.xlabel('Differential input voltage')\n plt.ylabel('PFD Out Width (ps)')\n plt.savefig(BUILD_DIR / 'widths.eps')\n plt.cla()\n plt.clf()\n\ndef check_data(x, y, inl_limit=5, offset_limit=2.5, gain_bnds=(240, 300)):\n # compute linear regression\n regr = linear_model.LinearRegression()\n regr.fit(x[:, np.newaxis], y)\n\n # INL\n y_fit = regr.predict(x[:, np.newaxis])\n inl = np.max(np.abs(y 
- y_fit))\n assert inl <= inl_limit, f'INL out of spec: {inl}.'\n print(f'INL OK: {inl}')\n\n # offset\n offset = regr.intercept_\n assert -offset_limit <= offset <= offset_limit, f'Offset out of spec: {offset}.'\n print(f'Offset OK: {offset}')\n\n # gain\n gain = regr.coef_[0]\n assert min(gain_bnds) <= gain <= max(gain_bnds), f'Gain out of spec: {gain} LSB/Volt.'\n print(f'Gain OK: {gain} LSB/Volt.')\n","sub_path":"tests/cpu_system_tests/dc/test_dc.py","file_name":"test_dc.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"566171239","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright 2010 OpenStack LLC\n# Copyright 2012 University Of Minho\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport errno\nimport os\nimport shutil\n\nfrom nova.compute import instance_types\nfrom nova import context\nfrom nova import db\nfrom nova import exception\nfrom nova import flags\nfrom nova.openstack.common import importutils\nfrom nova.openstack.common import log as logging\nfrom nova import test\nfrom nova import utils\nfrom nova.virt.gpu import driver as gpulibvirt_driver\nfrom nova.virt.gpu import utils as gpu_utils\n\nfrom nova.virt.libvirt import driver as libvirt_driver\n\ntry:\n import libvirt\nexcept ImportError:\n import nova.tests.fakelibvirt as libvirt\nlibvirt_driver.libvirt = libvirt\n\n\nFLAGS = flags.FLAGS\nLOG = logging.getLogger(__name__)\n\n\nCOMMON_FLAGS = dict(\n \n instance_type_extra_specs=['cpu_arch:x86_64',\n 'gpus:1', 'gpu_arch:fermi', \n 'hypervisor_type:LXC'],\n libvirt_type='lxc',\n dev_cgroups_path='/test/cgroup'\n)\n\n\nclass GPULibvirtDriverTestCase(test.TestCase):\n \"\"\"Test for nova.virt.gpu.gpulibvirt_driver.LibvirtDriver.\"\"\"\n def setUp(self):\n super(GPULibvirtDriverTestCase, self).setUp()\n\n self.flags(**COMMON_FLAGS)\n self.flags(fake_call=True)\n self.user_id = 'fake'\n self.project_id = 'fake'\n self.context = context.get_admin_context()\n self.gpulibvirtconnection = gpulibvirt_driver.GPULibvirtDriver(read_only=True)\n 
self.root_fs = './test-gpu'\n self.cgroup_path = self.root_fs + '/cgroup/fake'\n self.etc_path = self.root_fs + '/etc'\n flavor_id = instance_types.get_instance_type_by_name('m1.small')\\\n ['flavorid']\n extra_specs = {}\n extra_specs['cpu_arch'] = 's== x86_64'\n extra_specs['gpus'] = '= 1'\n extra_specs['gpu_arch'] = 's== fermi'\n extra_specs['hypervisor_type'] = 's== LXC'\n\n db.instance_type_extra_specs_update_or_create(\n context.get_admin_context(), flavor_id,extra_specs)\n\n def tearDown(self):\n super(GPULibvirtDriverTestCase, self).tearDown()\n \n inst_meta = {'gpus': 1} \n test_instance = {'memory_kb': '1024000',\n 'basepath': '/some/path',\n 'bridge_name': 'br100',\n 'vcpus': 2,\n 'name' : 'fake',\n 'project_id': 'fake',\n 'bridge': 'br101',\n 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',\n 'root_gb': 10,\n 'ephemeral_gb': 20,\n 'metadata': inst_meta,\n 'instance_type_id': '5'} # m1.small\n\n def testInitGPU(self):\n extra_specs = gpu_utils.get_instance_type_extra_specs_capabilities()\n init_gpus = extra_specs['gpus']\n self.assertEquals(1, int(init_gpus))\n self.assertEquals(1, gpu_utils.get_gpu_total())\n\n def testAssignDeassignGPU(self):\n if os.path.isdir(self.root_fs):\n shutil.rmtree(self.root_fs)\n os.makedirs(self.cgroup_path)\n os.makedirs(self.etc_path)\n gpu_utils.assign_gpus(self.context, self.test_instance, \n self.root_fs)\n self.assertEquals(0, gpu_utils.get_gpu_total())\n \n gpu_utils.deassign_gpus(self.test_instance)\n self.assertEquals(1, gpu_utils.get_gpu_total())\n shutil.rmtree(self.root_fs)\n \n def testOverAllocationGPU(self):\n if os.path.isdir(self.root_fs):\n shutil.rmtree(self.root_fs)\n os.makedirs(self.cgroup_path)\n os.makedirs(self.etc_path)\n gpu_utils.assign_gpus(self.context, self.test_instance,self.root_fs)\n try:\n gpu_utils.assign_gpus(self.context, self.test_instance, \n self.root_fs)\n except Exception as Exn:\n gpu_utils.deassign_gpus(self.test_instance)\n shutil.rmtree(self.root_fs)\n return\n 
shutil.rmtree(self.root_fs)\n assert false, \"Cannot detect over-allocation\"\n\n","sub_path":"nova/tests/test_gpu.py","file_name":"test_gpu.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"510319157","text":"#! /usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport logging\r\nimport os\r\nimport subprocess\r\n\r\nfrom glob import glob\r\nfrom shutil import rmtree\r\nfrom tempfile import mkdtemp\r\n\r\nfrom PyPDF2 import PdfFileReader\r\nfrom ebooklib import epub\r\n\r\nfrom . import COVER_SUFFIX\r\n\r\nCMD_PDF2HTML = ['tools/pdf2html/pdf2htmlEx.exe',\r\n '--split-pages', '1', '--printing', '0',\r\n '--tounicode', '0', '--process-outline', '0',\r\n '--embed-css', '0', '--embed-font', '0',\r\n '--embed-javascript', '1', '--embed-image', '0',\r\n '--css-filename', 'epub.css', '--bg-format', 'jpg',\r\n '--page-filename', 'chapter%02d.xhtml']\r\n\r\nPREV_TEMPLATE = ''''''\r\nNEXT_TEMPLATE = ''''''\r\n\r\nlogger = logging.getLogger('mkepub.pdfreader2')\r\n\r\n\r\ndef get_num_pages(filename):\r\n with open(filename, 'rb') as f:\r\n r = PdfFileReader(f, strict=False)\r\n return r.getNumPages()\r\n\r\n\r\nclass PdfReader:\r\n\r\n def __init__(self):\r\n self._filename = None\r\n self._workpath = []\r\n\r\n def is_support(self, ext):\r\n return ext in ('.pdf',)\r\n\r\n def get_template(self):\r\n return None\r\n\r\n def _get_content(self, styles, pages):\r\n path = os.path.dirname(__file__)\r\n filename = os.path.join(path, '..', 'templates', 'pdf_frame.html')\r\n with open(filename, 'r') as f:\r\n return f.read() \\\r\n .replace('%CSS_LINKS%', ''.join(styles)) \\\r\n .replace('%PAGES%', ''.join(pages))\r\n\r\n def get_cover(self):\r\n cover = os.path.join(self._filename[:-4] + COVER_SUFFIX)\r\n return cover if os.path.exists(cover) else None\r\n\r\n def open(self, filename):\r\n n = get_num_pages(filename)\r\n\r\n batch = 30\r\n logger.info('Total pages: %s', n)\r\n for i in range(1, n, batch):\r\n p = mkdtemp(prefix='mkepub_', suffix='_pdf')\r\n self._workpath.append(p)\r\n\r\n j = i + batch - 1\r\n logger.info('Convert pages from %d to %d', i, j)\r\n logger.info('Target path: %s', p)\r\n\r\n args = ['--dest-dir', p, '-f', str(i), '-l', 
str(j), filename]\r\n cmdlist = CMD_PDF2HTML + args\r\n\r\n logger.info('Run command: %s', ' '.join(cmdlist))\r\n p = subprocess.Popen(cmdlist)\r\n p.communicate()\r\n\r\n if p.returncode != 0:\r\n raise RuntimeError('转换失败,pdf2htmlEx 出错')\r\n logger.info('Convert page %d to %d OK', i, j)\r\n self._filename = filename\r\n\r\n def close(self):\r\n self._filename = None\r\n for p in self._workpath:\r\n rmtree(p)\r\n self._workpath = []\r\n\r\n def get_metadata(self):\r\n return {}\r\n\r\n def get_toc(self):\r\n return self._toc\r\n\r\n def images(self):\r\n for p in self._workpath:\r\n for filename in glob(os.path.join(p, '*.jpg')):\r\n name = os.path.basename(filename)\r\n with open(filename, 'rb') as f:\r\n yield epub.EpubItem(uid=name,\r\n file_name=\"Text/%s\" % name,\r\n media_type=\"images/jpg\",\r\n content=f.read())\r\n\r\n def stylesheets(self):\r\n n = 0\r\n for p in self._workpath:\r\n for filename in glob(os.path.join(p, '*.css')):\r\n name = str(n) + '/' + os.path.basename(filename)\r\n with open(filename, \"rb\") as f:\r\n yield epub.EpubItem(uid=name,\r\n file_name=\"Styles/%s\" % name,\r\n media_type=\"text/css\",\r\n content=f.read())\r\n n += 1\r\n\r\n def contents(self):\r\n if not self._workpath:\r\n return\r\n self._toc = []\r\n\r\n def _page_name(i):\r\n return \"pdf_frame%s.html\" % (str(i) if i else '')\r\n\r\n n = len(self._workpath)\r\n for i in range(n):\r\n p = self._workpath[i]\r\n for filename in glob(os.path.join(p, '*.html')):\r\n with open(filename, 'r') as f:\r\n content = f.read()\r\n m = 'link rel=\"stylesheet\" href=\"'\r\n s = '../Styles/%d/' % i\r\n content = content.replace(m, m+s)\r\n if i:\r\n m = ''\r\n s = PREV_TEMPLATE % _page_name(i-1)\r\n content = content.replace(m, m+s)\r\n if i < n - 1:\r\n m = '
\\n'\r\n s = NEXT_TEMPLATE % _page_name(i+1)\r\n content = content.replace(m, s+m)\r\n url = \"Text/%s\" % _page_name(i)\r\n page = epub.EpubItem(file_name=url, content=content)\r\n yield page\r\n\r\n for p in self._workpath:\r\n for filename in glob(os.path.join(p, 'chapter*.xhtml')):\r\n name = os.path.basename(filename)\r\n with open(filename, 'rb') as f:\r\n page = epub.EpubItem(file_name=\"Text/%s\" % name,\r\n content=f.read())\r\n yield page\r\n\r\n n = 0\r\n for p in self._workpath:\r\n prefix = 'Styles/' + str(n)\r\n n += 1\r\n for filename in glob(os.path.join(p, '*.woff')):\r\n name = os.path.basename(filename)\r\n with open(filename, 'rb') as f:\r\n page = epub.EpubItem(\r\n file_name=\"%s/%s\" % (prefix, name),\r\n content=f.read())\r\n yield page\r\n\r\n\r\ndef register_reader():\r\n return PdfReader()\r\n\r\n\r\nif __name__ == '__main__':\r\n r = PdfReader()\r\n","sub_path":"readers/reader_pdf2.py","file_name":"reader_pdf2.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"510590414","text":"# standard library\nimport os\n# project\nimport config\n\n\ndef line_generator():\n first_line = True;\n with open(config.NEW_PEAK_FLOW_SOURCE) as handle:\n for line in handle:\n if first_line:\n first_line = False\n continue\n yield(line)\n\n\ndef get_comid(line):\n return line.split(',')[0]\n\n\ndef process(line):\n comid = get_comid(line)\n path = os.path.join(config.OBSERVED_FILTERED, comid + '.csv')\n print(comid)\n with open(path) as handle:\n lines = [ln for ln in handle]\n if line not in lines:\n lines.append(line)\n lines.sort()\n with open(path, 'w') as handle:\n for line in lines:\n handle.write(line)\n\n\ndef main():\n # os.makedirs(config.OBSERVED_NEW_PEAK, exist_ok=True)\n for line in line_generator():\n process(line)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"helper_scripts/peak_flow.py","file_name":"peak_flow.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"163602464","text":"\"\"\"\n8.\tПосчитать, сколько раз встречается определенная цифра в введенной\n последовательности чисел. Количество вводимых чисел и цифра,\n которую необходимо посчитать, задаются вводом с клавиатуры.\n\"\"\"\n\ndef finder(numbers, number, number_counter = 0):\n if numbers == 0:\n return f\"{number_counter}\"\n else:\n if numbers % 10 == number:\n number_counter += 1\n numbers = numbers // 10\n return finder(numbers, number, number_counter)\n\n\nNUMBERS = int(input(\"Введите составное число: \"))\nNUMBER = int(input(\"Какую цифру ищем: \"))\n\nprint(f'Мы ищем количество вхождений {NUMBER} в {NUMBERS}, давайте узнаем: ровно {finder(NUMBERS, NUMBER)} раза')","sub_path":"Lesson_2/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"45575088","text":"#Запросите у пользователя значения выручки и издержек фирмы.\r\n#6. Определите, с каким финансовым результатом работает фирма\r\n#(при��ыль — выручка больше издержек, или убыток — издержки больше выручки).\r\n#Выведите соответствующее сообщение.\r\n#Если фирма отработала с прибылью, вычислите рентабельность выручки (соотношение прибыли к выручке).\r\n#Далее запросите численность сотрудников фирмы и определите прибыль фирмы в расчете на одного сотрудника.\r\n\r\nrevenue = int(input('Выручка: '))\r\ncost = int(input('Затраты: '))\r\nprofit = revenue - cost\r\nif profit > 0:\r\n print(f'Good news, your have profit {profit}')\r\n marg = profit / revenue*100\r\n workers_n = int(input('Численность Вашей фирмы?'))\r\n average_p = profit/workers_n\r\n print(f'Рентабельность деятельности {marg:2}%, прибыль на сотрудника: {average_p:2}')\r\nelse:\r\n print(f'Bad news, your have loss {profit}')\r\n","sub_path":"HW 5.py","file_name":"HW 5.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"629072190","text":"from django.urls import path\n\nfrom .views import index, ProductbyCategoryListView, ComputerFilterView, NoutFilterView, KonsoliFilterView, IgriFilterView, product_detail\n\napp_name = 'shop'\n\nurlpatterns = [\n path('filter_comp/', ComputerFilterView.as_view(), name = 'computers_filter'),\n path('filter_nout/', NoutFilterView.as_view(), name = 'nout_filter'),\n path('filter_konsoli/', KonsoliFilterView.as_view(), name = 'konsoli_filter'),\n path('filter_igri/', IgriFilterView.as_view(), name = 'igri_filter'),\n path('
/', ProductbyCategoryListView.as_view(), name = 'product_list_by_category'),\n path('//', product_detail, name = 'product_detail'),\n path('', index, name = 'index'),\n]\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"643250991","text":"from kivy.config import Config\nfrom kivy.core.window import Window\nfrom kivy.graphics import Color\nfrom kivy.lang import Builder\nfrom kivy.properties import ObjectProperty, StringProperty, ListProperty, NumericProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.image import Image\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.clock import Clock\nfrom kivymd.uix.button import MDIconButton\nfrom kivymd.app import MDApp\nfrom kivymd.theming import ThemableBehavior\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivymd.uix.expansionpanel import MDExpansionPanel, MDExpansionPanelOneLine\nfrom kivymd.uix.label import MDLabel\nfrom kivymd.uix.list import ILeftBodyTouch, IRightBodyTouch, OneLineAvatarIconListItem, MDList, IconLeftWidget\n\nfrom data import head, body, end\n\n# Dimensiones de pantalla portrait\nConfig.set('graphics','resizable',0)\nWindow.size = (360, 640)\n\nclass Container(IRightBodyTouch, MDBoxLayout):\n adaptive_width = True\n\nclass ContentNavigationDrawer(BoxLayout):\n pass\n\nclass NavigationItem(OneLineAvatarIconListItem):\n icon = StringProperty() \n icon2 = StringProperty()\n\nclass Content(MDBoxLayout):\n def __init__(self, item):\n super(Content, self).__init__() \n self.ids.rv.data = []\n\n # si no son cadenas vacias '', agrego a la data al recycleview\n if item['sub'] != '':\n for sub_obj in item['sub']: \n self.ids.rv.data.append(\n {\n \"viewclass\": \"NavigationItem\",\n \"icon\": sub_obj['icon'],\n \"text\": sub_obj['text'],\n \"icon2\": sub_obj['icon_button'],\n \"callback\": lambda x: x,\n }\n )\n\nclass DrawerList(ThemableBehavior, MDList):\n def set_color_item(self, instance_item):\n \"\"\"Called when tap on a menu item.\"\"\"\n\n # Set the color of the icon and text for the menu item.\n for item in self.children:\n if item.text_color == self.theme_cls.primary_color:\n item.text_color = self.theme_cls.text_color\n break\n 
instance_item.text_color = self.theme_cls.primary_color\n\n\"\"\" Clase para cuando no hay ninguna imagen, está clase en vez de heredar del widget IRightBodyTouch hereda de ILeftBody Touch.\n\n\"\"\"\n\nclass MDExpansionChevronRight(ILeftBodyTouch, MDIconButton):\n\n _angle = NumericProperty(0)\n\nclass MyExpansionPanel(MDExpansionPanel):\n \n def __init__(self, data, image, **kwargs):\n super(MyExpansionPanel, self).__init__(**kwargs)\n self.at_least_an_image=image\n\n \"\"\" si no hay ninguna imagen, borra el widget ImageLeftWidget de la componente MDExpansionPanel y agrega el widget MDExpansionChevronRight() a la izquierda.\n \n \"\"\"\n\n if not self.at_least_an_image:\n for child in self.panel_cls.children[0:1]:\n self.panel_cls.remove_widget(child)\n \n self.chevron = MDExpansionChevronRight()\n self.panel_cls.add_widget(self.chevron)\n \nclass NavigationDrawer(MDApp):\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \n def build(self):\n return Builder.load_file(\"myfile.kv\")\n\n def on_start(self):\n new_doby = list()\n\n \"\"\" Cambia todos las claves None por '' porque\n None is not allowed for MDExpansionPanel.icon \n\n \"\"\"\n at_least_an_image = False\n\n for obj in body:\n for key,val in obj.items():\n if val == None:\n obj[key] = ''\n if obj['icon'] != '': \n at_least_an_image = True\n\n new_doby.append(obj) \n\n for new_obj in new_doby:\n self.root.ids.content_drawer.ids.md_list.add_widget(\n MyExpansionPanel(\n image=at_least_an_image,\n data=new_obj,\n icon=new_obj[\"icon\"],\n content=Content(new_obj),\n panel_cls=MDExpansionPanelOneLine(\n text=\"[color=6258B1]\" + new_obj[\"text\"] + \"[/color]\",\n )\n )\n ) \n\nNavigationDrawer().run()","sub_path":"learning/tests/challenge_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"398476300","text":"import sys\nfrom collections import defaultdict\nfrom dataclasses import asdict\nfrom typing import List, Optional\n\nimport graphene\nfrom django_countries.fields import Country\nfrom graphene import relay\n\nfrom ....attribute import models as attribute_models\nfrom ....core.permissions import (\n AuthorizationFilters,\n OrderPermissions,\n ProductPermissions,\n has_one_of_permissions,\n)\nfrom ....core.tracing import traced_resolver\nfrom ....core.utils import build_absolute_uri, get_currency_for_country\nfrom ....core.weight import convert_weight_to_default_weight_unit\nfrom ....product import models\nfrom ....product.models import ALL_PRODUCTS_PERMISSIONS\nfrom ....product.utils import calculate_revenue_for_variant\nfrom ....product.utils.availability import (\n get_product_availability,\n get_variant_availability,\n)\nfrom ....product.utils.variants import get_variant_selection_attributes\nfrom ....thumbnail.utils import get_image_or_proxy_url, get_thumbnail_size\nfrom ....warehouse.reservations import is_reservation_enabled\nfrom ...account import types as account_types\nfrom ...account.enums import CountryCodeEnum\nfrom ...attribute.filters import AttributeFilterInput\nfrom ...attribute.resolvers import resolve_attributes\nfrom ...attribute.types import (\n AssignedVariantAttribute,\n Attribute,\n AttributeCountableConnection,\n SelectedAttribute,\n)\nfrom ...channel import ChannelContext, ChannelQsContext\nfrom ...channel.dataloaders import ChannelBySlugLoader\nfrom ...channel.types import ChannelContextType, ChannelContextTypeWithMetadata\nfrom ...channel.utils import get_default_channel_slug_or_graphql_error\nfrom ...core.connection import (\n CountableConnection,\n create_connection_slice,\n filter_connection_queryset,\n)\nfrom ...core.descriptions import (\n ADDED_IN_31,\n DEPRECATED_IN_3X_FIELD,\n DEPRECATED_IN_3X_INPUT,\n PREVIEW_FEATURE,\n RICH_CONTENT,\n)\nfrom ...core.enums import ReportingPeriod\nfrom 
...core.federation import federated_entity, resolve_federation_references\nfrom ...core.fields import (\n ConnectionField,\n FilterConnectionField,\n JSONString,\n PermissionsField,\n)\nfrom ...core.types import (\n Image,\n ModelObjectType,\n NonNullList,\n TaxedMoney,\n TaxedMoneyRange,\n TaxType,\n ThumbnailField,\n Weight,\n)\nfrom ...core.utils import from_global_id_or_error\nfrom ...discount.dataloaders import DiscountsByDateTimeLoader\nfrom ...meta.types import ObjectWithMetadata\nfrom ...order.dataloaders import (\n OrderByIdLoader,\n OrderLinesByVariantIdAndChannelIdLoader,\n)\nfrom ...plugins.dataloaders import load_plugin_manager\nfrom ...product.dataloaders.products import (\n AvailableProductVariantsByProductIdAndChannel,\n ProductVariantsByProductIdAndChannel,\n)\nfrom ...site.dataloaders import load_site\nfrom ...translations.fields import TranslationField\nfrom ...translations.types import (\n CategoryTranslation,\n CollectionTranslation,\n ProductTranslation,\n ProductVariantTranslation,\n)\nfrom ...utils import get_user_or_app_from_context\nfrom ...utils.filters import reporting_period_to_date\nfrom ...warehouse.dataloaders import (\n AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader,\n PreorderQuantityReservedByVariantChannelListingIdLoader,\n StocksWithAvailableQuantityByProductVariantIdCountryCodeAndChannelLoader,\n)\nfrom ...warehouse.types import Stock\nfrom ..dataloaders import (\n CategoryByIdLoader,\n CategoryChildrenByCategoryIdLoader,\n CollectionChannelListingByCollectionIdAndChannelSlugLoader,\n CollectionChannelListingByCollectionIdLoader,\n CollectionsByProductIdLoader,\n ImagesByProductIdLoader,\n ImagesByProductVariantIdLoader,\n MediaByProductIdLoader,\n MediaByProductVariantIdLoader,\n ProductAttributesByProductTypeIdLoader,\n ProductByIdLoader,\n ProductChannelListingByProductIdAndChannelSlugLoader,\n ProductChannelListingByProductIdLoader,\n ProductTypeByIdLoader,\n ProductVariantByIdLoader,\n 
ProductVariantsByProductIdLoader,\n SelectedAttributesByProductIdLoader,\n SelectedAttributesByProductVariantIdLoader,\n ThumbnailByCategoryIdSizeAndFormatLoader,\n ThumbnailByCollectionIdSizeAndFormatLoader,\n ThumbnailByProductMediaIdSizeAndFormatLoader,\n VariantAttributesByProductTypeIdLoader,\n VariantChannelListingByVariantIdAndChannelSlugLoader,\n VariantChannelListingByVariantIdLoader,\n VariantsChannelListingByProductIdAndChannelSlugLoader,\n)\nfrom ..enums import ProductMediaType, ProductTypeKindEnum, VariantAttributeScope\nfrom ..filters import ProductFilterInput\nfrom ..resolvers import resolve_product_variants, resolve_products\nfrom ..sorters import ProductOrder\nfrom .channels import (\n CollectionChannelListing,\n ProductChannelListing,\n ProductVariantChannelListing,\n)\nfrom .digital_contents import DigitalContent\n\ndestination_address_argument = graphene.Argument(\n account_types.AddressInput,\n description=(\n \"Destination address used to find warehouses where stock availability \"\n \"for this product is checked. 
If address is empty, uses \"\n \"`Shop.companyAddress` or fallbacks to server's \"\n \"`settings.DEFAULT_COUNTRY` configuration.\"\n ),\n)\n\n\nclass Margin(graphene.ObjectType):\n start = graphene.Int()\n stop = graphene.Int()\n\n\nclass BasePricingInfo(graphene.ObjectType):\n on_sale = graphene.Boolean(description=\"Whether it is in sale or not.\")\n discount = graphene.Field(\n TaxedMoney, description=\"The discount amount if in sale (null otherwise).\"\n )\n discount_local_currency = graphene.Field(\n TaxedMoney, description=\"The discount amount in the local currency.\"\n )\n\n\nclass VariantPricingInfo(BasePricingInfo):\n discount_local_currency = graphene.Field(\n TaxedMoney, description=\"The discount amount in the local currency.\"\n )\n price = graphene.Field(\n TaxedMoney, description=\"The price, with any discount subtracted.\"\n )\n price_undiscounted = graphene.Field(\n TaxedMoney, description=\"The price without any discount.\"\n )\n price_local_currency = graphene.Field(\n TaxedMoney, description=\"The discounted price in the local currency.\"\n )\n\n class Meta:\n description = \"Represents availability of a variant in the storefront.\"\n\n\nclass ProductPricingInfo(BasePricingInfo):\n price_range = graphene.Field(\n TaxedMoneyRange,\n description=\"The discounted price range of the product variants.\",\n )\n price_range_undiscounted = graphene.Field(\n TaxedMoneyRange,\n description=\"The undiscounted price range of the product variants.\",\n )\n price_range_local_currency = graphene.Field(\n TaxedMoneyRange,\n description=(\n \"The discounted price range of the product variants \"\n \"in the local currency.\"\n ),\n )\n\n class Meta:\n description = \"Represents availability of a product in the storefront.\"\n\n\nclass PreorderData(graphene.ObjectType):\n global_threshold = PermissionsField(\n graphene.Int,\n required=False,\n description=\"The global preorder threshold for product variant.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n 
)\n global_sold_units = PermissionsField(\n graphene.Int,\n required=True,\n description=\"Total number of sold product variant during preorder.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n end_date = graphene.DateTime(required=False, description=\"Preorder end date.\")\n\n class Meta:\n description = \"Represents preorder settings for product variant.\"\n\n @staticmethod\n def resolve_global_threshold(root, _info):\n return root.global_threshold\n\n @staticmethod\n def resolve_global_sold_units(root, _info):\n return root.global_sold_units\n\n\n@federated_entity(\"id channel\")\nclass ProductVariant(ChannelContextTypeWithMetadata, ModelObjectType):\n id = graphene.GlobalID(required=True)\n name = graphene.String(required=True)\n sku = graphene.String()\n product = graphene.Field(lambda: Product, required=True)\n track_inventory = graphene.Boolean(required=True)\n quantity_limit_per_customer = graphene.Int()\n weight = graphene.Field(Weight)\n channel = graphene.String(\n description=(\n \"Channel given to retrieve this product variant. 
Also used by federation \"\n \"gateway to resolve this object in a federated query.\"\n ),\n )\n channel_listings = PermissionsField(\n NonNullList(ProductVariantChannelListing),\n description=\"List of price information in channels for the product.\",\n permissions=[\n AuthorizationFilters.AUTHENTICATED_APP,\n AuthorizationFilters.AUTHENTICATED_STAFF_USER,\n ],\n )\n pricing = graphene.Field(\n VariantPricingInfo,\n address=destination_address_argument,\n description=(\n \"Lists the storefront variant's pricing, the current price and discounts, \"\n \"only meant for displaying.\"\n ),\n )\n attributes = NonNullList(\n SelectedAttribute,\n required=True,\n description=\"List of attributes assigned to this variant.\",\n variant_selection=graphene.Argument(\n VariantAttributeScope,\n description=\"Define scope of returned attributes.\",\n ),\n )\n margin = graphene.Int(description=\"Gross margin percentage value.\")\n quantity_ordered = PermissionsField(\n graphene.Int,\n description=\"Total quantity ordered.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n revenue = PermissionsField(\n TaxedMoney,\n period=graphene.Argument(ReportingPeriod),\n description=(\n \"Total revenue generated by a variant in given period of time. 
Note: this \"\n \"field should be queried using `reportProductSales` query as it uses \"\n \"optimizations suitable for such calculations.\"\n ),\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n images = NonNullList(\n lambda: ProductImage,\n description=\"List of images for the product variant.\",\n deprecation_reason=f\"{DEPRECATED_IN_3X_FIELD} Use the `media` field instead.\",\n )\n media = NonNullList(\n lambda: ProductMedia,\n description=\"List of media for the product variant.\",\n )\n translation = TranslationField(\n ProductVariantTranslation,\n type_name=\"product variant\",\n resolver=ChannelContextType.resolve_translation,\n )\n digital_content = PermissionsField(\n DigitalContent,\n description=\"Digital content for the product variant.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n stocks = PermissionsField(\n NonNullList(Stock),\n description=\"Stocks for the product variant.\",\n address=destination_address_argument,\n country_code=graphene.Argument(\n CountryCodeEnum,\n description=(\n \"Two-letter ISO 3166-1 country code. \"\n f\"{DEPRECATED_IN_3X_INPUT} Use `address` argument instead.\"\n ),\n ),\n permissions=[\n ProductPermissions.MANAGE_PRODUCTS,\n OrderPermissions.MANAGE_ORDERS,\n ],\n )\n quantity_available = graphene.Int(\n required=False,\n description=(\n \"Quantity of a product available for sale in one checkout. \"\n \"Field value will be `null` when \"\n \"no `limitQuantityPerCheckout` in global settings has been set, and \"\n \"`productVariant` stocks are not tracked.\"\n ),\n address=destination_address_argument,\n country_code=graphene.Argument(\n CountryCodeEnum,\n description=(\n \"Two-letter ISO 3166-1 country code. When provided, the exact quantity \"\n \"from a warehouse operating in shipping zones that contain this \"\n \"country will be returned. Otherwise, it will return the maximum \"\n \"quantity from all shipping zones. 
\"\n f\"{DEPRECATED_IN_3X_INPUT} Use `address` argument instead.\"\n ),\n ),\n )\n preorder = graphene.Field(\n PreorderData,\n required=False,\n description=(\n \"Preorder data for product variant.\" + ADDED_IN_31 + PREVIEW_FEATURE\n ),\n )\n created = graphene.DateTime(required=True)\n updated_at = graphene.DateTime(required=True)\n\n class Meta:\n default_resolver = ChannelContextType.resolver_with_context\n description = (\n \"Represents a version of a product such as different size or color.\"\n )\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.ProductVariant\n\n @staticmethod\n def resolve_created(root: ChannelContext[models.ProductVariant], _info):\n return root.node.created_at\n\n @staticmethod\n def resolve_channel(root: ChannelContext[models.Product], _info):\n return root.channel_slug\n\n @staticmethod\n def resolve_stocks(\n root: ChannelContext[models.ProductVariant],\n info,\n address=None,\n country_code=None,\n ):\n if address is not None:\n country_code = address.country\n return StocksWithAvailableQuantityByProductVariantIdCountryCodeAndChannelLoader(\n info.context\n ).load((root.node.id, country_code, root.channel_slug))\n\n @staticmethod\n def resolve_quantity_available(\n root: ChannelContext[models.ProductVariant],\n info,\n address=None,\n country_code=None,\n ):\n if address is not None:\n country_code = address.country\n site = load_site(info.context)\n channel_slug = str(root.channel_slug) if root.channel_slug else None\n\n global_quantity_limit_per_checkout = site.settings.limit_quantity_per_checkout\n\n if root.node.is_preorder_active():\n variant = root.node\n channel_listing = VariantChannelListingByVariantIdAndChannelSlugLoader(\n info.context\n ).load((variant.id, channel_slug))\n\n def calculate_available_per_channel(channel_listing):\n if (\n channel_listing\n and channel_listing.preorder_quantity_threshold is not None\n ):\n if is_reservation_enabled(site.settings):\n quantity_reserved = (\n 
PreorderQuantityReservedByVariantChannelListingIdLoader(\n info.context\n ).load(channel_listing.id)\n )\n\n def calculate_available_channel_quantity_with_reservations(\n reserved_quantity,\n ):\n return max(\n min(\n channel_listing.preorder_quantity_threshold\n - channel_listing.preorder_quantity_allocated\n - reserved_quantity,\n global_quantity_limit_per_checkout or sys.maxsize,\n ),\n 0,\n )\n\n return quantity_reserved.then(\n calculate_available_channel_quantity_with_reservations\n )\n\n return min(\n channel_listing.preorder_quantity_threshold\n - channel_listing.preorder_quantity_allocated,\n global_quantity_limit_per_checkout or sys.maxsize,\n )\n if variant.preorder_global_threshold is not None:\n variant_channel_listings = VariantChannelListingByVariantIdLoader(\n info.context\n ).load(variant.id)\n\n def calculate_available_global(variant_channel_listings):\n if not variant_channel_listings:\n return global_quantity_limit_per_checkout\n global_sold_units = sum(\n channel_listing.preorder_quantity_allocated\n for channel_listing in variant_channel_listings\n )\n\n available_quantity = variant.preorder_global_threshold\n available_quantity -= global_sold_units\n\n if is_reservation_enabled(site.settings):\n quantity_reserved = (\n PreorderQuantityReservedByVariantChannelListingIdLoader(\n info.context\n ).load_many(\n [listing.id for listing in variant_channel_listings]\n )\n )\n\n def calculate_available_global_quantity_with_reservations(\n reserved_quantities,\n ):\n return max(\n min(\n variant.preorder_global_threshold\n - global_sold_units\n - sum(reserved_quantities),\n global_quantity_limit_per_checkout\n or sys.maxsize,\n ),\n 0,\n )\n\n return quantity_reserved.then(\n calculate_available_global_quantity_with_reservations\n )\n\n return min(\n variant.preorder_global_threshold - global_sold_units,\n global_quantity_limit_per_checkout or sys.maxsize,\n )\n\n return variant_channel_listings.then(calculate_available_global)\n\n return 
global_quantity_limit_per_checkout\n\n return channel_listing.then(calculate_available_per_channel)\n\n if not root.node.track_inventory:\n return global_quantity_limit_per_checkout\n\n return AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader(\n info.context\n ).load((root.node.id, country_code, channel_slug))\n\n @staticmethod\n def resolve_digital_content(root: ChannelContext[models.ProductVariant], _info):\n return getattr(root.node, \"digital_content\", None)\n\n @staticmethod\n def resolve_attributes(\n root: ChannelContext[models.ProductVariant],\n info,\n variant_selection: Optional[str] = None,\n ):\n def apply_variant_selection_filter(selected_attributes):\n if not variant_selection or variant_selection == VariantAttributeScope.ALL:\n return selected_attributes\n attributes = [\n (selected_att[\"attribute\"], selected_att[\"variant_selection\"])\n for selected_att in selected_attributes\n ]\n variant_selection_attrs = [\n attr for attr, _ in get_variant_selection_attributes(attributes)\n ]\n\n if variant_selection == VariantAttributeScope.VARIANT_SELECTION:\n return [\n selected_attribute\n for selected_attribute in selected_attributes\n if selected_attribute[\"attribute\"] in variant_selection_attrs\n ]\n return [\n selected_attribute\n for selected_attribute in selected_attributes\n if selected_attribute[\"attribute\"] not in variant_selection_attrs\n ]\n\n return (\n SelectedAttributesByProductVariantIdLoader(info.context)\n .load(root.node.id)\n .then(apply_variant_selection_filter)\n )\n\n @staticmethod\n def resolve_channel_listings(root: ChannelContext[models.ProductVariant], info):\n return VariantChannelListingByVariantIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_pricing(\n root: ChannelContext[models.ProductVariant], info, *, address=None\n ):\n if not root.channel_slug:\n return None\n\n channel_slug = str(root.channel_slug)\n context = info.context\n\n product = 
ProductByIdLoader(context).load(root.node.product_id)\n product_channel_listing = ProductChannelListingByProductIdAndChannelSlugLoader(\n context\n ).load((root.node.product_id, channel_slug))\n variant_channel_listing = VariantChannelListingByVariantIdAndChannelSlugLoader(\n context\n ).load((root.node.id, channel_slug))\n collections = CollectionsByProductIdLoader(context).load(root.node.product_id)\n channel = ChannelBySlugLoader(context).load(channel_slug)\n\n address_country = address.country if address is not None else None\n manager = load_plugin_manager(info.context)\n\n def calculate_pricing_info(discounts):\n def calculate_pricing_with_channel(channel):\n def calculate_pricing_with_product_variant_channel_listings(\n variant_channel_listing,\n ):\n def calculate_pricing_with_product(product):\n def calculate_pricing_with_product_channel_listings(\n product_channel_listing,\n ):\n def calculate_pricing_with_collections(collections):\n if (\n not variant_channel_listing\n or not product_channel_listing\n ):\n return None\n\n country_code = (\n address_country or channel.default_country.code\n )\n\n local_currency = None\n local_currency = get_currency_for_country(country_code)\n\n availability = get_variant_availability(\n variant=root.node,\n variant_channel_listing=variant_channel_listing,\n product=product,\n product_channel_listing=product_channel_listing,\n collections=collections,\n discounts=discounts,\n channel=channel,\n country=Country(country_code),\n local_currency=local_currency,\n plugins=manager,\n )\n return VariantPricingInfo(**asdict(availability))\n\n return collections.then(calculate_pricing_with_collections)\n\n return product_channel_listing.then(\n calculate_pricing_with_product_channel_listings\n )\n\n return product.then(calculate_pricing_with_product)\n\n return variant_channel_listing.then(\n calculate_pricing_with_product_variant_channel_listings\n )\n\n return channel.then(calculate_pricing_with_channel)\n\n return (\n 
DiscountsByDateTimeLoader(context)\n .load(info.context.request_time)\n .then(calculate_pricing_info)\n )\n\n @staticmethod\n def resolve_product(root: ChannelContext[models.ProductVariant], info):\n product = ProductByIdLoader(info.context).load(root.node.product_id)\n return product.then(\n lambda product: ChannelContext(node=product, channel_slug=root.channel_slug)\n )\n\n @staticmethod\n def resolve_quantity_ordered(root: ChannelContext[models.ProductVariant], _info):\n # This field is added through annotation when using the\n # `resolve_report_product_sales` resolver.\n return getattr(root.node, \"quantity_ordered\", None)\n\n @staticmethod\n @traced_resolver\n def resolve_revenue(root: ChannelContext[models.ProductVariant], info, *, period):\n start_date = reporting_period_to_date(period)\n variant = root.node\n channel_slug = root.channel_slug\n\n def calculate_revenue_with_channel(channel):\n if not channel:\n return None\n\n def calculate_revenue_with_order_lines(order_lines):\n def calculate_revenue_with_orders(orders):\n orders_dict = {order.id: order for order in orders}\n return calculate_revenue_for_variant(\n variant,\n start_date,\n order_lines,\n orders_dict,\n channel.currency_code,\n )\n\n order_ids = [order_line.order_id for order_line in order_lines]\n return (\n OrderByIdLoader(info.context)\n .load_many(order_ids)\n .then(calculate_revenue_with_orders)\n )\n\n return (\n OrderLinesByVariantIdAndChannelIdLoader(info.context)\n .load((variant.id, channel.id))\n .then(calculate_revenue_with_order_lines)\n )\n\n return (\n ChannelBySlugLoader(info.context)\n .load(channel_slug)\n .then(calculate_revenue_with_channel)\n )\n\n @staticmethod\n def resolve_media(root: ChannelContext[models.ProductVariant], info):\n return MediaByProductVariantIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_images(root: ChannelContext[models.ProductVariant], info):\n return ImagesByProductVariantIdLoader(info.context).load(root.node.id)\n\n 
@staticmethod\n def resolve_weight(root: ChannelContext[models.ProductVariant], _info):\n return convert_weight_to_default_weight_unit(root.node.weight)\n\n @staticmethod\n @traced_resolver\n def resolve_preorder(root: ChannelContext[models.ProductVariant], info):\n variant = root.node\n\n variant_channel_listings = VariantChannelListingByVariantIdLoader(\n info.context\n ).load(variant.id)\n\n def calculate_global_sold_units(variant_channel_listings):\n global_sold_units = sum(\n channel_listing.preorder_quantity_allocated\n for channel_listing in variant_channel_listings\n )\n return (\n PreorderData(\n global_threshold=variant.preorder_global_threshold,\n global_sold_units=global_sold_units,\n end_date=variant.preorder_end_date,\n )\n if variant.is_preorder_active()\n else None\n )\n\n return variant_channel_listings.then(calculate_global_sold_units)\n\n @staticmethod\n def __resolve_references(roots: List[\"ProductVariant\"], info):\n requestor = get_user_or_app_from_context(info.context)\n requestor_has_access_to_all = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n\n channels = defaultdict(set)\n roots_ids = []\n for root in roots:\n roots_ids.append(f\"{root.channel}_{root.id}\")\n channels[root.channel].add(root.id)\n\n variants = {}\n for channel, ids in channels.items():\n qs = resolve_product_variants(\n info,\n requestor_has_access_to_all,\n requestor,\n ids=ids,\n channel_slug=channel,\n ).qs\n for variant in qs:\n global_id = graphene.Node.to_global_id(\"ProductVariant\", variant.id)\n variants[f\"{channel}_{global_id}\"] = ChannelContext(\n channel_slug=channel, node=variant\n )\n\n return [variants.get(root_id) for root_id in roots_ids]\n\n\nclass ProductVariantCountableConnection(CountableConnection):\n class Meta:\n node = ProductVariant\n\n\n@federated_entity(\"id channel\")\nclass Product(ChannelContextTypeWithMetadata, ModelObjectType):\n id = graphene.GlobalID(required=True)\n seo_title = graphene.String()\n seo_description 
= graphene.String()\n name = graphene.String(required=True)\n description = JSONString(description=\"Description of the product.\" + RICH_CONTENT)\n product_type = graphene.Field(lambda: ProductType, required=True)\n slug = graphene.String(required=True)\n category = graphene.Field(lambda: Category)\n created = graphene.DateTime(required=True)\n updated_at = graphene.DateTime(required=True)\n charge_taxes = graphene.Boolean(required=True)\n weight = graphene.Field(Weight)\n default_variant = graphene.Field(ProductVariant)\n rating = graphene.Float()\n channel = graphene.String(\n description=(\n \"Channel given to retrieve this product. Also used by federation \"\n \"gateway to resolve this object in a federated query.\"\n ),\n )\n description_json = JSONString(\n description=\"Description of the product.\" + RICH_CONTENT,\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead.\"\n ),\n )\n thumbnail = ThumbnailField()\n pricing = graphene.Field(\n ProductPricingInfo,\n address=destination_address_argument,\n description=(\n \"Lists the storefront product's pricing, the current price and discounts, \"\n \"only meant for displaying.\"\n ),\n )\n is_available = graphene.Boolean(\n address=destination_address_argument,\n description=\"Whether the product is in stock and visible or not.\",\n )\n tax_type = graphene.Field(\n TaxType, description=\"A type of tax. 
Assigned by enabled tax gateway\"\n )\n attributes = NonNullList(\n SelectedAttribute,\n required=True,\n description=\"List of attributes assigned to this product.\",\n )\n channel_listings = PermissionsField(\n NonNullList(ProductChannelListing),\n description=\"List of availability in channels for the product.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n media_by_id = graphene.Field(\n lambda: ProductMedia,\n id=graphene.Argument(graphene.ID, description=\"ID of a product media.\"),\n description=\"Get a single product media by ID.\",\n )\n image_by_id = graphene.Field(\n lambda: ProductImage,\n id=graphene.Argument(graphene.ID, description=\"ID of a product image.\"),\n description=\"Get a single product image by ID.\",\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `mediaById` field instead.\"\n ),\n )\n variants = NonNullList(\n ProductVariant,\n description=(\n \"List of variants for the product. Requires the following permissions to \"\n \"include the unpublished items: \"\n f\"{', '.join([p.name for p in ALL_PRODUCTS_PERMISSIONS])}.\"\n ),\n )\n media = NonNullList(\n lambda: ProductMedia,\n description=\"List of media for the product.\",\n )\n images = NonNullList(\n lambda: ProductImage,\n description=\"List of images for the product.\",\n deprecation_reason=f\"{DEPRECATED_IN_3X_FIELD} Use the `media` field instead.\",\n )\n collections = NonNullList(\n lambda: Collection,\n description=(\n \"List of collections for the product. 
Requires the following permissions \"\n \"to include the unpublished items: \"\n f\"{', '.join([p.name for p in ALL_PRODUCTS_PERMISSIONS])}.\"\n ),\n )\n translation = TranslationField(\n ProductTranslation,\n type_name=\"product\",\n resolver=ChannelContextType.resolve_translation,\n )\n available_for_purchase = graphene.Date(\n description=\"Date when product is available for purchase.\",\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} \"\n \"Use the `availableForPurchaseAt` field to fetch \"\n \"the available for purchase date.\"\n ),\n )\n available_for_purchase_at = graphene.DateTime(\n description=\"Date when product is available for purchase.\"\n )\n is_available_for_purchase = graphene.Boolean(\n description=\"Whether the product is available for purchase.\"\n )\n\n class Meta:\n default_resolver = ChannelContextType.resolver_with_context\n description = \"Represents an individual item for sale in the storefront.\"\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.Product\n\n @staticmethod\n def resolve_created(root: ChannelContext[models.Product], _info):\n created_at = root.node.created_at\n return created_at\n\n @staticmethod\n def resolve_channel(root: ChannelContext[models.Product], _info):\n return root.channel_slug\n\n @staticmethod\n def resolve_default_variant(root: ChannelContext[models.Product], info):\n default_variant_id = root.node.default_variant_id\n if default_variant_id is None:\n return None\n\n def return_default_variant_with_channel_context(variant):\n return ChannelContext(node=variant, channel_slug=root.channel_slug)\n\n return (\n ProductVariantByIdLoader(info.context)\n .load(default_variant_id)\n .then(return_default_variant_with_channel_context)\n )\n\n @staticmethod\n def resolve_category(root: ChannelContext[models.Product], info):\n category_id = root.node.category_id\n if category_id is None:\n return None\n return CategoryByIdLoader(info.context).load(category_id)\n\n @staticmethod\n def 
resolve_description_json(root: ChannelContext[models.Product], _info):\n description = root.node.description\n return description if description is not None else {}\n\n @staticmethod\n def resolve_tax_type(root: ChannelContext[models.Product], info):\n manager = load_plugin_manager(info.context)\n tax_data = manager.get_tax_code_from_object_meta(root.node)\n return TaxType(tax_code=tax_data.code, description=tax_data.description)\n\n @staticmethod\n @traced_resolver\n def resolve_thumbnail(\n root: ChannelContext[models.Product], info, *, size=256, format=None\n ):\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def return_first_thumbnail(product_media):\n if not product_media:\n return None\n\n image = product_media[0]\n oembed_data = image.oembed_data\n\n if oembed_data.get(\"thumbnail_url\"):\n return Image(alt=oembed_data[\"title\"], url=oembed_data[\"thumbnail_url\"])\n\n def _resolve_url(thumbnail):\n url = get_image_or_proxy_url(\n thumbnail, image.id, \"ProductMedia\", size, format\n )\n return Image(alt=image.alt, url=build_absolute_uri(url))\n\n return (\n ThumbnailByProductMediaIdSizeAndFormatLoader(info.context)\n .load((image.id, size, format))\n .then(_resolve_url)\n )\n\n return (\n MediaByProductIdLoader(info.context)\n .load(root.node.id)\n .then(return_first_thumbnail)\n )\n\n @staticmethod\n def resolve_url(_root, _info):\n return \"\"\n\n @staticmethod\n def resolve_pricing(root: ChannelContext[models.Product], info, *, address=None):\n if not root.channel_slug:\n return None\n\n channel_slug = str(root.channel_slug)\n context = info.context\n\n product_channel_listing = ProductChannelListingByProductIdAndChannelSlugLoader(\n context\n ).load((root.node.id, channel_slug))\n variants = ProductVariantsByProductIdLoader(context).load(root.node.id)\n variants_channel_listing = (\n VariantsChannelListingByProductIdAndChannelSlugLoader(context).load(\n (root.node.id, channel_slug)\n )\n )\n collections = 
CollectionsByProductIdLoader(context).load(root.node.id)\n channel = ChannelBySlugLoader(context).load(channel_slug)\n\n address_country = address.country if address is not None else None\n manager = load_plugin_manager(info.context)\n\n def calculate_pricing_info(discounts):\n def calculate_pricing_with_channel(channel):\n def calculate_pricing_with_product_channel_listings(\n product_channel_listing,\n ):\n def calculate_pricing_with_variants(variants):\n def calculate_pricing_with_variants_channel_listings(\n variants_channel_listing,\n ):\n def calculate_pricing_with_collections(collections):\n if not variants_channel_listing:\n return None\n\n local_currency = None\n country_code = (\n address_country or channel.default_country.code\n )\n local_currency = get_currency_for_country(country_code)\n\n availability = get_product_availability(\n product=root.node,\n product_channel_listing=product_channel_listing,\n variants=variants,\n variants_channel_listing=variants_channel_listing,\n collections=collections,\n discounts=discounts,\n channel=channel,\n manager=manager,\n country=Country(country_code),\n local_currency=local_currency,\n )\n return ProductPricingInfo(**asdict(availability))\n\n return collections.then(calculate_pricing_with_collections)\n\n return variants_channel_listing.then(\n calculate_pricing_with_variants_channel_listings\n )\n\n return variants.then(calculate_pricing_with_variants)\n\n return product_channel_listing.then(\n calculate_pricing_with_product_channel_listings\n )\n\n return channel.then(calculate_pricing_with_channel)\n\n return (\n DiscountsByDateTimeLoader(context)\n .load(info.context.request_time)\n .then(calculate_pricing_info)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_is_available(\n root: ChannelContext[models.Product], info, *, address=None\n ):\n if not root.channel_slug:\n return None\n\n channel_slug = str(root.channel_slug)\n country_code = address.country if address is not None else None\n\n requestor = 
get_user_or_app_from_context(info.context)\n\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n\n def calculate_is_available(quantities):\n for qty in quantities:\n if qty > 0:\n return True\n return False\n\n def load_variants_availability(variants):\n keys = [(variant.id, country_code, channel_slug) for variant in variants]\n return AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader(\n info.context\n ).load_many(keys)\n\n def check_variant_availability():\n if has_required_permissions and not channel_slug:\n variants = ProductVariantsByProductIdLoader(info.context).load(\n root.node.id\n )\n elif has_required_permissions and channel_slug:\n variants = ProductVariantsByProductIdAndChannel(info.context).load(\n (root.node.id, channel_slug)\n )\n else:\n variants = AvailableProductVariantsByProductIdAndChannel(\n info.context\n ).load((root.node.id, channel_slug))\n return variants.then(load_variants_availability).then(\n calculate_is_available\n )\n\n def check_is_available_for_purchase(product_channel_listing):\n if product_channel_listing:\n if product_channel_listing.is_available_for_purchase():\n return check_variant_availability()\n return False\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n .then(check_is_available_for_purchase)\n )\n\n @staticmethod\n def resolve_attributes(root: ChannelContext[models.Product], info):\n return SelectedAttributesByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_media_by_id(root: ChannelContext[models.Product], _info, *, id):\n _type, pk = from_global_id_or_error(id, ProductMedia)\n return root.node.media.filter(pk=pk).first()\n\n @staticmethod\n def resolve_image_by_id(root: ChannelContext[models.Product], _info, *, id):\n _type, pk = from_global_id_or_error(id, ProductImage)\n return root.node.media.filter(pk=pk).first()\n\n @staticmethod\n def 
resolve_media(root: ChannelContext[models.Product], info):\n return MediaByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_images(root: ChannelContext[models.Product], info):\n return ImagesByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_variants(root: ChannelContext[models.Product], info):\n requestor = get_user_or_app_from_context(info.context)\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n if has_required_permissions and not root.channel_slug:\n variants = ProductVariantsByProductIdLoader(info.context).load(root.node.id)\n elif has_required_permissions and root.channel_slug:\n variants = ProductVariantsByProductIdAndChannel(info.context).load(\n (root.node.id, root.channel_slug)\n )\n else:\n variants = AvailableProductVariantsByProductIdAndChannel(info.context).load(\n (root.node.id, root.channel_slug)\n )\n\n def map_channel_context(variants):\n return [\n ChannelContext(node=variant, channel_slug=root.channel_slug)\n for variant in variants\n ]\n\n return variants.then(map_channel_context)\n\n @staticmethod\n def resolve_channel_listings(root: ChannelContext[models.Product], info):\n return ProductChannelListingByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n @traced_resolver\n def resolve_collections(root: ChannelContext[models.Product], info):\n requestor = get_user_or_app_from_context(info.context)\n\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n\n def return_collections(collections):\n if has_required_permissions:\n return [\n ChannelContext(node=collection, channel_slug=root.channel_slug)\n for collection in collections\n ]\n\n dataloader_keys = [\n (collection.id, str(root.channel_slug)) for collection in collections\n ]\n CollectionChannelListingLoader = (\n CollectionChannelListingByCollectionIdAndChannelSlugLoader\n )\n channel_listings = 
CollectionChannelListingLoader(info.context).load_many(\n dataloader_keys\n )\n\n def return_visible_collections(channel_listings):\n visible_collections = []\n channel_listings_dict = {\n channel_listing.collection_id: channel_listing\n for channel_listing in channel_listings\n if channel_listing\n }\n\n for collection in collections:\n channel_listing = channel_listings_dict.get(collection.id)\n if channel_listing and channel_listing.is_visible:\n visible_collections.append(collection)\n\n return [\n ChannelContext(node=collection, channel_slug=root.channel_slug)\n for collection in visible_collections\n ]\n\n return channel_listings.then(return_visible_collections)\n\n return (\n CollectionsByProductIdLoader(info.context)\n .load(root.node.id)\n .then(return_collections)\n )\n\n @staticmethod\n def resolve_weight(root: ChannelContext[models.Product], _info):\n return convert_weight_to_default_weight_unit(root.node.weight)\n\n @staticmethod\n @traced_resolver\n def resolve_is_available_for_purchase(root: ChannelContext[models.Product], info):\n if not root.channel_slug:\n return None\n channel_slug = str(root.channel_slug)\n\n def calculate_is_available_for_purchase(product_channel_listing):\n if not product_channel_listing:\n return None\n return product_channel_listing.is_available_for_purchase()\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n .then(calculate_is_available_for_purchase)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_available_for_purchase(root: ChannelContext[models.Product], info):\n if not root.channel_slug:\n return None\n channel_slug = str(root.channel_slug)\n\n def calculate_available_for_purchase(product_channel_listing):\n if not product_channel_listing:\n return None\n return product_channel_listing.available_for_purchase_at\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n 
.then(calculate_available_for_purchase)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_available_for_purchase_at(root: ChannelContext[models.Product], info):\n if not root.channel_slug:\n return None\n channel_slug = str(root.channel_slug)\n\n def calculate_available_for_purchase(product_channel_listing):\n if not product_channel_listing:\n return None\n return product_channel_listing.available_for_purchase_at\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n .then(calculate_available_for_purchase)\n )\n\n @staticmethod\n def resolve_product_type(root: ChannelContext[models.Product], info):\n return ProductTypeByIdLoader(info.context).load(root.node.product_type_id)\n\n @staticmethod\n def __resolve_references(roots: List[\"Product\"], info):\n requestor = get_user_or_app_from_context(info.context)\n channels = defaultdict(set)\n roots_ids = []\n for root in roots:\n _, root_id = from_global_id_or_error(root.id, Product, raise_error=True)\n if root_id:\n roots_ids.append(f\"{root.channel}_{root_id}\")\n channels[root.channel].add(root_id)\n\n products = {}\n for channel, ids in channels.items():\n queryset = resolve_products(\n info, requestor, channel_slug=channel\n ).qs.filter(id__in=ids)\n\n for product in queryset:\n products[f\"{channel}_{product.id}\"] = ChannelContext(\n channel_slug=channel, node=product\n )\n\n return [products.get(root_id) for root_id in roots_ids]\n\n\nclass ProductCountableConnection(CountableConnection):\n class Meta:\n node = Product\n\n\n@federated_entity(\"id\")\nclass ProductType(ModelObjectType):\n id = graphene.GlobalID(required=True)\n name = graphene.String(required=True)\n slug = graphene.String(required=True)\n has_variants = graphene.Boolean(required=True)\n is_shipping_required = graphene.Boolean(required=True)\n is_digital = graphene.Boolean(required=True)\n weight = graphene.Field(Weight)\n kind = ProductTypeKindEnum(description=\"The product 
type kind.\", required=True)\n products = ConnectionField(\n ProductCountableConnection,\n channel=graphene.String(\n description=\"Slug of a channel for which the data should be returned.\"\n ),\n description=\"List of products of this type.\",\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} \"\n \"Use the top-level `products` query with the `productTypes` filter.\"\n ),\n )\n tax_type = graphene.Field(\n TaxType, description=\"A type of tax. Assigned by enabled tax gateway\"\n )\n variant_attributes = NonNullList(\n Attribute,\n description=\"Variant attributes of that product type.\",\n variant_selection=graphene.Argument(\n VariantAttributeScope,\n description=\"Define scope of returned attributes.\",\n ),\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use `assignedVariantAttributes` instead.\"\n ),\n )\n assigned_variant_attributes = NonNullList(\n AssignedVariantAttribute,\n description=(\n \"Variant attributes of that product type with attached variant selection.\"\n + ADDED_IN_31\n ),\n variant_selection=graphene.Argument(\n VariantAttributeScope,\n description=\"Define scope of returned attributes.\",\n ),\n )\n product_attributes = NonNullList(\n Attribute, description=\"Product attributes of that product type.\"\n )\n available_attributes = FilterConnectionField(\n AttributeCountableConnection,\n filter=AttributeFilterInput(),\n description=\"List of attributes which can be assigned to this product type.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n\n class Meta:\n description = (\n \"Represents a type of product. 
It defines what attributes are available to \"\n \"products of this type.\"\n )\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.ProductType\n\n @staticmethod\n def resolve_tax_type(root: models.ProductType, info):\n manager = load_plugin_manager(info.context)\n tax_data = manager.get_tax_code_from_object_meta(root)\n return TaxType(tax_code=tax_data.code, description=tax_data.description)\n\n @staticmethod\n def resolve_product_attributes(root: models.ProductType, info):\n def unpack_attributes(attributes):\n return [attr for attr, *_ in attributes]\n\n return (\n ProductAttributesByProductTypeIdLoader(info.context)\n .load(root.pk)\n .then(unpack_attributes)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_variant_attributes(\n root: models.ProductType,\n info,\n variant_selection: Optional[str] = None,\n ):\n def apply_variant_selection_filter(attributes):\n if not variant_selection or variant_selection == VariantAttributeScope.ALL:\n return [attr for attr, *_ in attributes]\n variant_selection_attrs = get_variant_selection_attributes(attributes)\n if variant_selection == VariantAttributeScope.VARIANT_SELECTION:\n return [attr for attr, *_ in variant_selection_attrs]\n return [\n attr\n for attr, variant_selection in attributes\n if (attr, variant_selection) not in variant_selection_attrs\n ]\n\n return (\n VariantAttributesByProductTypeIdLoader(info.context)\n .load(root.pk)\n .then(apply_variant_selection_filter)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_assigned_variant_attributes(\n root: models.ProductType,\n info,\n variant_selection: Optional[str] = None,\n ):\n def apply_variant_selection_filter(attributes):\n if not variant_selection or variant_selection == VariantAttributeScope.ALL:\n return [\n {\"attribute\": attr, \"variant_selection\": variant_selection}\n for attr, variant_selection in attributes\n ]\n variant_selection_attrs = get_variant_selection_attributes(attributes)\n if variant_selection == 
VariantAttributeScope.VARIANT_SELECTION:\n return [\n {\"attribute\": attr, \"variant_selection\": variant_selection}\n for attr, variant_selection in variant_selection_attrs\n ]\n return [\n {\"attribute\": attr, \"variant_selection\": variant_selection}\n for attr, variant_selection in attributes\n if (attr, variant_selection) not in variant_selection_attrs\n ]\n\n return (\n VariantAttributesByProductTypeIdLoader(info.context)\n .load(root.pk)\n .then(apply_variant_selection_filter)\n )\n\n @staticmethod\n def resolve_products(root: models.ProductType, info, *, channel=None, **kwargs):\n requestor = get_user_or_app_from_context(info.context)\n if channel is None:\n channel = get_default_channel_slug_or_graphql_error()\n qs = root.products.visible_to_user(requestor, channel) # type: ignore\n qs = ChannelQsContext(qs=qs, channel_slug=channel)\n kwargs[\"channel\"] = channel\n return create_connection_slice(qs, info, kwargs, ProductCountableConnection)\n\n @staticmethod\n def resolve_available_attributes(root: models.ProductType, info, **kwargs):\n qs = attribute_models.Attribute.objects.get_unassigned_product_type_attributes(\n root.pk\n )\n qs = resolve_attributes(info, qs=qs)\n qs = filter_connection_queryset(qs, kwargs, info.context)\n return create_connection_slice(qs, info, kwargs, AttributeCountableConnection)\n\n @staticmethod\n def resolve_weight(root: models.ProductType, _info):\n return convert_weight_to_default_weight_unit(root.weight)\n\n @staticmethod\n def __resolve_references(roots: List[\"ProductType\"], _info):\n return resolve_federation_references(\n ProductType, roots, models.ProductType.objects\n )\n\n\nclass ProductTypeCountableConnection(CountableConnection):\n class Meta:\n node = ProductType\n\n\n@federated_entity(\"id channel\")\nclass Collection(ChannelContextTypeWithMetadata, ModelObjectType):\n id = graphene.GlobalID(required=True)\n seo_title = graphene.String()\n seo_description = graphene.String()\n name = 
graphene.String(required=True)\n description = JSONString(\n description=\"Description of the collection.\" + RICH_CONTENT\n )\n slug = graphene.String(required=True)\n channel = graphene.String(\n description=(\n \"Channel given to retrieve this collection. Also used by federation \"\n \"gateway to resolve this object in a federated query.\"\n ),\n )\n description_json = JSONString(\n description=\"Description of the collection.\" + RICH_CONTENT,\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead.\"\n ),\n )\n products = FilterConnectionField(\n ProductCountableConnection,\n filter=ProductFilterInput(description=\"Filtering options for products.\"),\n sort_by=ProductOrder(description=\"Sort products.\"),\n description=\"List of products in this collection.\",\n )\n background_image = ThumbnailField()\n translation = TranslationField(\n CollectionTranslation,\n type_name=\"collection\",\n resolver=ChannelContextType.resolve_translation,\n )\n channel_listings = PermissionsField(\n NonNullList(CollectionChannelListing),\n description=\"List of channels in which the collection is available.\",\n permissions=[\n ProductPermissions.MANAGE_PRODUCTS,\n ],\n )\n\n class Meta:\n default_resolver = ChannelContextType.resolver_with_context\n description = \"Represents a collection of products.\"\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.Collection\n\n @staticmethod\n def resolve_channel(root: ChannelContext[models.Product], _info):\n return root.channel_slug\n\n @staticmethod\n def resolve_background_image(\n root: ChannelContext[models.Collection], info, size=None, format=None\n ):\n node = root.node\n if not node.background_image:\n return\n\n alt = node.background_image_alt\n if not size:\n return Image(url=node.background_image.url, alt=alt)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_background_image(thumbnail):\n url = get_image_or_proxy_url(thumbnail, 
node.id, \"Collection\", size, format)\n return Image(url=url, alt=alt)\n\n return (\n ThumbnailByCollectionIdSizeAndFormatLoader(info.context)\n .load((node.id, size, format))\n .then(_resolve_background_image)\n )\n\n @staticmethod\n def resolve_products(root: ChannelContext[models.Collection], info, **kwargs):\n requestor = get_user_or_app_from_context(info.context)\n qs = root.node.products.visible_to_user( # type: ignore\n requestor, root.channel_slug\n )\n qs = ChannelQsContext(qs=qs, channel_slug=root.channel_slug)\n\n kwargs[\"channel\"] = root.channel_slug\n qs = filter_connection_queryset(qs, kwargs)\n return create_connection_slice(qs, info, kwargs, ProductCountableConnection)\n\n @staticmethod\n def resolve_channel_listings(root: ChannelContext[models.Collection], info):\n return CollectionChannelListingByCollectionIdLoader(info.context).load(\n root.node.id\n )\n\n @staticmethod\n def resolve_description_json(root: ChannelContext[models.Collection], _info):\n description = root.node.description\n return description if description is not None else {}\n\n @staticmethod\n def __resolve_references(roots: List[\"Collection\"], info):\n from ..resolvers import resolve_collections\n\n channels = defaultdict(set)\n roots_ids = []\n for root in roots:\n _, root_id = from_global_id_or_error(root.id, Collection, raise_error=True)\n roots_ids.append(f\"{root.channel}_{root_id}\")\n channels[root.channel].add(root_id)\n\n collections = {}\n for channel, ids in channels.items():\n queryset = resolve_collections(info, channel).qs.filter(id__in=ids)\n\n for collection in queryset:\n collections[f\"{channel}_{collection.id}\"] = ChannelContext(\n channel_slug=channel, node=collection\n )\n\n return [collections.get(root_id) for root_id in roots_ids]\n\n\nclass CollectionCountableConnection(CountableConnection):\n class Meta:\n node = Collection\n\n\n@federated_entity(\"id\")\nclass Category(ModelObjectType):\n id = graphene.GlobalID(required=True)\n seo_title = 
graphene.String()\n seo_description = graphene.String()\n name = graphene.String(required=True)\n description = JSONString(description=\"Description of the category.\" + RICH_CONTENT)\n slug = graphene.String(required=True)\n parent = graphene.Field(lambda: Category)\n level = graphene.Int(required=True)\n description_json = JSONString(\n description=\"Description of the category.\" + RICH_CONTENT,\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead.\"\n ),\n )\n ancestors = ConnectionField(\n lambda: CategoryCountableConnection,\n description=\"List of ancestors of the category.\",\n )\n products = ConnectionField(\n ProductCountableConnection,\n channel=graphene.String(\n description=\"Slug of a channel for which the data should be returned.\"\n ),\n description=(\n \"List of products in the category. Requires the following permissions to \"\n \"include the unpublished items: \"\n f\"{', '.join([p.name for p in ALL_PRODUCTS_PERMISSIONS])}.\"\n ),\n )\n children = ConnectionField(\n lambda: CategoryCountableConnection,\n description=\"List of children of the category.\",\n )\n background_image = ThumbnailField()\n translation = TranslationField(CategoryTranslation, type_name=\"category\")\n\n class Meta:\n description = (\n \"Represents a single category of products. 
Categories allow to organize \"\n \"products in a tree-hierarchies which can be used for navigation in the \"\n \"storefront.\"\n )\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.Category\n\n @staticmethod\n def resolve_ancestors(root: models.Category, info, **kwargs):\n return create_connection_slice(\n root.get_ancestors(), info, kwargs, CategoryCountableConnection\n )\n\n @staticmethod\n def resolve_description_json(root: models.Category, _info):\n description = root.description\n return description if description is not None else {}\n\n @staticmethod\n def resolve_background_image(root: models.Category, info, size=None, format=None):\n if not root.background_image:\n return\n\n alt = root.background_image_alt\n if not size:\n return Image(url=root.background_image.url, alt=alt)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_background_image(thumbnail):\n url = get_image_or_proxy_url(thumbnail, root.id, \"Category\", size, format)\n return Image(url=url, alt=alt)\n\n return (\n ThumbnailByCategoryIdSizeAndFormatLoader(info.context)\n .load((root.id, size, format))\n .then(_resolve_background_image)\n )\n\n @staticmethod\n def resolve_children(root: models.Category, info, **kwargs):\n def slice_children_categories(children):\n return create_connection_slice(\n children, info, kwargs, CategoryCountableConnection\n )\n\n return (\n CategoryChildrenByCategoryIdLoader(info.context)\n .load(root.pk)\n .then(slice_children_categories)\n )\n\n @staticmethod\n def resolve_url(root: models.Category, _info):\n return \"\"\n\n @staticmethod\n @traced_resolver\n def resolve_products(root: models.Category, info, *, channel=None, **kwargs):\n requestor = get_user_or_app_from_context(info.context)\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n tree = root.get_descendants(include_self=True)\n if channel is None and not has_required_permissions:\n channel = 
get_default_channel_slug_or_graphql_error()\n qs = models.Product.objects.all()\n if not has_required_permissions:\n qs = (\n qs.visible_to_user(requestor, channel)\n .annotate_visible_in_listings(channel)\n .exclude(\n visible_in_listings=False,\n )\n )\n if channel and has_required_permissions:\n qs = qs.filter(channel_listings__channel__slug=channel)\n qs = qs.filter(category__in=tree)\n qs = ChannelQsContext(qs=qs, channel_slug=channel)\n return create_connection_slice(qs, info, kwargs, ProductCountableConnection)\n\n @staticmethod\n def __resolve_references(roots: List[\"Category\"], _info):\n return resolve_federation_references(Category, roots, models.Category.objects)\n\n\nclass CategoryCountableConnection(CountableConnection):\n class Meta:\n node = Category\n\n\n@federated_entity(\"id\")\nclass ProductMedia(ModelObjectType):\n id = graphene.GlobalID(required=True)\n sort_order = graphene.Int()\n alt = graphene.String(required=True)\n type = ProductMediaType(required=True)\n oembed_data = JSONString(required=True)\n url = ThumbnailField(graphene.String, required=True)\n\n class Meta:\n description = \"Represents a product media.\"\n interfaces = [relay.Node]\n model = models.ProductMedia\n\n @staticmethod\n def resolve_url(root: models.ProductMedia, info, *, size=None, format=None):\n if root.external_url:\n return root.external_url\n\n if not root.image:\n return\n\n if not size:\n return build_absolute_uri(root.image.url)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_url(thumbnail):\n url = get_image_or_proxy_url(\n thumbnail, root.id, \"ProductMedia\", size, format\n )\n return build_absolute_uri(url)\n\n return (\n ThumbnailByProductMediaIdSizeAndFormatLoader(info.context)\n .load((root.id, size, format))\n .then(_resolve_url)\n )\n\n @staticmethod\n def __resolve_references(roots: List[\"ProductMedia\"], _info):\n return resolve_federation_references(\n ProductMedia, roots, 
models.ProductMedia.objects\n )\n\n\nclass ProductImage(graphene.ObjectType):\n id = graphene.ID(required=True, description=\"The ID of the image.\")\n alt = graphene.String(description=\"The alt text of the image.\")\n sort_order = graphene.Int(\n required=False,\n description=(\n \"The new relative sorting position of the item (from -inf to +inf). \"\n \"1 moves the item one position forward, -1 moves the item one position \"\n \"backward, 0 leaves the item unchanged.\"\n ),\n )\n url = ThumbnailField(graphene.String, required=True)\n\n class Meta:\n description = \"Represents a product image.\"\n\n @staticmethod\n def resolve_id(root: models.ProductMedia, info):\n return graphene.Node.to_global_id(\"ProductImage\", root.id)\n\n @staticmethod\n def resolve_url(root: models.ProductMedia, info, *, size=None, format=None):\n if not root.image:\n return\n\n if not size:\n return build_absolute_uri(root.image.url)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_url(thumbnail):\n url = get_image_or_proxy_url(\n thumbnail, root.id, \"ProductMedia\", size, format\n )\n return build_absolute_uri(url)\n\n return (\n ThumbnailByProductMediaIdSizeAndFormatLoader(info.context)\n .load((root.id, size, format))\n .then(_resolve_url)\n )\n","sub_path":"saleor/graphql/product/types/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":68049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"367880054","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\ndef iteres_1(n):\r\n res = np.arange(n+1) + 1\r\n while n>0:\r\n res = np.sin(res)\r\n n -= 1\r\n return res\r\n\r\ndef iteres_2(n):\r\n res = (n+1) * [1.]\r\n for i in range(n):\r\n res[i+1] = np.sin(res[i])\r\n return res\r\n\r\ndef x(n):\r\n res = 1\r\n while n>0:\r\n res = np.sin(res)\r\n n -= 1\r\n return res\r\n\r\ndef iteres_3(n):\r\n res = list(range(n+1))\r\n for i in res:\r\n res[i] = x(i)\r\n return res\r\n\r\ndef calcu():\r\n n = 100000\r\n print(iteres_2(n)[-1] / math.sqrt(3/n))\r\n\r\n\r\ndef _dessin(u,n,f):\r\n \"\"\"\r\n u : point de depart\r\n n : nbr iteration\r\n f : fonction\r\n \"\"\"\r\n v = u\r\n X = [u]\r\n Y = [0]\r\n for k in range(n):\r\n w = f(v)\r\n X.append(v)\r\n Y.append(w)\r\n X.append(w)\r\n Y.append(w)\r\n\r\n v = w\r\n\r\n #X.append(v)\r\n #Y.append(0)\r\n\r\n plt.plot(X,Y)\r\n plt.plot([0,1],[0,1],\"r\")\r\n\r\n Xsin = [i/1000 for i in range(1000)]\r\n Ysin = []\r\n for x in Xsin:\r\n Ysin.append(math.sin(x))\r\n\r\n plt.plot(Xsin,Ysin,\"g\")\r\n plt.show()\r\n \r\ndef dessin():\r\n def f(x):\r\n return math.sin(x)\r\n u = 1\r\n n = 100\r\n _dessin(u,n,f)\r\n\r\ndef Sn():\r\n n = 1000\r\n X = [i for i in range(n)]\r\n Y = [1]\r\n for k in range(1,n):\r\n l = (-1)**k * x(k)\r\n Y.append(Y[-1] + l)\r\n\r\n\r\n plt.plot(X,Y)\r\n plt.show()\r\n\r\n\r\n\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\r\ndef formatage(L):\r\n M = []\r\n if len(L) == 1:\r\n return M\r\n \r\n for k in range(len(L)-1):\r\n u = L[k+1] - L[k]\r\n if u == 0:\r\n M.append(0)\r\n while u > 0:\r\n M.append(1)\r\n u -= 1\r\n while u < 0:\r\n M.append(-1)\r\n u += 1\r\n return M\r\n\r\n \r\n\r\ndef trace_ascenseur(L):\r\n #L = L = [1,0,2,-1,-1,1,2]\r\n M = formatage(L)\r\n X = [i for i in 
range(len(M)+1)]\r\n Y = [L[0]]\r\n niveau = L[0]\r\n for x in M:\r\n niveau += x\r\n Y.append(niveau)\r\n plt.plot(X,Y)\r\n plt.grid()\r\n plt.show()\r\n \r\n\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\r\nimport scipy.integrate as integr\r\n\r\n\r\ndef Cauchy():\r\n # x' = -x + cos(10t) , x(0)=3 sur [0,2]\r\n\r\n def f(x,t):\r\n return -x + math.cos(10*t)\r\n\r\n X = np.arange(0,2,0.001)\r\n Y = integr.odeint(f,3,X)\r\n plt.plot(X,Y)\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n \r\n Cauchy()\r\n","sub_path":"TP1.py","file_name":"TP1.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"227101880","text":"# Python Import ==================\nimport sys\n\n# User Import ======================\nfrom cycle_classes.Start import Start\n\nstrCurrent_path = sys.path[0] \n\nFILE_CYC_INPUT = \"cycle_dat.csv\" # input file for cycle module\nFILE_CYC_OUTPUT = \"cycle_out.csv\" # output file for cycle module\nFILE_CAB2CYC = \"cab2cyc_out.csv\" # output file for cabinit module\n\nFOLDER_INPUT = strCurrent_path + \"\\\\\" + \"data\"\nFOLDER_OUTPUT = strCurrent_path + \"\\\\\" + \"data\"\n \nobj_start = Start(FILE_CYC_INPUT, \n FILE_CYC_OUTPUT, \n FILE_CAB2CYC, \n FOLDER_INPUT, \n FOLDER_OUTPUT)\n\nis_solution = obj_start.main(True) # DEBUG ON\n# is_solution = obj_start.main(False) # DEBUG OFF\n\nobj_start.print_scr_rep(is_solution)\n","sub_path":"App/cycle_start_debug.py","file_name":"cycle_start_debug.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"467223639","text":"\nfrom google.appengine.api import channel\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.ndb import polymodel\n\nimport datetime\n\nimport json\nimport logging\n\nfrom chat_settings import ChatSettings, ChatURL\nfrom site_settings import SiteSettings\n\ndef to_iso_format_hack(dt): \n # python datetime is bullshit and doesn't add the Z for iso\n # so isoformat does not actually return isoformat\n # this causes problems in firefox, which interprets it as a local date\n s = dt.isoformat()\n if (not s.endswith('Z')) and \\\n (not s.endswith('+00:00')) and \\\n (not s.endswith('-00:00')):\n return s + 'Z'\n else:\n return s\n\nclass ChatUser(polymodel.PolyModel): \n screenname = ndb.StringProperty()\n next_chat_msg_credit = ndb.DateTimeProperty(auto_now_add=True)\n chat_msg_credit = ndb.IntegerProperty(default=0)\n \n @ndb.transactional\n def chat_msg_rate_limit_check(self):\n if self.chat_msg_credit == 0:\n t = datetime.datetime.utcnow()\n if t < self.next_chat_msg_credit:\n return False\n else:\n self.next_chat_msg_credit = t + ChatSettings.CHAT_MSG_INTERVAL\n self.chat_msg_credit = ChatSettings.CHAT_MSG_PER_INTERVAL - 1\n self.put()\n return True\n self.chat_msg_credit -= 1\n self.put()\n return True\n \n def is_operator(self):\n return False\n \n @classmethod\n def channel_connected(cls, channel_user_id):\n args = channel_user_id.split('_')\n u = ChatUser.get_by_id(args[0])\n if u:\n u.handle_channel_connected(args)\n\n @classmethod\n def channel_disconnected(cls, channel_user_id):\n args = channel_user_id.split('_')\n u = ChatUser.get_by_id(args[0])\n if u:\n u.handle_channel_disconnected(args)\n\n def handle_channel_connected(self, vals):\n logging.info(\"connect not implemented?\")\n\n def handle_channel_disconnected(self, vals):\n logging.info(\"disconnect not implemented?\")\n \nclass ChatOperator(ChatUser):\n is_on_call = ndb.BooleanProperty(default=False)\n on_call_channel_token = ndb.StringProperty()\n 
on_call_channel_token_expiration = ndb.DateTimeProperty(auto_now_add=True)\n calls_answered = ndb.IntegerProperty(default=0)\n\n def is_operator(self):\n return True\n\n @ndb.transactional\n def answered_call(self):\n self.calls_answered += 1\n self.put()\n\n def answer_call(self, call_id):\n call = ChatCall.get_by_id(call_id)\n if not call:\n return None, None, None\n \n room, tok = call.answer(self)\n if not room:\n return None, None, None\n \n self.answered_call()\n\n return call, room, tok\n\n def refresh_calls(self, last_call_datetime):\n for c in ChatCall.calls_since(last_call_datetime):\n msg = c.to_operator_json(is_historic=True)\n channel.send_message(self.on_call_channel_token, msg) \n \n @classmethod\n def gauth_user_id(cls, raw_user_id): \n # in case we do non-google logins\n return \"gplus{0}\".format(raw_user_id)\n \n @classmethod\n def gauth_get_or_insert(cls, user_id): \n o = ChatOperator.get_or_insert(user_id)\n if o: \n o.put()\n return o\n\n @classmethod\n def announce_call(cls, call):\n msg = call.to_operator_json()\n operators = cls.query(cls.is_on_call==True).fetch()\n for operator in operators:\n channel.send_message(operator.on_call_channel_token, msg) \n\n @classmethod\n def verify_email(cls, email):\n return SiteSettings.verify_email(email)\n\n def to_on_call_channel_user_id(self):\n return str(self.key.id()) + '_oncall' \n\n def update_rooms(self):\n # find all rooms this user is in and refresh screen name lists\n rooms = ChatRoom.query(ChatRoom.chat_channels.user_key == self.key).fetch(20)\n if not rooms:\n return\n \n for r in rooms:\n r.refresh_screennames() \n \n @ndb.transactional\n def go_on_call(self, check_channel=True):\n # TODO save the date and do date compare\n # also this is bad but meh\n if check_channel:\n t = datetime.datetime.utcnow()\n if (not self.on_call_channel_token) or (t >= self.on_call_channel_token_expiration):\n self.on_call_channel_token = channel.create_channel(self.to_on_call_channel_user_id(),\n 
ChatSettings.OPERATOR_CHANNEL_MINUTES)\n self.on_call_channel_token_expiration = t + \\\n ChatSettings.OPERATOR_CHANNEL_DURATION\n self.is_on_call = True\n self.put()\n\n @ndb.transactional\n def go_off_call(self):\n self.is_on_call = False\n self.put()\n \n def handle_channel_connected(self, vals):\n if vals[1] == 'oncall':\n # sometimes channel disconnects in dev server\n # TODO: not sure if it's dev server nonsense or local bug\n self.is_on_call = True\n self.put()\n return\n\n def handle_channel_disconnected(self, vals):\n if vals[1] == 'oncall':\n self.go_off_call()\n else:\n room = ChatRoom.get_by_id(long(vals[1]))\n if room:\n room.remove_user(self)\n\nclass ChatCaller(ChatUser):\n def remote_addr(self):\n s = str(self.key.id())\n return s[len('caller'):]\n \n @classmethod\n def form_user_id(cls, remote_addr):\n return 'caller{0}'.format(remote_addr)\n\n @classmethod\n def caller_get_or_insert(cls, remote_addr, screenname):\n # TODO: need to make this multiple for people behind proxies/NATs (though unlikely for now)\n if not screenname:\n screenname = ''\n caller = cls.get_or_insert(ChatCaller.form_user_id(remote_addr), screenname=screenname)\n if not caller:\n return None\n \n caller.put()\n \n return caller\n \n def handle_channel_connected(self, vals):\n pass\n\n def handle_channel_disconnected(self, vals):\n room = ChatRoom.get_by_id(long(vals[1]))\n if room:\n room.remove_user(self)\n\nclass ChatChannel(ndb.Model):\n user_key = ndb.KeyProperty(kind=ChatUser)\n room_key = ndb.KeyProperty(kind='ChatRoom')\n channel_token = ndb.StringProperty()\n \nclass ChatRoom(polymodel.PolyModel): \n chat_channels = ndb.StructuredProperty(ChatChannel, repeated=True)\n parent_call = ndb.KeyProperty(kind='ChatCall', default=None)\n \n def has_user_key(self, user_key):\n c = self.get_channel_for_user(user_key)\n if c:\n return c\n else:\n return None\n\n @ndb.transactional\n def remove_user_key_t(self, user_key):\n remove_index = None\n for i, c in 
enumerate(self.chat_channels):\n if c.user_key == user_key:\n remove_index = i\n break\n if remove_index is not None:\n del self.chat_channels[remove_index]\n self.put()\n\n def remove_user(self, user):\n self.remove_user_key_t(user.key)\n self.announce_user_leave(user)\n \n # better have called room.put() and user.put() at least once so key is valid\n @ndb.transactional\n def add_user_key(self, user_key):\n # TODO: this should be a transaction probably\n c = self.has_user_key(user_key)\n if c:\n return c.channel_token, False\n \n tok = channel.create_channel(self.get_channel_id(user_key), ChatSettings.CHAT_CHANNEL_MINUTES) \n if not tok: \n return None, None\n \n self.chat_channels.append(ChatChannel(\n user_key = user_key,\n room_key = self.key,\n channel_token = tok)\n )\n self.put()\n \n return tok, True\n \n def get_channel_id(self, user_key):\n return '{0}_{1}'.format(user_key.id(), self.key.id()) \n\n def get_channel_for_user(self, user_key):\n for c in self.chat_channels:\n if c.user_key == user_key:\n return c\n return None\n \n def get_screennames(self):\n def user_key_to_screenname(user_key):\n u = user_key.get()\n if u:\n return u.screenname\n else:\n return \"\"\n \n return [ user_key_to_screenname(c.user_key) for c in self.chat_channels ]\n\n def refresh_screennames(self): \n msg = json.dumps({\n 'content' : 'screennames',\n 'screennames' : self.get_screennames(),\n })\n for chan in self.chat_channels:\n channel.send_message(chan.channel_token, msg) \n\n def announce_user_join(self, user):\n msg = json.dumps({\n 'content' : 'announcement',\n 'line' : u'{0} has joined the room'.format(user.screenname),\n })\n for chan in self.chat_channels:\n if chan.user_key != user.key:\n channel.send_message(chan.channel_token, msg) \n\n def announce_user_leave(self, user):\n msg = json.dumps({\n 'content' : 'announcement',\n 'line' : u'{0} has left the room'.format(user.screenname),\n })\n for chan in self.chat_channels:\n if chan.user_key != user.key:\n 
channel.send_message(chan.channel_token, msg) \n \nclass ChatCall(ndb.Model):\n caller_channel = ndb.StructuredProperty(ChatChannel)\n call_datetime = ndb.DateTimeProperty(auto_now_add=True) \n answered_datetime = ndb.DateTimeProperty(default=None)\n answered_by = ndb.KeyProperty(kind=ChatOperator, default=None)\n\n def caller_url(self):\n return '/room?room={0}&call={1}'.format(self.caller_channel.room_key.id(), self.key.id())\n\n def operator_url(self):\n return \"{0}?call_id={1}\".format(ChatURL.OANSWER, self.key.id())\n \n def to_operator_json(self, is_historic = False): \n msg = {\n 'call_id' : self.key.id(),\n 'call_url' : self.operator_url(),\n 'call_date' : to_iso_format_hack(self.call_datetime),\n }\n if is_historic:\n msg['is_historic'] = 1\n if not self.answered_datetime is None:\n msg['call_answered'] = str(self.answered_datetime)\n\n return json.dumps(msg)\n\n @ndb.transactional\n def mark_answered(self, operator):\n if self.answered_by is None:\n self.answered_by = operator.key\n self.put()\n return True\n elif self.answered_by == operator.key:\n return True\n else:\n return False\n \n def answer(self, operator):\n try:\n won = self.mark_answered(operator)\n except:\n logging.info('{0}: operator {1} call {2}'.format(e, operator, call))\n won = False\n\n if not won:\n return None, None\n \n room = self.caller_channel.room_key.get()\n if not room:\n logging.info('no room')\n return None, None\n\n try:\n tok, added = room.add_user_key(operator.key)\n except Exception as e:\n # could be transaction failure\n logging.info('{0}: room {1} operator {2}'.format(e, room, operator))\n tok = None\n \n if not tok:\n return None, None \n \n self.answered_datetime = datetime.datetime.utcnow()\n self.put() \n return room, tok\n\n @classmethod\n def calls_since(cls, last_call_datetime):\n # get 20 most recent, but sort from earliest time\n return sorted(\n cls.query(cls.call_datetime > last_call_datetime).order(-cls.call_datetime).fetch(20),\n key=lambda c: 
c.call_datetime)\n \n @classmethod\n def factory(cls, caller_key):\n call = ChatCall()\n if not call:\n return None\n \n room = ChatRoom()\n if not room:\n # call isn't put, so should be okay?\n return None\n\n call.put() # so call.key is valid\n room.parent_call = call.key\n room.put() # so room.key is valid\n\n tok, newly_added = room.add_user_key(caller_key)\n if not tok:\n call.key.delete()\n room.key.delete()\n return\n \n call.caller_channel = ChatChannel(user_key = caller_key,\n room_key = room.key,\n channel_token = tok)\n call.put()\n return call\n\nclass ChatMsg(ndb.Model):\n user_key = ndb.KeyProperty(kind=ChatUser) \n room_key = ndb.KeyProperty(kind=ChatRoom)\n msg = ndb.StringProperty()\n sent_datetime = ndb.DateTimeProperty(auto_now_add=True) \n","sub_path":"chat_objs.py","file_name":"chat_objs.py","file_ext":"py","file_size_in_byte":12912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"288755490","text":"from DSSM.batchiterators.fileiterators import NaturalQuestionsFileIterator\nfrom DSSM.dssm.model import *\nimport numpy as np\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\n\nfrom DSSM.helpers.helpers import correct_guesses_of_dssm\n\ninit = tf.compat.v1.global_variables_initializer()\n\nsaver = tf.compat.v1.train.Saver()\n\n# First just train on nq\ndef get_feed_dict(batch):\n q_indices_batch = batch.get_q_indices()\n p_indices_batch = batch.get_relevant_indices()\n n1_indices_batch, n2_indices_batch, n3_indices_batch, n4_indices_batch = batch.get_irrelevant_indices()\n\n q_values_batch = np.ones(q_indices_batch.shape[0], dtype=int)\n p_values_batch = np.ones(p_indices_batch.shape[0], dtype=int)\n n1_values_batch = np.ones(n1_indices_batch.shape[0], dtype=int)\n n2_values_batch = np.ones(n2_indices_batch.shape[0], dtype=int)\n n3_values_batch = np.ones(n3_indices_batch.shape[0], dtype=int)\n n4_values_batch = np.ones(n4_indices_batch.shape[0], dtype=int)\n\n feed_dict = {\n q_indices: q_indices_batch,\n q_values: q_values_batch,\n p_indices: p_indices_batch,\n p_values: p_values_batch,\n n1_indices: n1_indices_batch,\n n1_values: n1_values_batch,\n n2_indices: n2_indices_batch,\n n2_values: n2_values_batch,\n n3_indices: n3_indices_batch,\n n3_values: n3_values_batch,\n n4_indices: n4_indices_batch,\n n4_values: n4_values_batch\n }\n return feed_dict\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.ion()\n\nfig.show()\nfig.canvas.draw()\n\n\ndef update_plot(data1, data2):\n ax.clear()\n ax.plot(data1)\n ax.plot(data2)\n ax.set_ylim(bottom=0)\n fig.canvas.draw()\n # `start_event_loop` is required for console, not jupyter notebooks.\n # Don't use `plt.pause` because it steals focus and makes it hard\n # to stop the app.\n fig.canvas.start_event_loop(0.001)\n\n\nwith tf.compat.v1.Session() as sess:\n sess.run(init)\n\n train_epoch_accuracies = []\n train_losses = []\n val_epoch_accuracies = []\n val_losses = []\n\n 
trainingSet = NaturalQuestionsFileIterator(\"/Users/sahandzarrinkoub/School/year5/thesis/datasets/preprocessed_backup/nq/smalltrain.csv\",\n batch_size = BATCH_SIZE,\n no_of_irrelevant_samples = 4,\n encodingType=\"NGRAM\")\n validationSet = NaturalQuestionsFileIterator(\"/Users/sahandzarrinkoub/School/year5/thesis/datasets/preprocessed_backup/nq/smallvalidation.csv\",\n batch_size=BATCH_SIZE,\n no_of_irrelevant_samples=4,\n encodingType=\"NGRAM\")\n for epoch in range(10):\n if epoch > 0:\n trainingSet.restart()\n validationSet.restart()\n\n ll_train_overall = 0\n correct_train = 0\n for batch in tqdm(trainingSet):\n feed_dict = get_feed_dict(batch)\n\n _, ll = sess.run([optimizer, logloss], feed_dict=feed_dict)\n print(ll)\n ll_train_overall += ll\n correct_train += correct_guesses_of_dssm(sess, feed_dict, prob_p, prob_n1, prob_n2, prob_n3, prob_n4)\n\n train_losses.append(ll_train_overall / trainingSet.getNoOfDataPoints())\n train_epoch_accuracies.append(correct_train / trainingSet.getNoOfDataPoints())\n\n\n #evaluate on validation set\n ll_val_overall = 0\n correct_val = 0\n for batch in validationSet:\n feed_dict = get_feed_dict(batch)\n (ll_val,) = sess.run([logloss], feed_dict=feed_dict)\n correct_val += correct_guesses_of_dssm(sess, feed_dict, prob_p, prob_n1, prob_n2, prob_n3, prob_n4)\n ll_val_overall += ll_val\n val_losses.append(ll_val_overall / validationSet.getNoOfDataPoints())\n val_epoch_accuracies.append(correct_val / validationSet.getNoOfDataPoints())\n\n update_plot(train_losses, val_losses)\n\n plt.figure()\n plt.plot(train_epoch_accuracies)\n plt.plot(val_epoch_accuracies)\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"accuracies\")\n plt.figure()\n plt.plot(train_losses)\n plt.plot(val_losses)\n plt.xlabel(\"batch\")\n plt.title(\"loss\")\n plt.show()\n saver.save(sess, './saved_model', 
global_step=epoch)\n","sub_path":"dssm/dssm_word_vs_ngram.py","file_name":"dssm_word_vs_ngram.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"347232412","text":"\"\"\"\nDescription\n-----------\n\n This module is linked to the Arvato Project Workbook (jupyterlab notebook). Many explanation is given in the workbook. \n\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\n\n\nclass PreDataCleaner:\n \"\"\"\n Description\n -----------\n\n This class will provide some functionality to execute some basic cleanining \n a arvato data set\n \"\"\"\n \n \n @property\n def df_metadata(self):\n return self.__df_metadata\n \n @df_metadata.setter\n def df_metadata(self, val):\n self.__df_metadata = val \n \n def __init__(self, df_metadata:pd.DataFrame):\n \"\"\"\n Description\n -----------\n\n inits the class.\n \n Parameters\n ----------\n df_metadata: pd.DataFrame\n pandas dataframe with the loaded data from file \"DIAS Attributes - Values 2017.xlsx\". Containing\n information about the attribute values.\n \"\"\"\n self.df_metadata = df_metadata\n\n # replace the metadata attribute column ending \"_RZ\" by \"\" in order to match the dataset column names\n self.df_metadata['Attribute'] = self.df_metadata['Attribute'].str.replace('_RZ','')\n\n \n def transform(self, df:pd.DataFrame, drop_duplicates:bool=False, build_kind_features:bool=True, drop_cols:bool=True)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n executes the data transformation (cleaning)\n\n Parameters\n ----------\n\n df : pd.DataFrame\n the dataframe that is to be cleaned\n\n \"\"\" \n df = self.__drop_customer_columns(df)\n df = self.__handle_data_load_errors(df)\n \n if drop_duplicates:\n df.drop_duplicates(inplace=True)\n \n df = self.__fix_year_columns(df)\n df = self.__mark_nans(df)\n \n if build_kind_features:\n df = self.__build_features_chidren(df, drop_childcols=False)\n \n df = self.__catvars_to_dummies(df) \n df = self.__catvars_to_binary(df) \n \n if drop_cols: \n df = self.__drop_columns(df) \n \n return df\n \n \n def fit (self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n prepare data 
for transformation\n \"\"\"\n\n pass\n \n def __fix_year_columns(self, df:pd.DataFrame) ->pd.DataFrame:\n \"\"\"\n Description\n ------------\n \n converts year columns to int\n \"\"\" \n cols = ['MIN_GEBAEUDEJAHR','EINGEZOGENAM_HH_JAHR','GEBURTSJAHR']\n print(f'fixing year columns: {cols}')\n for col in cols:\n df[col].fillna(df[col].median(), inplace=True)\n df[col].astype('int')\n \n \n return df\n \n \n\n \n def __drop_customer_columns (self, df:pd.DataFrame, columns_to_drop:bool=None)->pd.DataFrame:\n \"\"\"\n drop additional coloumns of the customer dataset\n \"\"\"\n cols = ['CUSTOMER_GROUP', 'ONLINE_PURCHASE', 'PRODUCT_GROUP'] \n \n if cols[0] in df.columns:\n print(f'Dropping customer dataset cols: {cols}')\n df = self.__drop_columns(df, cols)\n \n return df\n \n \n \n def __handle_data_load_errors(self, df:pd.DataFrame) ->pd.DataFrame:\n \"\"\"\n handles the errors fo columns 18 and 19 of dtype float that contain two 18,19 \n \"\"\"\n cols_to_fix = {'CAMEO_DEUG_2015':'X', 'CAMEO_INTL_2015':'XX'}\n\n print(f'fixing load errors {cols_to_fix}')\n\n for col, val in cols_to_fix.items():\n n = df.loc[df[col] == val].shape[0]\n df.loc[df[col] == val, col] = np.NaN\n df.loc[:,col] = df.loc[:,col].astype('float')\n\n print(f'fixed column {col} - records fixed: {n}')\n \n return df\n\n\n def __drop_columns(self, df:pd.DataFrame, columns_to_drop:list=None)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n LP_STATUS_GROB: drop this as LP_STATUS_FEIN contains the same information more detailed\n LP_FAMILIE_GROB : analogue to LP_STATUS_GROB\n D19_VERSAND_ANZ_24: drop\n EINGEFUEGT_AM : just timestamp information when the record has been created\n LP_LEBENSPHASE_FEIN: drop - we keep just LP_LEBENSPHASE_GROB\n\n \"\"\"\n # if columns to drop have been defined then use them \n # else execute the default cleaning \n if columns_to_drop: \n cols_to_drop = columns_to_drop\n else:\n # default set of columns to drop\n cols_to_drop = ['EINGEFUEGT_AM']\n \n # drop because 
of very high correlation to other columns (>=0.9).\n cols_toomuchcorrelation = ['CAMEO_DEU_2015','LP_STATUS_GROB','LP_FAMILIE_GROB','D19_VERSAND_ANZ_24','LP_LEBENSPHASE_FEIN', \n 'ANZ_STATISTISCHE_HAUSHALTE', 'CAMEO_INTL_2015', 'D19_VERSAND_ONLINE_DATUM', 'KBA13_HALTER_66',\n 'KBA13_HERST_SONST', #'LP_LEBENSPHASE_GROB'\n 'PLZ8_BAUMAX', 'PLZ8_GBZ', 'PLZ8_HHZ',\n 'D19_GESAMT_ANZ_24', 'D19_VERSAND_ANZ_12', 'D19_VERSAND_DATUM', 'KBA05_KRSHERST2', \n 'KBA05_KRSHERST3', 'KBA05_SEG9', 'KBA13_KMH_211', 'PLZ8_ANTG1', 'PLZ8_ANTG3']\n \n \n # drop because of too many NULL values (>30%) \n cols_toomanynulls = ['ALTER_KIND4', 'TITEL_KZ', 'ALTER_KIND3', 'ALTER_KIND2', 'ALTER_KIND1', \n 'AGER_TYP', 'EXTSEL992', 'KK_KUNDENTYP', 'KBA05_BAUMAX', 'ALTER_HH','D19_LETZTER_KAUF_BRANCHE']\n \n #\n # columns if threshold is 25% \n #\n # ['EXTSEL992','KK_KUNDENTYP', 'ALTERSKATEGORIE_FEIN', \n #'D19_LETZTER_KAUF_BRANCHE','D19_GESAMT_ONLINE_QUOTE_12', 'D19_SOZIALES', 'D19_LOTTO','D19_KONSUMTYP', \n # 'D19_VERSAND_ONLINE_QUOTE_12','D19_TELKO_ONLINE_QUOTE_12', 'D19_VERSI_ONLINE_QUOTE_12', 'D19_BANKEN_ONLINE_QUOTE_12',\n #'ALTER_HH', 'KBA05_BAUMAX', 'AGER_TYP', 'TITEL_KZ'] \n \n cols_to_drop = cols_to_drop + cols_toomuchcorrelation + cols_toomanynulls\n\n print(f'dropping columns: {cols_to_drop}') \n\n try:\n df.drop(labels=cols_to_drop, axis=1, inplace=True) \n except KeyError as ex_keyerror:\n print(f'CATCHED EXCEPTION: KeyError: you tried to drop non existing columns: {cols_to_drop}')\n print(f'Failed columns: {ex_keyerror.args}')\n\n return df \n\n def __catvars_to_dummies(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n handles categorical variables. 
This will generate one hot encodings for the defined columns\n \"\"\"\n #'CAMEO_DEU_2015' will be dropped - ignore this\n # D19_LETZTER_KAUF_BRANCHE-> will be deleted \n cat_cols = []\n\n print('creating one hot encoding columns for: ')\n for col in cat_cols:\n print(f'\\t{col}')\n\n if cat_cols:\n # create one hot encodings using pandas get_dummies function\n df_dummies = pd.get_dummies(df[cat_cols], prefix=cat_cols, drop_first=True).astype('int64')\n df = pd.concat([df, df_dummies], axis=1)\n\n # drop original columns\n df.drop(cat_cols, axis=1, inplace=True) \n\n return df\n\n def __catvars_to_binary(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n \"\"\"\n cat_cols = {'OST_WEST_KZ':{'W':0,'O':1}}\n\n print('convert to binary: ')\n for col, dict_map in cat_cols.items():\n print(f'\\tcolumn: {col} - Mapping: {dict_map}')\n df.loc[:,col] = df.loc[:,col].map(dict_map)\n\n return df\n\n\n\n def __mark_nans(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n replaces all unkown values by np.NAN so that the pandas NAN functions can be used.\n\n Parameters\n ----------\n\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. 
Frame is expected to have columns as AZDIAS or CUSTOMERS \n \"\"\" \n\n print('replace unkown values by NaNs: ') \n unknown_val_set = self.df_metadata.copy()\n \n # select all row that contain the term \"unknown\" \n unknown_val_set = unknown_val_set[(unknown_val_set['Meaning'].str.contains('unknown'))]\n unknown_val_set['value_list'] = unknown_val_set['Value'].str.split(',')\n \n #with progressbar.ProgressBar(max_value=unknown_val_set.index.shape[0]) as bar:\n cnt = 0\n max_value=unknown_val_set.index.shape[0]\n for idx in unknown_val_set.index:\n col = unknown_val_set.loc[idx,'Attribute']\n vals = unknown_val_set.loc[idx,'value_list']\n # str convert to integers\n vals = list(map(int,vals))\n if col in df:\n df.loc[df[col].isin(vals),col] = np.NaN\n\n cnt += 1\n if (cnt == max_value) or (cnt % (max_value // 10)==0):\n print(f'\\tProcessed columns\\r{cnt:4} of {max_value}', end='\\r')\n \n \n #fix CAMEO_DEU_2015 XX will be dropped\n df.loc[df['CAMEO_DEU_2015']=='XX','CAMEO_DEU_2015'] = np.NaN\n print()\n \n # fix for LP_LEBENSPHASE_GROB','LP_FAMILIE_FEIN => 0 is not described. We handle it as unknown == missing\n cols = ['LP_LEBENSPHASE_GROB','LP_FAMILIE_FEIN','GEBURTSJAHR']\n print(f'replace 0 by NaNs for : {cols}')\n df.replace({'LP_LEBENSPHASE_GROB':0 ,'LP_FAMILIE_FEIN':0, 'GEBURTSJAHR':0}, np.NaN, inplace=True)\n \n return df\n \n def __build_features_chidren(self, df:pd.DataFrame, drop_childcols:bool = True)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n This function will build some features based on the given input data\n\n * Children and Teens: \n * Children:= number of children younger or equal than 10\n * Teens := number of children older or equal than 10\n\n Parameters\n ----------\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. 
Frame is expected to have columns as AZDIAS or CUSTOMERS \n \"\"\"\n \n # num of children > 0\n df['d_HAS_CHILDREN'] = 0\n # younger than or equal 10\n df['d_HAS_CHILDREN_YTE10'] = 0\n\n df.loc[df['ANZ_KINDER'] > 0, 'd_HAS_CHILDREN'] = 1\n\n # mask to filter rows that have at least one record\n mask = df[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']].max(axis=1) < 11\n df.loc[mask, 'd_HAS_CHILDREN_YTE10'] = 1\n \n child_cols = ['ANZ_KINDER','ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']\n \n if drop_childcols:\n df.drop(child_cols, axis='columns', inplace=True)\n \n return df\n \n\n\n def __calc_children_features(self, s):\n \"\"\"\n Description\n -----------\n uses features 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER' to reduce them to \n 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n\n\n * d_HAS_CHILDREN_YTE10 if person has children ANZ_KINDER>0\n * d_HAS_CHILDREN if person has at least one children <= 10 \n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n \"\"\" \n yte_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] <= 10).sum()\n \n\n s['d_HAS_CHILDREN'] = s['ANZ_KINDER']>0\n s['d_HAS_CHILDREN_YTE10'] = yte_10>0\n \n return s\n\n def __calc_child_and_teens(self, s):\n \"\"\"\n Description\n -----------\n\n counts the number of children less 10 and greater equal than 10. I assume that for more than 5 children\n all children > 4 are older than 10. 
Based on the analysis this is in general true\n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_NUM_CHILDREN_LESS_10', 'd_NUM_CHILDREN_GTE_10'\n \"\"\" \n less_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] < 10).sum()\n gte_10 = s['ANZ_KINDER'] - less_10\n\n s['d_NUM_CHILDREN_LESS_10'] = less_10\n s['d_NUM_CHILDREN_GTE_10'] = gte_10\n \n return s\n\n\n \n \n \n \n\nclass FeatureBuilder:\n \"\"\"\n Description\n -----------\n\n executes some data transformations on a arvato dataset and creates some new features\n \"\"\"\n \n def __init__(self):\n pass\n\n def transform(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n executes the data transformation \n\n Parameters\n ----------\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. Frame is expected to have columns as AZDIAS or CUSTOMERS \n\n\n \"\"\"\n self.__build_features_chidren(df)\n\n return df\n \n def fit (self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n prepare data for transformation\n \"\"\"\n pass\n\n def __build_features_chidren(self, df:pd.DataFrame, drop_childcols:bool = True)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n This function will build some features based on the given input data\n\n * Children and Teens: \n * Children:= number of children younger or equal than 10\n * Teens := number of children older or equal than 10\n\n Parameters\n ----------\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. 
Frame is expected to have columns as AZDIAS or CUSTOMERS \n \"\"\"\n \n # num of children > 0\n df['d_HAS_CHILDREN'] = 0\n # younger than or equal 10\n df['d_HAS_CHILDREN_YTE10'] = 0\n\n df.loc[df['ANZ_KINDER'] > 0, 'd_HAS_CHILDREN'] = 1\n\n # mask to filter rows that have at least one record\n mask = df[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']].max(axis=1) < 11\n df.loc[mask, 'd_HAS_CHILDREN_YTE10'] = 1\n \n child_cols = ['ANZ_KINDER','ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']\n \n if drop_childcols:\n df.drop(child_cols, axis='columns', inplace=True)\n \n return df\n \n\n\n def __calc_children_features(self, s):\n \"\"\"\n Description\n -----------\n uses features 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER' to reduce them to \n 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n\n\n * d_HAS_CHILDREN_YTE10 if person has children ANZ_KINDER>0\n * d_HAS_CHILDREN if person has at least one children <= 10 \n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n \"\"\" \n yte_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] <= 10).sum()\n \n\n s['d_HAS_CHILDREN'] = s['ANZ_KINDER']>0\n s['d_HAS_CHILDREN_YTE10'] = yte_10>0\n \n return s\n\n def __calc_child_and_teens(self, s):\n \"\"\"\n Description\n -----------\n\n counts the number of children less 10 and greater equal than 10. I assume that for more than 5 children\n all children > 4 are older than 10. 
Based on the analysis this is in general true\n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_NUM_CHILDREN_LESS_10', 'd_NUM_CHILDREN_GTE_10'\n \"\"\" \n less_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] < 10).sum()\n gte_10 = s['ANZ_KINDER'] - less_10\n\n s['d_NUM_CHILDREN_LESS_10'] = less_10\n s['d_NUM_CHILDREN_GTE_10'] = gte_10\n \n return s\n ","sub_path":"python/etl/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":17210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"80283950","text":"from pathlib import Path\nimport copy\n\nimport spikeextractors as se\nimport spiketoolkit as st\n\nfrom ..basesorter import BaseSorter\nfrom ..sorter_tools import recover_recording\n\ntry:\n import herdingspikes as hs\n HAVE_HS = True\nexcept ImportError:\n HAVE_HS = False\n\n\nclass HerdingspikesSorter(BaseSorter):\n \"\"\"\n HerdingSpikes is a sorter based on estimated spike location, developed by\n researchers at the University of Edinburgh. It's a fast and scalable choice.\n\n See: HILGEN, Gerrit, et al. Unsupervised spike sorting for large-scale,\n high-density multielectrode arrays. Cell reports, 2017, 18.10: 2521-2532.\n \"\"\"\n\n sorter_name = 'herdingspikes'\n installed = HAVE_HS\n requires_locations = True\n _default_params = None # later\n compatible_with_parallel = {'loky': True, 'multiprocessing': True, 'threading': False}\n\n installation_mesg = \"\"\"\n More information on HerdingSpikes at:\n * https://github.com/mhhennig/hs2\n \"\"\"\n\n def __init__(self, **kargs):\n BaseSorter.__init__(self, **kargs)\n \n @staticmethod\n def get_sorter_version():\n return hs.__version__\n\n def _setup_recording(self, recording, output_folder):\n \n p = self.params\n\n # Bandpass filter\n if p['filter'] and p['freq_min'] is not None and p['freq_max'] is not None:\n recording = st.preprocessing.bandpass_filter(\n recording=recording, freq_min=p['freq_min'], freq_max=p['freq_max'])\n\n if p['pre_scale']:\n recording = st.preprocessing.normalize_by_quantile(\n recording = recording, scale=p['pre_scale_value'],\n median=0.0, q1=0.05, q2=0.95\n )\n\n # this should have its name changed\n self.Probe = hs.probe.RecordingExtractor(\n recording,\n masked_channels=p['probe_masked_channels'],\n inner_radius=p['probe_inner_radius'],\n neighbor_radius=p['probe_neighbor_radius'],\n event_length=p['probe_event_length'],\n peak_jitter=p['probe_peak_jitter'])\n\n def _run(self, recording, output_folder):\n recording = recover_recording(recording)\n p 
= self.params\n\n if recording.is_filtered and p['filter']:\n print(\"Warning! The recording is already filtered, but Herding Spikes filter is enabled. You can disable \"\n \"filters by setting 'filter' parameter to False\")\n\n self.H = hs.HSDetection(\n self.Probe, file_directory_name=str(output_folder),\n left_cutout_time=p['left_cutout_time'],\n right_cutout_time=p['right_cutout_time'],\n threshold=p['detection_threshold'],\n to_localize=True,\n num_com_centers=p['num_com_centers'],\n maa=p['maa'],\n ahpthr=p['ahpthr'],\n out_file_name=p['out_file_name'],\n decay_filtering=p['decay_filtering'],\n save_all=p['save_all'],\n amp_evaluation_time=p['amp_evaluation_time'],\n spk_evaluation_time=p['spk_evaluation_time']\n )\n\n self.H.DetectFromRaw(load=True, tInc=100000)\n\n sorted_file = str(output_folder / 'HS2_sorted.hdf5')\n if(not self.H.spikes.empty):\n self.C = hs.HSClustering(self.H)\n self.C.ShapePCA(pca_ncomponents=p['pca_ncomponents'],\n pca_whiten=p['pca_whiten'])\n self.C.CombinedClustering(\n alpha=p['clustering_alpha'],\n cluster_subset=p['clustering_subset'],\n bandwidth=p['clustering_bandwidth'],\n bin_seeding=p['clustering_bin_seeding'],\n n_jobs=p['clustering_n_jobs'],\n min_bin_freq=p['clustering_min_bin_freq']\n )\n else:\n self.C = hs.HSClustering(self.H)\n\n if p['filter_duplicates']:\n uids = self.C.spikes.cl.unique()\n for u in uids:\n s = self.C.spikes[self.C.spikes.cl==u].t.diff()= 0x8000):\n return -((65535 - val) + 1)\n else:\n return val\n\n#Ensure that SPI is enabled on RPi\nbus = smbus.SMBus(1)\n\n# This is the address value read via the i2cdetect command\naddress = 0x68\n\n# Wake the Accelerometer MP6050 up as it starts in sleep mode\nbus.write_byte_data(address, power_mgmt_1, 0)\n\n\n\n# Get status of accelerometer\n\ndef get_accel_status(accel_list):\n # Read accelerometer data on the x-axis every second\n accel_xout = read_word_2c(0x3b) \n \n # Append new reading to the accelerometer list containing 5 values\n 
accel_list.append(accel_xout) \n \n # Delete the first value in the accelerometer list\n # This procedure updates the list every second \n del accel_list[0] \n \n # To check list of output values of accelerometer\n print(\"Accel List: \", accel_list)\n \n # Calculate the standard deviation\n std_dev=stdev(accel_list)\n \n # To check standard deviation of the values in the list\n print(\"Standard Deviation: \", std_dev)\n \n # Determine whether the washer is vibrating or not according to the\n # standard deviation of the list.\n # This value must be calibrated according to the surface and amount of \n # vibrating the accelerometer is subjected to.\n # Read documentation for calibration procedures.\n if std_dev<800:\n print(\"Not Vibrating\")\n vibrate = False\n else:\n print(\"Vibrating\")\n vibrate = True\n return vibrate\n\n\n\n# Get status of door\n \ndef get_door_status():\n # Door closed\n if GPIO.input(21) == GPIO.LOW: \n print(\"Closed\")\n door = True \n \n # Door opened\n if GPIO.input(21) == GPIO.HIGH: \n print(\"Opened\")\n door = False\n return door\n\n\n\n# State Machine\nclass Washing_Machine(sm.SM):\n \n # Initialising\n \n def __init__(self):\n self.start_state = 0\n self.details = ''\n self.name = ''\n self.scanned = False\n self.client = Client(account_sid, auth_token) \n # Create an object of the class MFRC522\n self.MIFAREReader = mfrc522.MFRC522()\n\n\n\n # Updating states according to input\n \n def get_next_values(self, state, inp):\n \n # State 0 and RFID card has not been scanned \n if state == 0 and not self.scanned:\n # Scan for RFID cards\n self.rfid_scanning()\n output = \"Available\"\n \n # Dictionary of door, accelerometer and RFID status\n to_check = {'door': inp[0], 'vibrate' : inp[1], 'scanned' : self.scanned}\n \n # State 0\n if state == 0:\n # Check that door closed, machine is vibrating and RFID card\n # has been scanned.\n if all(to_check.values()):\n next_state = 1\n output = \"Not Available\" \n else: \n next_state = 0\n 
output = \"Available\"\n \n # State 1\n elif state == 1: \n # Check if washer is still vibrating \n if to_check['vibrate']: \n next_state = 1\n output = \"Not Available\"\n else: \n next_state = 2 \n output = \"Not Available\"\n # Send SMS\n self.rfid_sms()\n \n # State 2\n else: \n # Check if door is still closed\n if to_check['door']: \n next_state = 2\n output = \"Not Available\" \n else: \n # Return to initial state \n next_state = 0 \n output = \"Available\"\n self.scanned = False\n \n assert(next_state in (0,1,2))\n return next_state, output\n\n\n\n # Check for any RFID cards scanned\n \n def rfid_scanning(self): \n # Welcome message\n print(\"Looking for cards\")\n print(\"Press Ctrl-C to stop.\")\n \n # Scan for cards\n (status,TagType) = self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)\n \n # Get the UID of the card\n (status,uid) = self.MIFAREReader.MFRC522_Anticoll()\n \n # If we have the UID, continue\n if status == self.MIFAREReader.MI_OK:\n\n # Print UID\n print(\"UID: \"+str(uid[0])+\",\"+str(uid[1])+\",\"+str(uid[2])+\",\"+str(uid[3]))\n user_uid = str(uid[0])+\",\"+str(uid[1])+\",\"+str(uid[2])+\",\"+str(uid[3])\n time.sleep(2)\n \n # Check that user is in the student database\n try:\n # Retrieve user name\n self.name = user_dict[user_uid]['Name']\n print(\"Name: \", self.name)\n \n # Retrieve user phone number\n self.details = user_dict[user_uid]['HP']\n print(\"Phone Number:\", self.details)\n \n # Update RFID status\n self.scanned = True\n \n except:\n print(\"No such user found\") \n\n\n \n # Send SMS to user\n \n def rfid_sms(self):\n message = self.client.messages.create(\n to=self.details,\n from_=sender_number,\n body= \"Hi \" + self.name + \", your laundry is ready!\")\n print(message)\n\n\n\n# Main Code\ndef main():\n #GPIO Settings\n # Use the BCM GPIO numbers as the numbering scheme.\n GPIO.setmode(GPIO.BCM)\n\n # Set GPIO 21 as input with pull-down resistor.\n GPIO.setup(21, GPIO.IN, GPIO.PUD_DOWN)\n \n # Create an 
object of the class Washing_Machine\n wm=Washing_Machine()\n wm.start()\n \n # Initial accelerometer list which should be changed during calibration.\n # Read documentation for calibration procedures.\n accel_list=[-1828, -1936, -1944, -1904, -1912]\n \n while True:\n # Retrieve boolean status of door\n door=get_door_status()\n \n # Retrieve boolean status of accelerometer\n vibrate=get_accel_status(accel_list)\n \n # Input both door and accelerometer status into state machine\n inp=(door, vibrate)\n wm.step(inp)\n \n # Update availability on firebase based on output of state machine\n availability_db.child(\"Washer 2\").set(wm.step(inp))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Washer_2.py","file_name":"Washer_2.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"12697551","text":"\n'''\n\n\tProject:\tCS100\n\tTitle:\t\tScenePrimitives\n\n\tAuthor:\t\tJohn Mooney\n\tDate:\t\t1/23/2013\n\n\tDescription:\n\t\tEntry point for running a debug CS100 version\n'''\n\n# Imports\nimport pyglet\nimport sys\nimport os\n\nfrom pyglet.graphics import GL_LINES\n\n\n'''\t\tSet Search Directory\t'''\nfor root, direcs, files in os.walk(os.getcwd()):\n\tfor direc in direcs:\n\t\tsys.path.append(os.path.join(root, direc))\n\n\n# Imports\nimport Color\n\nfrom Renderer import Renderer\nfrom TransformationGraph import Transform\nfrom ResourceManager import ResourceManager\n\nfrom Animation import Animation\nfrom Sprite import Sprite\n\n#-------------------------------------------------------#\t\n\nwindow \t\t\t= pyglet.window.Window(800, 600)\nwinDimensions \t= [800, 600]\n\nrendMan = Renderer(winSize=winDimensions)\nRenderer.activeRenderer = rendMan\n\nsg = rendMan.getSceneGraph()\n\nrm = ResourceManager(\"Tests\\\\data\")\nResourceManager.activeManager = rm\nrm.registerExtension(\".jpg\", \"img\", [\"img\"], pyglet.image.load)\nrm.registerExtension(\".bmp\", \"img\", [\"img\"], pyglet.image.load)\nrm.registerExtension(\".png\", \"img\", [\"img\"], pyglet.image.load)\nrm.registerExtension(\".anim\", \"anim\", [\"anim\"], Animation)\n\nim = rm.request(\"C:/Users/John/Pictures/Lake.jpg\")\n\nsp = pyglet.sprite.Sprite(im)\nsp2 = Sprite(im, t=sg.newTransform())\n\npyglet.gl.glClearColor(1,0,1,0);\ndef update(dt):\n\tpass\n\t\n\t\n@window.event\ndef on_draw():\n\twindow.clear()\n\trendMan.render()\n\tsp.draw()\n\npyglet.clock.schedule(update)\npyglet.app.run()\n\n","sub_path":"py ref/pyglet/CS100-master/Tests/SpriteTest.py","file_name":"SpriteTest.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"321355737","text":"#!/usr/bin/env python\n#coding:utf-8\nimport sys,re,os,argparse,fileinput\n\nparser = argparse.ArgumentParser(description=\"This Script is used to find all deg.pro-pro.txt, Co-Expression.xls, filter.coexpression.txt, and filter.propro.txt under a given path, and add the symbol from anno file. New file will replace the old file, old file will be renamed with '.no.symbol.txt' extension.\")\nparser.add_argument(\"-path\",\"--path\",type=str,help=\"The input path, 'deg.pro-pro.txt','Co-Expression.xls','filter.coexpression.txt','filter.propro.txt' file will be found under this path.\",required = True)\nparser.add_argument(\"-anno\",\"--anno\",type=str,help=\"The input anno file, first column must be id, seventh column must be symbol information.\",required = True)\nparser.add_argument(\"-v\",\"--version\",action=\"version\",version='%(prog)s 1.0')\nargs = parser.parse_args()\n\nfile_list = os.popen('find ' + args.path + \" -name deg.pro-pro.txt -o -name Co-Expression.xls -o -name filter.coexpression.txt -o -name filter.propro.txt\").read().rstrip(\"\\n\").split(\"\\n\")\nanno_d = {}\nwith open(args.anno,\"r\") as anno:\n for i in anno:\n anno_d[i.split(\"\\t\")[0]] = i.split(\"\\t\")[6]\n# anno_d[\"-\"] = \"-\"\nfor i in fileinput.input(file_list,backup = \".no.symbol.txt\",inplace = 1):\n t = re.sub('\\(-\\)?',\"(-:-)\",i)\n ensembol = re.findall('\\S+\\((.+?)\\)\\s+?',t)\n if ensembol:\n for ei in ensembol:\n if ei == \"-:-\":\n continue\n else:\n eii = ei.split(\",\")\n for index,eiii in enumerate(eii):\n eii[index] = eiii + \":\" + anno_d[eiii]\n an = \";\".join(eii)\n t = re.sub(ei,an,t,1)\n sys.stdout.write(t)\n else:\n a = i.split(\"\\t\")\n for n,ai in enumerate(a):\n if anno_d.has_key(ai):\n a[n] = ai + \"(\" + anno_d[ai] + \")\"\n sys.stdout.write(\"\\t\".join(a))\n\n 
\n","sub_path":"add_coex_and_pro_symbol.py","file_name":"add_coex_and_pro_symbol.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"622923781","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 2019-03-12 Guolikai\n# 功能: 使用科学计算库NumPy简化程序\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# 解决中文显示问题\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\ndef demo():\n # 生成1-99数组\n arr_1 = np.arange(1,100)\n print(arr_1)\n # s生成随机数是2-12的20列10行矩阵\n arr = np.random.randint(2,13,(10,20))\n print(arr)\n\n # 改变数组形状\n arr1 = np.reshape(arr,(8,25))\n print(arr1)\n\n\ndef main():\n \"\"\"\n 直方图绘制: plt.hist(data,bins) data,数据列表;bins:分组边界\n edgecolor: 边界颜色\n linewidth: 边界线宽度\n rwidth: 直方图宽度\n density: 概率统计(老版本normed)\n \"\"\"\n total_num = 10000\n arr_a = np.random.randint(1, 7, total_num)\n arr_b = np.random.randint(1, 7, total_num)\n # numpy数据运算,即向量化运算\n arr = arr_a + arr_b\n # print('第一个数组:{}'.format(arr_a))\n # print('第二个数组:{}'.format(arr_b))\n # print('二个数组和:{}'.format(arr))\n # print('二个数组相乘:{}'.format(arr_a * arr_b))\n\n # 直接生产直方图的统计具体数据\n hist, bins = np.histogram(arr,bins=range(2, 14))\n print(hist)\n print(bins)\n # 数据可视化\n # X轴加单位,设置x轴坐标点显示\n tick_labels = ['2点', '3点', '4点', '5点', '6点', '7点', '8点', '9点', '10点', '11点', '12点']\n tick_pos = np.arange(2, 13) + 0.5 #生成位置并修改\n plt.xticks(tick_pos, tick_labels)\n plt.title('骰子点数统计')\n plt.xlabel('点数')\n plt.ylabel('统计数量')\n plt.hist(arr, bins=range(2, 14), density=0, edgecolor='black', linewidth=1, rwidth=0.8)\n plt.show()\n\n\nif __name__ == '__main__':\n demo()\n # main()\n","sub_path":"demo_numpy.py","file_name":"demo_numpy.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"149668012","text":"import os\nimport random\nimport pygame\nimport sys\npygame.init()\npygame.font.init()\n\nFPS = 50\nWIDTH = 500\nHEIGHT = 500\nSTEP = 10\nTILE_WIDTH = TILE_HEIGHT = 50\nGRAVITY = 10\n\n\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n\nall_sprites = pygame.sprite.Group()\ntiles_group = pygame.sprite.Group()\nplayer_group = pygame.sprite.Group()\nbox_group = pygame.sprite.Group()\nmoney_group = pygame.sprite.Group()\nbox_black_group = pygame.sprite.Group()\nnps_group = pygame.sprite.Group()\nstar_group = pygame.sprite.Group()\nscreen_rect = (0, 0, WIDTH, HEIGHT)\n\n\n\ndef load_image(name, colorkey=None):\n fullname = os.path.join('data', name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error as message:\n print('Cannot load image:', name)\n raise SystemExit(message)\n\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey)\n return image\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\ndef start_screen():\n intro_text = [\"ЗАСТАВКА\", \"\",\n \"Правила игры\",\n \"Если в правилах несколько строк,\",\n \"приходится выводить их построчно\"]\n fon = pygame.transform.scale(load_image('fon.jpg'), (WIDTH, HEIGHT))\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(\"freesansbold.ttf\", 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('black'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return # начинаем игру\n pygame.display.flip()\n clock.tick(FPS)\n\n\na = [['' for i in range(100)] for j in range(100)]\n\nprint(a)\nfor i in range(100):\n for j in 
range(100):\n chance = random.randint(0, 100)\n if chance < 20:\n a[i][j] = '#'\n else:\n a[i][j] = '.'\n\nfor i in range(100):\n a[0][i] = '$'\n a[99][i] = '$'\n a[i][0] = '$'\n a[i][99] = '$'\n\ni = 0\nwhile i < 50:\n chance2 = random.randint(0, 100)\n if chance2 < 50:\n x, y = random.randint(2, 97), random.randint(2, 97)\n while a[x][y] == '#' and a[x][y] != '@' and a[x][y] != '$' and a[x][y] != '?':\n x, y = random.randint(2, 97), random.randint(2, 97)\n a[x][y] = '*'\n i += 1\n\n\n\nprint(a)\nx, y = random.randint(0, 100), random.randint(0, 100)\nwhile a[x][y] == '#':\n x, y = random.randint(0, 100), random.randint(0, 100)\na[x][y] = '@'\ncash_m = []\n\ndef generate_level(level):\n new_player = None\n for y in range(len(level)):\n for x in range(len(level[y])):\n if level[y][x] == '.':\n Tile('empty', x, y)\n elif level[y][x] == '#':\n Tile('wall', x, y)\n elif level[y][x] == '@':\n Tile('empty', x, y)\n new_player = Player(x, y)\n Tile('npc', x + 1, y + 1)\n elif level[y][x] == '*':\n Tile('empty', x, y)\n cash_m.append(Tile('cash', x, y))\n elif level[y][x] == '$':\n Tile('box_black', x, y)\n elif level[y][x] == '?':\n Tile('empty', x, y)\n Tile('npc', x, y)\n\n\n return new_player\n\ndef cvest_nps():\n chotchik = 0\n proverochra_na_petyxa = False\n\n if pygame.sprite.spritecollideany(player, nps_group):\n intro_text = [\"КВЕСТ ПОЛУЧЕН\", \"------\",\n \"СОБЕРИТЕ 50 МОНЕТ И ВАМ\",\n \"ОТКРОЕТСЯ ТАЙНА МИРОЗДАНИЯ\"]\n if pygame.sprite.spritecollideany(player, money_group) != None:\n chotchik += 1\n if chotchik == 50:\n proverochra_na_petyxa = True\n\n\n font = pygame.font.Font(None, 100)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('black'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n\n\n\ntile_images = {\n 'wall': load_image('box.png'),\n 'empty': 
load_image('grass.png'),\n 'player': load_image('mario.png'),\n 'cash': load_image('money.png'),\n 'box_black': load_image('box_black.png'),\n 'npc': load_image('npc.png'),\n 'star': load_image('star.png')\n}\n\nclass Tile(pygame.sprite.Sprite):\n def __init__(self, tile_type, pos_x, pos_y):\n if tile_type == 'wall':\n super().__init__(tiles_group, box_group, all_sprites)\n elif tile_type == 'cash':\n super().__init__(money_group, all_sprites)\n elif tile_type == 'box_black':\n super().__init__(box_black_group, all_sprites, tiles_group)\n elif tile_type == 'npc':\n super().__init__(nps_group, all_sprites)\n elif tile_type == 'star':\n super().__init__(star_group, all_sprites)\n else:\n super().__init__(tiles_group, all_sprites)\n self.image = tile_images[tile_type]\n self.rect = self.image.get_rect().move(TILE_WIDTH * pos_x,\n TILE_HEIGHT * pos_y)\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__(player_group, all_sprites)\n self.image = tile_images['player']\n self.rect = self.image.get_rect().move(TILE_WIDTH * pos_x + 15,\n TILE_HEIGHT * pos_y + 5)\n\nclass Camera:\n def __init__(self):\n self.dx = 0\n self.dy = 0\n\n def apply(self, obj): # перемещение любого спрайта\n obj.rect.x += self.dx\n obj.rect.y += self.dy\n\n def update(self, target): # следит за персонажем\n self.dx = -(target.rect.x + target.rect.w // 2 - WIDTH // 2)\n self.dy = -(target.rect.y + target.rect.h // 2 - HEIGHT // 2)\n\n\nstart_screen()\n\nplayer = generate_level(a)\ncamera = Camera()\n\npressed_left = pressed_right = pressed_up = pressed_down = False\nrunning = True\nscreen1 = pygame.transform.scale(load_image('grass.png'), (WIDTH, HEIGHT))\nscreen.blit(screen1, (0, 0))\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN: # check for key presses\n if event.key == pygame.K_LEFT: # left arrow turns left\n pressed_left = True\n elif event.key == pygame.K_RIGHT: 
# right arrow turns right\n pressed_right = True\n elif event.key == pygame.K_UP: # up arrow goes up\n pressed_up = True\n elif event.key == pygame.K_DOWN: # down arrow goes down\n pressed_down = True\n elif event.type == pygame.KEYUP: # check for key releases\n if event.key == pygame.K_LEFT: # left arrow turns left\n pressed_left = False\n elif event.key == pygame.K_RIGHT: # right arrow turns right\n pressed_right = False\n elif event.key == pygame.K_UP: # up arrow goes up\n pressed_up = False\n elif event.key == pygame.K_DOWN: # down arrow goes down\n pressed_down = False\n print(money_group.sprites())\n # In your game loop, check for key states:\n for i in range(len(cash_m)):\n f = random.randint(1, 150)\n d1, d2 = 0, 0\n if 1 < f < 25 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.x += 15\n d1 = 15\n if 26 < f < 50 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.x -= 15\n d1 = -15\n if 51 < f < 75 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.y += 15\n d2 = 15\n if 76 < f < 100 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.y -= 15\n d2 = -15\n if pygame.sprite.spritecollideany(cash_m[i], box_black_group) != None:\n if d1 == 0:\n cash_m[i].rect.y += d2 * -1\n else:\n cash_m[i].rect.x += d1 * -1\n if pressed_left:\n player.rect.x -= STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.x += STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, money_group).kill()\n if pressed_right:\n player.rect.x += STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.x -= STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n 
pygame.sprite.spritecollideany(player, money_group).kill()\n if pressed_up:\n player.rect.y -= STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.y += STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, money_group).kill()\n if pressed_down:\n player.rect.y += STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.y -= STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, money_group).kill()\n\n camera.update(player)\n for sprite in all_sprites:\n camera.apply(sprite)\n\n\n tiles_group.draw(screen)\n player_group.draw(screen)\n money_group.draw(screen)\n box_black_group.draw(screen)\n nps_group.draw(screen)\n star_group.draw(screen)\n cvest_nps()\n pygame.display.flip()\n clock.tick(FPS)\nterminate()\n","sub_path":"1321.py","file_name":"1321.py","file_ext":"py","file_size_in_byte":10667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"487149824","text":"#!/usr/bin/env python3\n\ndef f(n):\n try:\n x = 4 / n\n print('x =', x)\n except ZeroDivisionError as e:\n print('in except block,', e)\n else:\n print('in else block')\n finally:\n print('in finally block')\n\nf(2)\n\nprint()\n\nf(0)\n","sub_path":"22_try_catch.py","file_name":"22_try_catch.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"162602588","text":"\"\"\"\n\nDIFFICULTY: SIMPLE\n\nStephen's speech module is broken. This module is responsible for his number pronunciation. He has to click to input all of the numerical digits in a figure, so when there are big numbers it can take him a long time. Help the robot to speak properly and increase his number processing speed by writing a new speech module for him. All the words in the string must be separated by exactly one space character. Be careful with spaces -- it's hard to see if you place two spaces instead one.\nInput: A number as an integer.\nOutput: The string representation of the number as a string.\nHow it is used: This concept may be useful for the speech synthesis software or automatic reports systems. This system can also be used when writing a chatbot by assigning words or phrases numerical values and having a system retrieve responses based on those values.\nPrecondition: 0 < number < 1000\n\"\"\"\n\nFIRST_TEN = {\n \"1\": \"one\",\n \"2\": \"two\",\n \"3\": \"three\",\n \"4\": \"four\",\n \"5\": \"five\",\n \"6\": \"six\",\n \"7\": \"seven\",\n \"8\": \"eight\",\n \"9\": \"nine\"\n }\nSECOND_TEN = {\n \"10\": \"ten\",\n \"11\": \"eleven\",\n \"12\": \"twelve\",\n \"13\": \"thirteen\",\n \"14\": \"fourteen\",\n \"15\": \"fifteen\",\n \"16\": \"sixteen\",\n \"17\": \"seventeen\",\n \"18\": \"eighteen\",\n \"19\": \"nineteen\"\n }\nOTHER_TENS = {\n \"20\": \"twenty\",\n \"30\": \"thirty\",\n \"40\": \"forty\",\n \"50\": \"fifty\",\n \"60\": \"sixty\",\n \"70\": \"seventy\",\n \"80\": \"eighty\",\n \"90\": \"ninety\"\n }\nHUNDRED = \"hundred\"\n\ndef checkio(number):\n STR_NUM = str(number)\n LENGTH = len(STR_NUM)\n huns_dig = 0\n tens_dig = 0\n ones_dig = 0\n two_dig = 0\n final_str = \"\"\n \n if LENGTH == 1:\n final_str = FIRST_TEN[STR_NUM]\n \n if LENGTH == 2:\n if STR_NUM in SECOND_TEN:\n return SECOND_TEN[STR_NUM]\n else:\n tens_dig = number // 10 * 10\n ones_dig = number % 10\n final_str = OTHER_TENS[str(tens_dig)]\n 
if ones_dig != 0:\n final_str = final_str + \" \" + FIRST_TEN[str(ones_dig)]\n \n if LENGTH == 3:\n \n huns_dig = number // 100\n two_dig = number % 100\n tens_dig = number % 100 // 10 * 10\n ones_dig = number % 10\n \n final_str = FIRST_TEN[str(huns_dig)] + \" \" + HUNDRED\n \n if str(two_dig) in SECOND_TEN:\n final_str = final_str + \" \" + SECOND_TEN[str(two_dig)]\n \n if tens_dig != 0 and str(two_dig) not in SECOND_TEN:\n final_str = final_str + \" \" + OTHER_TENS[str(tens_dig)]\n \n if ones_dig != 0 and str(two_dig) not in SECOND_TEN:\n final_str = final_str + \" \" + FIRST_TEN[str(ones_dig)]\n return final_str\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio(4) == 'four', \"1st example\"\n assert checkio(133) == 'one hundred thirty three', \"2nd example\"\n assert checkio(12) == 'twelve', \"3rd example\"\n assert checkio(101) == 'one hundred one', \"4th example\"\n assert checkio(212) == 'two hundred twelve', \"5th example\"\n assert checkio(40) == 'forty', \"6th example\"\n assert not checkio(212).endswith(' '), \"Don't forget strip whitespaces at the end of string\"\n","sub_path":"home/speech_module.py","file_name":"speech_module.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"596001711","text":"# from pathlib import Path\n#\n# p = Path('/tmp/test20200630.csv')\n# parent = p.parent\n# if not parent.exists():\n# parent.mkdir(parents=True)\n#\n# csv_body = '''\\\n# id,name,age,comment\n# 1,zs,18,\"I'm 18\"\n# 2,ls,20,\"this is a \"\"test\"\"string.\"\n# 3,ww,23,\"你好\n#\n# 计算机\n# \"\n# '''\n# p.write_text(csv_body)\n#\n# import csv\n#\n# p = Path('/tmp/test20200630.csv')\n# with open(str(p)) as f:\n# reader = csv.reader(f)\n# print(next(reader))\n# print(next(reader))\n#\n# rows = [\n# \t[4,'tom',22,'tom'],\n# \t(5,'jerry',24,'jerry'),\n# \t(6,'justin',22,'just\\t\"in'),\n# \t\"abcdefghi\",\n# \t((1,),(2,))\n# ]\n#\n# row = rows[0]\n# with open(str(p), 'a') as f:\n# writer = csv.writer(f)\n# writer.writerow(row)\n# writer.writerows(rows)\n# ===================================================\n# 处理ini文件\nfrom configparser import ConfigParser\n\nfilename = '/tmp/test.ini'\nnewfilename = '/tmp/mysql.ini'\n\ncfg = ConfigParser()\ncfg.read(filename)\n# print(cfg.sections())\n# print(cfg.has_section('client'))\n#\n# print(cfg.items('mysqld'))\n# for k,v in cfg.items():\n # print(k, type(v))\n # print(k, cfg.items(k))\n#\n# tmp = cfg.get('mysqld','port')\n# print(type(tmp), tmp)\n# print(cfg.get('mysqld', 'a'))\n# print(cfg.get('mysqld', 'magedu', fallback='python'))\n#\n# tmp = cfg.getint('mysqld', 'port')\n# print(type(tmp), tmp)\n#\nif cfg.has_section('test'):\n cfg.remove_section('test')\n\ncfg.add_section('test')\ncfg.set('test', 'test1', '1')\ncfg.set('test', 'test2', '2')\n\nwith open(newfilename, 'w') as f:\n cfg.write(f)\n\n# print(cfg.getint('test', 'test2'))\ncfg.remove_option('test', 'test2')\n\ncfg['test']['x'] = '100'\ncfg['test2'] = {'test2':'1000'}\n\n# print('x' in cfg['test'])\n# print('x' in cfg['test2'])\n#\nprint(cfg._dict)\n#\n# with open(newfilename, 'w') as f:\n# 
cfg.write(f)","sub_path":"练习/处理csv与ini文件/处理csv与ini文件.py","file_name":"处理csv与ini��件.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"446993883","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport requests\nimport json\n\n# URL = 'http://www.wanfangdata.com.cn/searchResult/getCoreSearch.do?d=0.1815591873188529'\n# headers = {\n# \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',\n# \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\"\n# }\n# datas = {\n# \"paramStrs\": \"主题:(汉韩)\",\n# \"classType\": \"degree-degree_artical\",\n# \"pageNum\": 1,\n# \"pageSize\": 20,\n# \"isSearchSecond\": \"false\",\n# \"chineseEnglishExpand\": \"false\",\n# \"topicExpand\": \"false\",\n# \"searchWay\": \"AdvancedSearch\",\n# \"corePerio\": \"false\",\n# }\n#\n# session = requests.session()\n# res = session.post(URL, data=datas, headers=headers)\n# data = res.content.decode()\n# print(data)\n\n# id_list = []\n#\n# num = 22\n# for page_id in range(1, num):\n#\n# datas = {\n# \"paramStrs\": \"主题:(汉韩)\",\n# \"classType\": \"degree-degree_artical\",\n# \"pageNum\": page_id,\n# \"pageSize\": 20,\n# \"isSearchSecond\": \"false\",\n# \"chineseEnglishExpand\": \"false\",\n# \"topicExpand\": \"false\",\n# \"searchWay\": \"AdvancedSearch\",\n# \"corePerio\": \"false\",\n# }\n#\n# session = requests.session()\n# res = session.post(URL, data=datas, headers=headers)\n# data = res.content.decode()\n# print(data)\n# # info = json.loads(data)\n# # for thesisnum in info['pageRow']:\n# # id_list.append(thesisnum['id'])\n# # print('第{}页:'.format(page_id))\n\n\n# url = 'http://d.wanfangdata.com.cn/Detail/Thesis/'\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\",\n# \"Content-Type\": \"application/json;charset=UTF-8\",\n# \"Host\":\"d.wanfangdata.com.cn\"\n# }\n# body = {\n# \"Id\": \"D048000\"\n# }\n#\n# res = requests.post(url, headers=headers, json=body)\n# data = res.content.decode()\n# print(data)\n\n\nurl = ' 
http://d.wanfangdata.com.cn/Detail/Thesis/' # 坑,一定要从requests URL复制\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\",\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Host\": \"d.wanfangdata.com.cn\"\n}\nbody = {\n \"Id\": \"D048000\"\n}\n\njson_list = []\nfor i in range(0, len(id_list) + 1):\n thesis_id = '{}'.format(i)\n body['ID'] = thesis_id\n res = requests.post(url, headers=headers, json=body)\n data = res.content.decode()\n print(data)\n info = json.loads(data)\n # print(info)\n json_infos = {\n \"文章标题\": info['detail'][0]['thesis']['Title'],\n \"关键词\": info['detail'][0]['thesis']['Keywords'],\n \"摘要\": info['detail'][0]['thesis']['Abstract'],\n \"作者\": info['detail'][0]['thesis']['Creator'],\n \"作者单位\": info['detail'][0]['thesis']['OrganizationNorm'],\n \"层次\": info['detail'][0]['thesis']['Degree'],\n \"专业\": info['detail'][0]['thesis']['Major'],\n \"导师\": info['detail'][0]['thesis']['Tutor'],\n\n \"链接\": 'http://d.wanfangdata.com.cn/Detail/Thesis/' + info['detail'][0]['thesis']['Id']\n # \"在线发表时间\":['pageRow'],#??\n }\n json_list.append(json_infos)\nprint(json_infos)\n","sub_path":"web_spider/get_wanfang_serach_data.py","file_name":"get_wanfang_serach_data.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"590799873","text":"from urllib.request import urlopen, Request\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\n\r\n\r\n\r\n\r\ntheurl = urlopen(Request(\"http://www.culturalindia.net/monuments/taj-mahal.html\", headers={'User-Agent': 'Mozilla'}))\r\nsoup = BeautifulSoup(theurl,\"html.parser\")\r\n\r\nheader=\"TAJ MAHAL\\n\"\r\nfile = open(os.path.expanduser(\"mm1.txt\"),\"wb\")\r\nfor record in soup.findAll(\"div\"):\r\n for data in record.findAll(\"p\"):\r\n set = data.text\r\n print(set)\r\n file.write(bytes(header, encoding=\"ascii\", errors='ignore'))\r\n file.write(bytes(set, encoding=\"ascii\", errors='ignore'))\r\n\r\nfile.close()\r\n\r\nf = open('mm1.txt', 'r+')\r\nn = f.read().replace(',', ',\\n')\r\nf.truncate(0)\r\nf.write(n)\r\nf.close()\r\n\r\n\r\n","sub_path":"searchengine/backenddata collector/monuments/monumentdescription.py","file_name":"monumentdescription.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"469550600","text":"# http://www.blog.pythonlibrary.org/2012/08/02/python-101-an-intro-to-logging/\r\n# otherMod.py\r\nimport logging, sys\r\n\r\n# module_logger = logging.getLogger(\"exampleApp.altroModulo\")\r\n# print globals()\r\n# print __file__\r\n# print __name__\r\n\r\nlogger = None\r\nloggerName = None\r\n\r\ndef initLog(logID):\r\n global loggerName, logger\r\n loggerName = logID + '.' + __name__\r\n logger = logging.getLogger(loggerName)\r\n\r\n#----------------------------------------------------------------------\r\ndef add(x, y):\r\n # global logger\r\n funcName = sys._getframe().f_code.co_name\r\n logger.info(\"added %s and %s to get %s\" % (x, y, x+y))\r\n\r\n # logger = logging.getLogger(loggerName + '.'+ funcName)\r\n # logger1 = logging.getLogger(loggerName + \".add\")\r\n # logger.info(\"added %s and %s to get %s\" % (x, y, x+y))\r\n\r\n # logger = logging.getLogger(\"exampleApp.otherMod2.add\")\r\n # logger.info(\"added %s and %s to get %s\" % (x, y, x+y))\r\n\r\n return x+y","sub_path":"LnLogger/Samples/YAML_newLogger/Modulo_02.py","file_name":"Modulo_02.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"226313290","text":"# @Author : Xavier Faure\n# @Email : xavierf@kth.se\n\nfrom eppy.results import readhtml\nimport esoreader\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef getOutputList(path,idf,OutputsFile):\n OutputsVar = {}\n OutputsVar['Var'] = []\n outputs = open(os.path.join(path,OutputsFile), 'r')\n Lines = outputs.readlines()\n for line in Lines:\n tofind = 'Reporting_Frequency ='\n if tofind in line:\n OutputsVar['Reportedfrequency'] = line[line.index(tofind)+len(tofind)+1:-1]\n if '## ' in line[:3]:\n var = line[3:][::-1]\n var2add = var[var.index('[')+2:var.index(',')][::-1]\n keep = True\n if 'People' in var2add and len(idf.idfobjects[\"PEOPLE\"])==0:\n keep = False\n if keep:\n OutputsVar['Var'].append(var2add)\n return OutputsVar\n\ndef AddOutputs(idf,building,path,EMSOutputs,OutputsFile):\n\n OutputsVar = getOutputList(path,idf,OutputsFile)\n #we shall start by removing all predclared outputes from the template\n predef = idf.idfobjects[\"OUTPUT:VARIABLE\"]\n for i in reversed(predef):\n idf.removeidfobject(i)\n idf.newidfobject(\n \"OUTPUT:DIAGNOSTICS\",\n Key_1=\"DISPLAYEXTRAWARNINGS\",\n )\n\n for var in OutputsVar['Var']:\n idf.newidfobject(\n \"OUTPUT:VARIABLE\",\n Variable_Name=var,\n Reporting_Frequency=OutputsVar['Reportedfrequency'],\n )\n zonelist = getHeatedZones(idf)\n if EMSOutputs:\n setEMS4MeanTemp(idf, zonelist, OutputsVar['Reportedfrequency'],EMSOutputs[0])\n setEMS4TotHeatPow(idf, building,zonelist, OutputsVar['Reportedfrequency'], EMSOutputs[1])\n if len(EMSOutputs)>2:\n setEMS4TotDHWPow(idf, building, zonelist, OutputsVar['Reportedfrequency'], EMSOutputs[2])\n return idf\n\ndef getHeatedZones(idf):\n #returns the zone names that are above ground levels, which means heated zones\n zoneName = []\n AllZone = idf.idfobjects[\"ZONE\"]\n for idx, zone in enumerate(AllZone):\n if int(zone.Name[zone.Name.find('Storey')+6:]) >= 0: #the name ends with Storey # so lets get the storey number this way\n 
zoneName.append(zone.Name)\n return zoneName\n\ndef setEMS4MeanTemp(idf,zonelist,Freq,name):\n #lets create the temperature sensors for each zones and catch their volume\n for idx,zone in enumerate(zonelist):\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:SENSOR',\n Name = 'T'+str(idx),\n OutputVariable_or_OutputMeter_Index_Key_Name = zone,\n OutputVariable_or_OutputMeter_Name = 'Zone Mean Air Temperature',\n )\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:INTERNALVARIABLE',\n Name = 'Vol'+str(idx),\n Internal_Data_Index_Key_Name = zone,\n Internal_Data_Type = 'Zone Air Volume'\n )\n #lets create the prgm collingManager\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAMCALLINGMANAGER',\n Name='Average Building Temperature',\n EnergyPlus_Model_Calling_Point='EndOfZoneTimestepBeforeZoneReporting' ,\n Program_Name_1='AverageZoneTemps'\n )\n #lets create the global Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',\n Erl_Variable_1_Name='AverageBuildingTemp' ,\n )\n #lets create the EMS Output Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',\n Name=name,\n EMS_Variable_Name='AverageBuildingTemp' ,\n Type_of_Data_in_Variable='Averaged',\n Update_Frequency = 'ZoneTimeStep'\n )\n #lets create the program\n listofTemp = ['T'+str(i) for i in range(len(zonelist))]\n listofVol = ['Vol' + str(i) for i in range(len(zonelist))]\n SumNumerator = ''\n SumDenominator = ''\n for idx,Temp in enumerate(listofTemp):\n SumNumerator = SumNumerator+Temp+'*'+listofVol[idx]+'+'\n SumDenominator = SumDenominator + listofVol[idx] + '+'\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAM',\n Name='AverageZoneTemps',\n Program_Line_1='SET SumNumerator = '+SumNumerator[:-1],\n Program_Line_2='SET SumDenominator = '+SumDenominator[:-1],\n Program_Line_3='SET AverageBuildingTemp = SumNumerator / SumDenominator',\n )\n #lets create now the ouputs of this EMS\n idf.newidfobject(\n 'OUTPUT:ENERGYMANAGEMENTSYSTEM',\n 
Actuator_Availability_Dictionary_Reporting='Verbose',\n EMS_Runtime_Language_Debug_Output_Level='Verbose',\n Internal_Variable_Availability_Dictionary_Reporting='Verbose',\n )\n #lets create now the final outputs\n idf.newidfobject(\n 'OUTPUT:VARIABLE',\n Variable_Name=name,\n Reporting_Frequency=Freq,\n )\n\ndef setEMS4TotHeatPow(idf,building,zonelist,Freq,name):\n #lets create the temperature sensors for each zones and catch their volume\n for idx,zone in enumerate(zonelist):\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:SENSOR',\n Name = 'Pow'+str(idx),\n OutputVariable_or_OutputMeter_Index_Key_Name = zone+' IDEAL LOADS AIR SYSTEM',\n OutputVariable_or_OutputMeter_Name = 'Zone Ideal Loads Supply Air Total Heating Rate'\n )\n #lets create the prgm collingManager\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAMCALLINGMANAGER',\n Name='Compute Total Building Heat Pow',\n EnergyPlus_Model_Calling_Point='EndOfZoneTimestepBeforeZoneReporting' ,\n Program_Name_1='TotZonePow'\n )\n #lets create the global Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',\n Erl_Variable_1_Name='TotBuildPow' ,\n )\n #lets create the EMS Output Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',\n Name=name,\n EMS_Variable_Name='TotBuildPow' ,\n Type_of_Data_in_Variable='Averaged',\n Update_Frequency = 'ZoneTimeStep'\n )\n #lets create the program\n listofPow = ['Pow'+str(i) for i in range(len(zonelist))]\n SumNumerator = ''\n for idx,Pow in enumerate(listofPow):\n SumNumerator = SumNumerator+Pow+'+'\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAM',\n Name='TotZonePow',\n Program_Line_1='SET TotBuildPow = '+ SumNumerator[:-1],\n )\n #to uncomment if the EMS is not created before for the mean air tempeatrue\n # #lets create now the ouputs of this EMS\n # idf.newidfobject(\n # 'OUTPUT:ENERGYMANAGEMENTSYSTEM',\n # Actuator_Availability_Dictionary_Reporting='Verbose',\n # EMS_Runtime_Language_Debug_Output_Level='Verbose',\n # 
Internal_Variable_Availability_Dictionary_Reporting='Verbose',\n # )\n\n #lets create now the final outputs\n idf.newidfobject(\n 'OUTPUT:VARIABLE',\n Variable_Name=name,\n Reporting_Frequency=Freq,\n )\n\ndef setEMS4TotDHWPow(idf,building,zonelist,Freq,name):\n #lets create the temperature sensors for each zones and catch their volume\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:SENSOR',\n Name = 'DHWPow',\n OutputVariable_or_OutputMeter_Index_Key_Name = 'DHW',\n OutputVariable_or_OutputMeter_Name = 'Water Use Equipment Heating Rate'\n )\n\n #lets create the prgm collingManager\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAMCALLINGMANAGER',\n Name='Compute Total DHW Heat Pow',\n EnergyPlus_Model_Calling_Point='EndOfZoneTimestepBeforeZoneReporting' ,\n Program_Name_1='prgmDHWPow'\n )\n #lets create the global Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',\n Erl_Variable_1_Name='TotDHWPow' ,\n )\n #lets create the EMS Output Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',\n Name=name,\n EMS_Variable_Name='TotDHWPow' ,\n Type_of_Data_in_Variable='Averaged',\n Update_Frequency = 'ZoneTimeStep'\n )\n #lets create the program\n SumNumerator = 'DHWPow'\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAM',\n Name='prgmDHWPow',\n Program_Line_1='SET TotDHWPow = '+ SumNumerator,\n )\n #to uncomment if the EMS is not created before for the mean air tempeatrue\n # #lets create now the ouputs of this EMS\n # idf.newidfobject(\n # 'OUTPUT:ENERGYMANAGEMENTSYSTEM',\n # Actuator_Availability_Dictionary_Reporting='Verbose',\n # EMS_Runtime_Language_Debug_Output_Level='Verbose',\n # Internal_Variable_Availability_Dictionary_Reporting='Verbose',\n # )\n\n #lets create now the final outputs\n idf.newidfobject(\n 'OUTPUT:VARIABLE',\n Variable_Name=name,\n Reporting_Frequency=Freq,\n )\n\ndef Read_OutputsEso(CaseName,ExtSurfNames, ZoneOutput):\n #visualization of the results\n eso = esoreader.read_from_path(CaseName)\n 
ZoneAgregRes = {}\n BuildAgregRes = {}\n #We agregate results per storey\n res ={}\n for idx in eso.dd.variables.keys():\n currentData = eso.dd.variables[idx]\n if 'Surface' in currentData[2] and currentData[1] not in ExtSurfNames:\n continue\n if currentData[1].find('STOREY')>0:\n try:\n nb = int(currentData[1][currentData[1].find('STOREY')+6:])\n except:\n test = 1\n finished = 0\n while finished == 0:\n try:\n nb = int(currentData[1][currentData[1].find('STOREY')+6:-test])\n finished = 1\n except:\n test += 1\n Firstkey = 'STOREY '+str(nb)\n else:\n Firstkey = currentData[1]\n if not res:\n res[Firstkey] = {}\n ZoneAgregRes[Firstkey] = {} #currentData[1]\n if not currentData[1] in res.keys():\n findsame = 0\n for key in res.keys():\n if currentData[1] in key or key in currentData[1]:\n Firstkey = key\n findsame = 1\n if not findsame:\n res[Firstkey] = {}\n ZoneAgregRes[Firstkey] = {}\n if not currentData[2] in res[Firstkey].keys():\n res[Firstkey][currentData[2]] = {}\n ZoneAgregRes[Firstkey][currentData[2]] = {}\n res[Firstkey][currentData[2]]['Data'] = []\n res[Firstkey][currentData[2]]['Data'].append(eso.data[idx])\n res[Firstkey][currentData[2]]['TimeStep'] = currentData[0]\n res[Firstkey][currentData[2]]['Unit'] = currentData[3]\n BuildAgregRes['HeatedArea']= {}\n BuildAgregRes['NonHeatedArea'] = {}\n BuildAgregRes['Other']= {}\n for nb, key in enumerate(res):\n KeyArea = 'Other'\n if 'STOREY' in key:\n numstor= int(key[6:])\n KeyArea= 'NonHeatedArea' if numstor<0 else 'HeatedArea'\n for j, i in enumerate(res[key]):\n ZoneAgregRes[key][i]['GlobData'] = []\n ZoneAgregRes[key][i]['TimeStep'] = res[key][i]['TimeStep']\n ZoneAgregRes[key][i]['Unit'] = res[key][i]['Unit']\n ZoneAgregRes[key][i]['NbNode'] = len(res[key][i]['Data'])\n #here I need to introduce some filtering in order to catch only outside facing surfaces (to compare core/perimeter thermal zoning woth other kind\n if res[key][i]['Unit'] in {'C','W/m2-K','W/m2'}: #then lets compute the mean, if not 
lets sum it\n for ii in zip(*res[key][i]['Data']):\n ZoneAgregRes[key][i]['GlobData'].append(sum(ii)/len(res[key][i]['Data']))\n else:\n for ii in zip(*res[key][i]['Data']):\n ZoneAgregRes[key][i]['GlobData'].append(sum(ii))\n #lets deal with data now at the building level\n if not i in BuildAgregRes[KeyArea].keys():\n BuildAgregRes[KeyArea][i] = {}\n BuildAgregRes[KeyArea][i]['GlobData'] = ZoneAgregRes[key][i]['GlobData']\n BuildAgregRes[KeyArea][i]['TimeStep'] = ZoneAgregRes[key][i]['TimeStep']\n BuildAgregRes[KeyArea][i]['Unit'] = ZoneAgregRes[key][i]['Unit']\n BuildAgregRes[KeyArea][i]['NbNode'] = ZoneAgregRes[key][i]['NbNode']\n else:\n if res[key][i]['Unit'] in {'C','W/m2-K','W/m2'}:\n BuildAgregRes[KeyArea][i]['GlobData'] = [sum(x)/2 for x in zip(BuildAgregRes[KeyArea][i]['GlobData'], ZoneAgregRes[key][i]['GlobData'])]\n else:\n BuildAgregRes[KeyArea][i]['GlobData'] = [sum(x) for x in zip(BuildAgregRes[KeyArea][i]['GlobData'], ZoneAgregRes[key][i]['GlobData'])]\n\n return ZoneAgregRes if ZoneOutput else BuildAgregRes\n\ndef Plot_Outputs(res,idf):\n # visualization of the results\n timestp = idf.idfobjects['TIMESTEP'][0].Number_of_Timesteps_per_Hour\n endtime = int(len(res['Environment']['Site Outdoor Air Drybulb Temperature']['GlobData']) / timestp)\n for nb,key in enumerate(res):\n plt.figure(nb)\n for j,i in enumerate(res[key]):\n plt.subplot(2,int((len(res[key])-1)/2+1),j+1)\n if not res[key][i]['TimeStep'] in 'TimeStep':\n timestp = 1\n plt.plot(np.linspace(0, endtime, endtime * timestp), res[key][i]['GlobData'])\n plt.title(i+'('+res[key][i]['Unit']+')')\n\n plt.show()\n\ndef Read_Outputhtml(CaseName):\n #compairons of surfaces\n fname = CaseName\n filehandle = open(fname, 'r',encoding='latin-1').read() # get a file handle to the html file\n htables = readhtml.titletable(filehandle)\n #this few lines below is just to grab the names of outdoor facing surfaces and windows\n for i in range(len(htables)):\n if htables[i][0] in 'Opaque Exterior':\n 
Opaque_exterior = htables[i][1][1:]\n elif htables[i][0] in 'Exterior Fenestration':\n Windows_exterior = htables[i][1][1:]\n EndUsesIdx = 3\n ExtSurf = [name[0] for name in Opaque_exterior if 'WALL' in name[1]]\n ExtWin = [name[0] for name in Windows_exterior]\n ExtNames = ExtSurf+ExtWin\n Res = {}\n\n for key in range(len(htables[EndUsesIdx][1][1:-2])):\n Res[htables[EndUsesIdx][1][key+1][0]] = {}\n for val in range(len(htables[EndUsesIdx][1][0][1:])):\n Res[htables[EndUsesIdx][1][key+1][0]][htables[EndUsesIdx][1][0][val+1]] = htables[EndUsesIdx][1][key+1][val+1]\n return {'GlobRes':Res, 'OutdoorSurfacesNames' : ExtNames}\n\ndef Read_OutputError(CaseName):\n fname = CaseName\n Endsinfo = open(fname, 'r', encoding='latin-1').read()\n Endsinfo\n\nif __name__ == '__main__' :\n print('Set_Outputs Main')","sub_path":"CoreFiles/Set_Outputs.py","file_name":"Set_Outputs.py","file_ext":"py","file_size_in_byte":14607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"591289103","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\nM= [[0,0,0] for i in range(3)] \nM5=M\nM3=M[:]\nprint(M5)\nprint(M3[1][1])\nM[1][1]=5\nprint(M)\nimport copy\nM4=copy.deepcopy(M)\nM5=M\nM[1][1]=0\nprint(M4)\nprint(M5)","sub_path":"INF_1_BE_1/ListeCopie.py","file_name":"ListeCopie.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"545319484","text":"import django_filters\nfrom .models import Item, Category\nfrom django_filters import rest_framework as filters\n\n\nclass ItemFilter(django_filters.FilterSet):\n \n\n CHOICES = (\n ('ascending', 'Ascending'),\n ('descending', 'Decending')\n ) \n ordering = django_filters.ChoiceFilter(label='Ordering', choices=CHOICES, method='filter_by_order')\n \n\n class Meta:\n model = Item\n fields = {\n 'title':['icontains'],\n 'price': ['lt', 'gt'],\n \n }\n def filter_by_order(self,queryset,name,value):\n expression = 'pub_date' if value=='ascending' else '-pub_date'\n return queryset.order_by(expression)","sub_path":"core/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"300681770","text":"import socket\nimport _thread\nimport time\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\nclass Core(object):\n ipurl=0\n mode=1024\n menu1=False\n f=None\n network_speed=\"LAN\"\n menu2=False\n def GetData(self, url):\n self.url = url\n try:\n self.ipurl = socket.gethostbyname(self.url)\n except Exception as e:\n print (\"Invalid URL or IP\")\n exit(0)\n Core.ipurl=self.ipurl\n print (60*\"-\")\n print (22*\" \",bcolors.FAIL,\"Port Scanner v1\",bcolors.ENDC)\n print (60*\"-\")\n while Core.menu1 is not True:\n choice = input(\"\\n1 - simple \\n2 - extended\\n\")\n if choice == \"1\":\n Core.mode=1024\n menu=True\n break\n elif choice == \"2\":\n Core.mode=64000\n menu = True\n break\n else:\n print(\"Incorrect answer, choose 1 or 2\")\n while Core.menu2 is not True:\n choice = input(\"\\n1 - LAN \\n2 - Global Network\\n\")\n if choice == \"1\":\n Core.network_speed=0.05\n menu2=True\n break\n elif choice == \"2\":\n Core.network_speed=0.3\n menu2 = True\n break\n else:\n print(\"Incorrect answer, choose 1 or 2\")\n\n def Start_Scan(self, port_start, port_end):\n Core.f = open(Core.ipurl, \"a\")\n try:\n for x in range(port_start,port_end):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n res = sock.connect_ex((Core.ipurl,x))\n if res is 0:\n tmp=\"Port\",x,\"is open\", socket.getservbyport(x)\n tmp1=str(tmp[0])+\" \"+str(tmp[1])+\" \"+str(tmp[2])+\" \"+str(tmp[3])\n print(bcolors.OKGREEN,tmp1)\n Core.f.write(str(tmp)+\"\\n\")\n Core.f.close()\n except Exception as e:\n print (e)\ntry:\n scan = Core()\n scan.GetData(input(\"Type IP or address\\n\"))\n print(bcolors.WARNING,\"Range:\",Core.mode,\"\\n Target:\",Core.ipurl,\"\\n Scanning speed:\",Core.network_speed,bcolors.ENDC)\n print(bcolors.BOLD,\"Please wait...\",bcolors.ENDC)\n for count in range(0,Core.mode):\n #print 
(Core.mode)\n time.sleep(Core.network_speed)\n _thread.start_new_thread(scan.Start_Scan, (count,count+1))\n if count > Core.mode:\n exit(0)\nexcept Exception as e:\n print (e)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"120953099","text":"from keras.preprocessing.image import ImageDataGenerator\n\n\ndef train_on_gen(dirname, shape_target=(224,224)):\n train_datagen = ImageDataGenerator()\n train_generator = train_datagen.flow_from_directory(\n directory=dirname,\n target_size=shape_target,\n color_mode=\"rgb\",\n batch_size=32,\n class_mode=\"categorical\",\n shuffle=True,\n seed=42\n )\n return train_generator","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"275597014","text":"\"\"\"\nProjeto final de Eletrónica de Potência 2015/2016.\nCálculo do condensador.\n\"\"\"\n\n__author__ = \"paulogp\"\n__copyright__ = \"Copyright (C) 2015 Paulo G.P.\"\n__date__ = \"09/12/2015\"\n\n\ndef T(f):\n \"\"\"\n Período.\n :param f: float - frequência\n :return: float\n \"\"\"\n return 1 / f\n\n\ndef vo(_D, _vi):\n \"\"\"\n Tensão de saída.\n :param D: float - razão cíclica\n :param vi: float - tensão de entrada\n :return: float\n \"\"\"\n return _D * _vi\n\n\ndef C_out(_D, _T, _vo, _L, _Dvo):\n \"\"\"\n Condensador de saída.\n :param D: float - razão cíclica\n :param T: float - período\n :param vi: float - tensão de entrada\n :param L: float - indutância\n :param Dvo: float - ondulação da tensão de saída\n :return: float\n \"\"\"\n return ((1 - _D) * _T**2 * _vo) / (8 * _L * _Dvo)\n\nif __name__ == \"__main__\":\n # parametros\n D = 0.5 # 0.4\n vi = 11\n f = 40000 # 50000\n L = 680 * 10**-6\n Dvo = 0.1\n\n # output\n print(\"T: {:.3}s\".format(T(f)))\n print(\"vo: {:.3}V\".format(vo(D, vi)))\n print(\"Co: {:.3}F\".format(C_out(D, T(f), vo(D, vi), L, Dvo)))\n","sub_path":"eltrp/calculo_projeto.py","file_name":"calculo_projeto.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"84775439","text":"import logging\n\nlogging.basicConfig(level=logging.DEBUG)\nLOG = logging.getLogger(__name__)\n\n# create a file handler\nhandler = logging.FileHandler('cart_Log.log')\n\n# create a logging format\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\n# add the handlers to the logger\nLOG.addHandler(handler)\n","sub_path":"src/com/jalasoft/shopping_car/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"313363196","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport time\nfrom nxp_imu import IMU\nfrom slurm.rate import Rate\nfrom the_collector import BagIt, Pickle\nfrom colorama import Fore\nfrom threaded_camera import ThreadedCamera\n\nthumb = False\nbag = BagIt(Pickle)\nimu = IMU(gs=2, dps=2000, verbose=True)\nrate = Rate(20)\n\n# res = (3008,480)\n# res = (1024,720)\n# res = (640,480)\nres = (320,240)\ncamera = ThreadedCamera(res, fmt=\"gray\")\n\nstart = time.time()\nlast = start\ntry:\n while True:\n a, m, g = imu.get()\n ts = time.time()\n dt = ts - last\n hz = int(1/dt)\n last = ts\n print('{} Hz/{:.2f} s | {:>5.2f} {:>5.2f} {:>5.2f} | {:>6.1f} {:>6.1f} {:>6.1f} | {:>6.1f} {:>6.1f} {:>6.1f} |'.format(\n hz,dt,\n a[0], a[1], a[2],\n m[0], m[1], m[2],\n g[0], g[1], g[2]),\n end = \"\\r\"\n )\n bag.push(\"imu\", (a,g,m,time.time(),))\n\n frame = camera.read()\n\n if frame is None:\n print(f\"{Fore.RED}*** Camera Fail ***{Fore.RESET}\")\n else:\n bag.push(\"camera\", (frame,time.time(),))\n\n rate.sleep()\n\nexcept KeyboardInterrupt:\n camera.stop()\n camera.join()\n print(\">> bye ...\")\n bag.write(\"test\", timestamp=False)\n","sub_path":"python/dev/path-data/grab-data.py","file_name":"grab-data.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"109958896","text":"from datetime import datetime, date, time, timedelta\nfrom openerp import models, fields, api\nfrom openerp.tools.translate import _\nfrom openerp.osv import osv\n\n\nclass CustomSalesQuotation(models.Model):\n _inherit = 'sale.order'\n\n days_without_activity = fields.Integer(string=\"Notify after (days)\", default=5)\n group_to_be_notified = fields.Many2one('res.groups',string=\"Group to be notified\")\n inactivity_notification = fields.Boolean(string=\"Inactivity Notification\", default=True)\n notification_has_been_generated = fields.Boolean(string=\"Notification Generated\", default=False)\n\n @api.model\n def _get_see_all_leads_group(self):\n res = self.env['res.groups'].search([('name', '=', 'See all Leads')], limit=1)\n return res and res.id or False\n\n _defaults = {\n 'group_to_be_notified' : _get_see_all_leads_group,\n }\n\n\n @api.multi\n def check_for_inactive_quotations(self):\n\n #get the quotations in draft and have the inactivity notifier activated\n quotations = self.env['sale.order'].search([('state', '=', 'draft'),('inactivity_notification', '=', True),\n ('notification_has_been_generated', '=', False)])\n subtype = self.env['mail.message.subtype'].search([('id', '=', 1)])\n\n # For each quotation, check if the difference between the creation date\n # and update date is already equal or in excess of the\n # days without activity\n for quotation in quotations:\n write_date = datetime.strptime(quotation.write_date, '%Y-%m-%d %H:%M:%S')\n create_date = datetime.strptime(quotation.create_date, '%Y-%m-%d %H:%M:%S')\n days_inactivity = write_date - create_date\n\n pref_msg = \"Quotation number {} did not have any activity for the past {} days.\"\n pref_subject = \"{}: Quotation has no activity for {} days.\"\n pref_body = \"Get an update for Quotation {}\"\n if days_inactivity >= quotation.days_without_activity:\n # Generate notification\n quotation.notification_has_been_generated = True\n msg = 
pref_msg.format(quotation.name, str(days_inactivity))\n\n group = quotation.group_to_be_notified\n recipient_partners = list()\n for recipient in group.users:\n recipient_partners.append((4, recipient.partner_id.id))\n\n new_context = {'thread_model':'sale.order'}\n post_vars = {\n 'subject': pref_subject.format(quotation.name, quotation.days_without_activity),\n 'body': pref_body.format(quotation.name),\n 'partner_ids': recipient_partners,\n 'model' : 'sale.order',\n 'res_id' : quotation.id,\n }\n\n thread_pool = self.pool.get('mail.thread')\n thread_pool.message_post(\n self.env.cr,\n self.env.uid,\n quotation.id,\n type=\"notification\",\n subtype=\"mt_comment\",\n context=new_context,\n **post_vars\n )\n","sub_path":"quotation_notifier/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"52657457","text":"from pandas._libs.tslibs.timestamps import Timestamp\nimport yfinance as yf\nimport pandas as pd\nimport argparse\nimport datetime\nimport logging\nimport pprint\nimport json\nimport sys\nimport os\n\n\n\ndef initArgparse() -> argparse.ArgumentParser:\n def make_wide(formatter, w=120, h=36):\n \"\"\"Return a wider HelpFormatter, if possible.\"\"\"\n try:\n # https://stackoverflow.com/a/5464440\n kwargs = {'width': w, 'max_help_position': h}\n formatter(None, **kwargs)\n return lambda prog: formatter(prog, **kwargs)\n except TypeError:\n return formatter\n\n parser = argparse.ArgumentParser(\n usage=\"%(prog)s [OPTIONS]...\",\n description=\"Fetches historical data for strategies' backtests\",\n formatter_class=make_wide(argparse.HelpFormatter, w=80, h=20)\n )\n\n parser.add_argument(\n \"-v\", \"--version\", action=\"version\",\n version=f\"{parser.prog} version 1.0.0\"\n )\n\n loglevels = [\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\"]\n parser.add_argument(\n \"-l\", \"--loglevel\", metavar=\"LOGLEVEL\",\n default=\"INFO\", choices=loglevels,\n help=f\"Set LOGLEVEL{loglevels} [default='INFO']\"\n )\n\n def getDefultConfigFilename():\n return os.path.splitext(sys.argv[0])[0] + \".json\"\n\n defaultConfig = getDefultConfigFilename()\n parser.add_argument(\n \"-c\", \"--config\", default=defaultConfig,\n help=f\"Set configuration [default={defaultConfig}]\"\n )\n\n parser.add_argument(\n \"-f\", \"--force\", action='count', default=0,\n help=\"Overwrite existing data files [default=(skip download if the file exists)]\"\n )\n\n parser.add_argument(\n \"-d\", \"--dryrun\", action='store_true', default=False,\n help=\"Read config and validate target folders but don't download any data\"\n )\n\n return parser\n\nclass ConfigError(Exception):\n pass\n\ndef parseConfig(configJson):\n\n def collectSymbolsConfig(configJson) -> dict:\n\n def collectSymbolConfigParams(configSectionName, paramsJson) -> dict:\n params = {}\n currentDate = 
datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n if \"period\" in paramsJson:\n periodJson = paramsJson[\"period\"]\n\n if \"start\" in periodJson:\n start = datetime.datetime.strptime(periodJson[\"start\"], \"%Y-%m-%d\")\n else:\n raise ConfigError(f\"[{configSectionName}] Config doesn't have mandatory 'period.start' field\")\n if start >= currentDate:\n raise ConfigError(f\"[{configSectionName}] Config start[{start}] must be before current date[{currentDate}]\")\n\n if \"end\" in periodJson:\n end = datetime.datetime.strptime(periodJson[\"end\"], \"%Y-%m-%d\")\n if start < end:\n params[\"period\"] = {\"start\" : start, \"end\" : end}\n elif start == end:\n raise ConfigError(f\"[{configSectionName}] Config period start[{start}] == end[{end}]\")\n else:\n logging.warn(f\"[{configSectionName}] Config period start[{start}] > end[{end}]. Fixing it by swapping 2 values\")\n params[\"period\"] = {\"start\" : end, \"end\" : start}\n else:\n params[\"period\"] = {\"start\" : start, \"end\" : currentDate}\n\n params[\"symbols\"] = paramsJson.get(\"symbols\", [])\n\n if len(params[\"symbols\"]) and not \"period\" in params:\n raise ConfigError(f\"[{configSectionName}] Config has symbols but doesn't have period. Please specify period (or remove symbols)\")\n if not len(params[\"symbols\"]) and \"period\" in params:\n raise ConfigError(f\"[{configSectionName}] Config doesn't have symbols but has period. 
Please specify symbols (or remove period)\")\n\n return params\n\n def mergeSymbolsConfig(symbolsConfig, params) -> dict:\n newPeriod = params[\"period\"]\n for symbol in params[\"symbols\"]:\n if symbol in symbolsConfig:\n existingPeriod = symbolsConfig[symbol][\"period\"]\n symbolsConfig[symbol] = {\n \"period\": {\n \"start\" : min(existingPeriod[\"start\"], newPeriod[\"start\"]),\n \"end\" : max(existingPeriod[\"end\"], newPeriod[\"end\"])\n }\n }\n else:\n symbolsConfig[symbol] = {\"period\" : newPeriod}\n\n symbolsConfig = {}\n configSectionName = \"TopLevel\"\n globalParams = collectSymbolConfigParams(configSectionName, configJson)\n mergeSymbolsConfig(symbolsConfig, globalParams)\n if \"strategy\" in configJson:\n for stratJson in configJson[\"strategy\"]:\n configSectionName = stratJson.get(\"name\", \"UnknownStrat\")\n stratParams = collectSymbolConfigParams(configSectionName, stratJson)\n if len(stratParams[\"symbols\"]):\n mergeSymbolsConfig(symbolsConfig, stratParams)\n else:\n logging.warn(f\"[{configSectionName}] Config doesn't have symbols. This section will be skipped\")\n\n if not len(symbolsConfig):\n logging.warn(\"Config doesn't have symbols. 
Nothing to download\")\n\n return symbolsConfig\n\n def collectInterval(configJson) -> str:\n if \"interval\" in configJson:\n interval = configJson[\"interval\"]\n acceptedIntervals = [\"1h\",\"1d\"]\n if interval not in acceptedIntervals:\n raise ConfigError(\"[TopLevel] Config 'interval' field recognized values: {acceptedIntervals}, however '{interval}' found\")\n return interval\n else:\n raise ConfigError(\"[TopLevel] Config must contain 'interval' field, possible values : {acceptedIntervals}\")\n\n def collectFolder(configJson) -> str:\n if \"folder\" in configJson:\n return configJson[\"folder\"]\n else:\n raise ConfigError(\"[TopLevel] Config must contain 'folder' field\")\n\n symbolsFetchConfig = {\n \"symbols\" : collectSymbolsConfig(configJson),\n \"interval\" : collectInterval(configJson),\n \"folder\" : collectFolder(configJson)\n }\n\n return symbolsFetchConfig\n\n\ndef fetchData(args) -> None:\n logging.info(f\"Loading configuration from file : {args.config}\")\n with open(args.config) as configFile:\n baseDir = os.path.dirname(os.path.abspath(args.config))\n logging.debug(f\"Config folder : {baseDir}\")\n\n config = json.load(configFile)\n symbolsFetchConfig = parseConfig(config)\n logging.debug(f\"Use parsed config to fetch symbol data:\\n{pprint.pformat(symbolsFetchConfig)}\")\n\n def prepareFolder(baseDir, folder):\n if not os.path.isabs(folder):\n folder = os.path.normpath(os.path.join(baseDir, folder))\n if os.path.exists(folder):\n if not os.path.isdir(folder):\n raise Exception(f\"Path {folder} exists but it's not a folder. Please review your data folder strurcture\")\n else:\n logging.info(f\"Folder {folder} doesn't exist. 
Creating it...\")\n os.mkdir(folder)\n return folder\n \n symbolFolder = prepareFolder(baseDir, symbolsFetchConfig[\"folder\"])\n\n for symbol, symbolConfig in symbolsFetchConfig[\"symbols\"].items():\n symbolFilename = symbol.lower() + \".zip\"\n symbolPath = os.path.join(symbolFolder, symbolFilename)\n symbolStart = symbolConfig[\"period\"][\"start\"]\n symbolEnd = symbolConfig[\"period\"][\"end\"]\n interval = symbolsFetchConfig[\"interval\"]\n\n if os.path.exists(symbolPath):\n if not os.path.isfile(symbolPath):\n raise Exception(f\"Target symbol {symbol} path is not a regular file: {symbolPath}. Please review your data folder structure\")\n df = pd.read_csv(symbolPath, names=[\"Date\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\"])\n logging.debug(f\"First line :\\n{df.head(1)}\")\n logging.debug(f\"File dates : [{df.iloc[0]['Date']},{df.iloc[-1]['Date']}]\")\n fileStart = datetime.datetime.strptime(df.iloc[0]['Date'], \"%Y%m%d %H:%M\")\n fileEnd = datetime.datetime.strptime(df.iloc[-1]['Date'], \"%Y%m%d %H:%M\")\n\n isIncompleteDataFile = symbolStart < fileStart or fileEnd < symbolEnd\n if isIncompleteDataFile:\n shouldReplaceExistingFile = args.force > 0\n logMsg = (\n f\"Symbol {symbol} file exists : {symbolPath}. \"\n f\"However it doesn't contain required historical period. \"\n f\"File dates [{fileStart},{fileEnd}]. \"\n f\"Configured dates [{symbolStart},{symbolEnd}].\")\n else:\n shouldReplaceExistingFile = args.force > 1\n logMsg = (\n f\"Symbol {symbol} file exists : {symbolPath}. \"\n f\"It contains larger period of histrocal data. \"\n f\"File dates [{fileStart},{fileEnd}]. 
\"\n f\"Configured dates [{symbolStart},{symbolEnd}].\")\n\n if not shouldReplaceExistingFile:\n requiredForce = \"-f\" if isIncompleteDataFile else \"-ff\"\n logMsg += f\" Skipping symbol download (you can use {requiredForce} to enforce it).\"\n if isIncompleteDataFile:\n logging.info(logMsg)\n else:\n logging.debug(logMsg)\n continue\n else:\n shouldReplaceExistingFile = False\n\n def symbolEndAdjustment(interval):\n # This adjustment is required because:\n # 1) Yahoo Finance interpret \"end\" as open range, i.e. [start,end);\n # 2) Next day (or hour) may not be tradable so we need to more than 1 day or 1 hour\n if interval == \"1d\":\n return datetime.timedelta(days=7)\n elif interval == \"1h\":\n return datetime.timedelta(days=1, hours=1)\n else:\n raise Exception(f\"Uknonwn interval {interval}\")\n\n symbolEnd += symbolEndAdjustment(interval)\n\n logging.info(f\"Downloading {symbol} : {symbolStart} : {symbolEnd}\")\n logging.getLogger().handlers[0].flush()\n df = yf.download(\n symbol,\n start=symbolStart,\n end=symbolEnd,\n interval=interval,\n auto_adjust = True\n )\n print(\"Done\")\n sys.stdout.flush()\n\n def yahooFinanceDateToQuantConnect(yfDate : Timestamp) -> str:\n # 2021-06-30 -> 20210630 00:00\n return yfDate.strftime(\"%Y%m%d %H:%M\")\n def yahooFinanceNumToQuantConnect(yfNum : float) -> int:\n # 427.209991 -> 4272099\n return int(yfNum * 10000)\n logging.debug(f\"Got data\\n{df.head(2)}\\n...\")\n df.reset_index(level=0, inplace=True)\n\n # I use df.columns[0] instead of \"Date\" because yfinance doesn't always return the same name of the field for different intervals\n df[\"QCDate\"] = df[df.columns[0]].transform(yahooFinanceDateToQuantConnect)\n df[\"QCOpen\"] = df[\"Open\"].transform(yahooFinanceNumToQuantConnect)\n df[\"QCHigh\"] = df[\"High\"].transform(yahooFinanceNumToQuantConnect)\n df[\"QCLow\"] = df[\"Low\"].transform(yahooFinanceNumToQuantConnect)\n df[\"QCClose\"] = df[\"Close\"].transform(yahooFinanceNumToQuantConnect)\n\n 
logging.debug(f\"convert data format\\n{df[['QCDate', 'QCOpen', 'QCHigh', 'QCLow', 'QCClose', 'Volume']].head(1)}\")\n\n downloadSymbolPath = symbolPath + \".download\"\n csvFileName = symbol.lower() + \".csv\"\n df[['QCDate', 'QCOpen', 'QCHigh', 'QCLow', 'QCClose', 'Volume']].to_csv(\n downloadSymbolPath,\n index=False,\n header=False,\n compression={\n \"method\" : 'zip',\n \"archive_name\" : csvFileName\n }\n )\n if shouldReplaceExistingFile:\n os.replace(downloadSymbolPath, symbolPath)\n else:\n os.rename(downloadSymbolPath, symbolPath)\n logging.info(\"Data fetch is completed\")\n\n\ndef main() -> None:\n parser = initArgparse()\n args = parser.parse_args()\n\n logging.basicConfig(format=\"%(asctime)s %(levelname)-4s %(message)s\", level=logging.getLevelName(args.loglevel), datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n fetchData(args)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"src/strategy/fetchData.py","file_name":"fetchData.py","file_ext":"py","file_size_in_byte":13001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"25422681","text":"# -*- coding: utf-8 -*-\n__author__ = 'jack'\n\nclass Solution:\n def preorder(self, root):\n res = []\n return self.helper(root, res)\n\n def helper(self, root, traverse):\n if root:\n traverse.append(root.val)\n if root.children:\n for child in root.children:\n self.helper(child, traverse)\n return traverse","sub_path":"Week_02/589.n_ary_tree_preorder_traversal.py","file_name":"589.n_ary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"378807182","text":"#\n# anova_feature_selection.py\n#\n# z katalogu --input-dir\n# trzy pliki: train_data.pkl, test_data.pkl, class_names.pkl\n#\n# Uczy się na danych z train_data.pkl wyliczając ANOVA i wybierając k-najlepszych cech\n# To same cechy wybiera z train_data.pkl\n# Wynik zapisywany jest do --output-dir\n\n# UWAGA zarówno output- jak i input-dir wpisujemy z '\\' (na windowsie) lub '/' (na Linuxie) na końcu ścieżki\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n# from sklearn.manifold import TSNE\n# from sklearn import decomposition\nfrom sklearn.feature_selection import SelectKBest, f_classif\n# from sklearn import datasets, metrics\n# from sklearn.manifold import TSNE\nfrom sklearn.metrics import classification_report\n\nimport time\nimport argparse\n\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\n# from sklearn.naive_bayes import GaussianNB\n# from sklearn.neighbors import KNeighborsClassifier\n# from sklearn.svm import SVC\n\n\ndef read_data(input_dir):\n # wczytujemy dane treningowe:\n train_data_infile = open(input_dir + 'train_data.pkl', 'rb') # czytanie z pliku\n data_train_all_dict = pickle.load(train_data_infile)\n\n x_train = data_train_all_dict[\"data\"]\n y_train = data_train_all_dict[\"classes\"]\n\n # wczytujemy dane testowe:\n test_data_infile = open(input_dir + 'test_data.pkl', 'rb') # czytanie z pliku\n data_test_all_dict = pickle.load(test_data_infile)\n\n x_test = data_test_all_dict[\"data\"]\n\n y_test = data_test_all_dict[\"classes\"]\n\n # i nazwy klas\n cl_names_infile = open(input_dir + 'class_names.pkl', 'rb')\n classes_names = pickle.load(cl_names_infile)\n\n print(\"Data loaded from \" + input_dir)\n\n return x_train, y_train, x_test, y_test, classes_names\n\n\ndef save_data(x_train, y_train, x_test, y_test, classes_names, output_dir):\n # zapisujemy dane treningowe\n x_train_all_dict = {'data': x_train,\n 'classes': y_train}\n\n 
train_data_outfile = open(output_dir + 'train_data.pkl', 'wb')\n pickle.dump(x_train_all_dict, train_data_outfile)\n\n # zapisujemy dane testowe\n x_test_all_dict = {'data': x_test,\n 'classes': y_test}\n\n test_data_outfile = open(output_dir + 'test_data.pkl', 'wb')\n pickle.dump(x_test_all_dict, test_data_outfile)\n\n # zapisujemy nazwy klas\n cl_names_outfile = open(output_dir + 'class_names.pkl', 'wb')\n pickle.dump(classes_names, cl_names_outfile)\n\n print(\"Pickles saved in \", output_dir)\n\n\ndef ParseArguments():\n parser = argparse.ArgumentParser(description=\"Project\")\n parser.add_argument('--input-dir', default=\"\", required=True, help='data dir (default: %(default)s)')\n parser.add_argument('--output-dir', default=\"\", required=True, help='output dir (default: %(default)s)')\n parser.add_argument('--n', default=\"\", required=True, help='output dir (default: %(default)s)')\n args = parser.parse_args()\n\n return args.input_dir, args.output_dir, args.n\n\n\ninput_dir, output_dir, n_comp = ParseArguments()\n\nn_comp = int(n_comp)\n\n# wczytujemy dane\nx_train, y_train, x_test, y_test, classes_names = read_data(input_dir)\n\n###ANOVA\n\nprint(\"ANOVA reduction \", x_train.shape[1], \" -> \", n_comp, \" ...\", end =\" \")\n\nanova_filter = SelectKBest(f_classif, k=n_comp)\n\n## wwybranie odpowiednich cech na podstawie wyliczeń wykonanych na x_train\nstart_time = time.time()\nx_train_reduced = anova_filter.fit_transform(x_train, y_train)\nprint(\" took %s seconds \" % round((time.time() - start_time),5))\n\n# wybranie tych samych cech z x_test\n\nx_test_reduced = anova_filter.transform(x_test)\n\n\n# zapisujemy dane\n\nsave_data(x_train_reduced, y_train, x_test_reduced, y_test, classes_names, output_dir)\n","sub_path":"scripts/z6_anova_feature_selection.py","file_name":"z6_anova_feature_selection.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"612780373","text":"import os\nimport sys\nimport shutil\nimport tempfile\nfrom os.path import dirname, isdir, realpath\n\n\ndef _convert_path(path):\n \"\"\"Given a Unix path, convert it for the current platform.\n \"\"\"\n return os.sep.join(path.split('/'))\n\n\ndef _convert_paths(paths):\n \"\"\"Given a tuple of Unix paths, convert them for the current platform.\n \"\"\"\n return tuple([_convert_path(p) for p in paths])\n\n\ndef _get_tempdir():\n \"\"\"Return a temporary directory we can use for our fixture.\n \"\"\"\n return os.path.realpath(os.path.join(tempfile.gettempdir(), 'fsfix'))\n\n\nclass Mk(object):\n\n def __init__(self, root=None):\n self.root = root if root is not None else _get_tempdir()\n self.cwd = None # set in __call__\n self.teardown() # start clean\n\n\n def __call__(self, *treedef):\n \"\"\"Given a treedef, build a filesystem fixture in self.root.\n\n treedef is a sequence of strings and tuples. If a string, it is interpreted\n as a path to a directory that should be created. If a tuple, the first\n element is a path to a file, the second is the contents of the file. 
We do\n it this way to ease cross-platform testing.\n\n \"\"\"\n self.cwd = os.getcwd()\n os.mkdir(self.root)\n os.chdir(self.root)\n for item in treedef:\n if isinstance(item, basestring):\n path = _convert_path(item.lstrip('/'))\n path = os.sep.join([self.root, path])\n os.makedirs(path)\n elif isinstance(item, tuple):\n filepath, contents = item\n path = _convert_path(filepath.lstrip('/'))\n path = os.sep.join([self.root, path])\n parent = dirname(path)\n if not isdir(parent):\n os.makedirs(parent)\n file(path, 'w').write(contents)\n\n\n def teardown(self):\n \"\"\"Roll back fixture.\n\n - reset the current working directory\n - remove self.root from the filesystem\n - remove self.root from sys.path\n\n \"\"\"\n if self.cwd is not None:\n os.chdir(self.cwd)\n self.remove()\n while self.root in sys.path:\n sys.path.pop(self.root)\n\n tear_down = tearDown = teardown\n\n\n def resolve(self, path=''):\n \"\"\"Given a relative path, return an absolute path under self.root.\n\n The incoming path is in UNIX form (/foo/bar.html). The outgoing path is in\n native form, with symlinks removed.\n\n \"\"\"\n path = os.sep.join([self.root] + path.split('/'))\n return realpath(path)\n\n\n def remove(self):\n \"\"\"Remove the filesystem fixture at self.root.\n \"\"\"\n if isdir(self.root):\n shutil.rmtree(self.root)\n","sub_path":"fsfix.py","file_name":"fsfix.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"101731172","text":"from nba_api.stats.endpoints import leaguegamefinder, playergamelog, playernextngames, commonplayerinfo, playercareerstats\nfrom nba_api.stats.static import players, teams\n\nnba_players = players.get_players()\nnba_teams = teams.get_teams()\n\n\ndef get_player_info(player_id):\n commoninfo = commonplayerinfo.CommonPlayerInfo(player_id=player_id)\n\n return {'commoninfo': commoninfo.get_dict()}\n\n\ndef get_player_stats(player_id, season_year, season_type):\n career = playercareerstats.PlayerCareerStats(\n player_id=player_id, per_mode36='Totals')\n\n season = career.get_dict()\n season_stats = season['resultSets'][0]['rowSet'][-1]\n\n ppg = season_stats[26] / season_stats[6]\n ast = season_stats[21] / season_stats[6]\n reb = season_stats[20] / season_stats[6]\n\n return {'careerstats': season, 'PTS': \"{:.1f}\".format(ppg), 'AST': \"{:.1f}\".format(ast), 'REB': \"{:.1f}\".format(reb)}\n\n\ndef get_ids(player_name, team_name):\n player_info = [\n player for player in nba_players if player['full_name'] == player_name][0]\n player_id = player_info['id']\n\n team_info = [\n team for team in nba_teams if team['abbreviation'] == team_name][0]\n team_id = team_info['id']\n\n return {'player_id': player_id, 'team_id': team_id}\n\n\ndef get_games(player_id, season_year, season_type):\n game = playergamelog.PlayerGameLog(\n player_id=player_id, season=season_year, season_type_all_star=season_type)\n\n next_game = playernextngames.PlayerNextNGames(\n number_of_games=\"1\", player_id=player_id, season_all=season_year, season_type_all_star=season_type)\n\n return {'game': game.get_dict(), 'nextgame': next_game.get_dict()}\n","sub_path":"info/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"628766206","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nimport caffe\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os.path\nimport json\nimport scipy\nimport argparse\nimport math\nimport pylab\nfrom skimage import io\nfrom sklearn.preprocessing import normalize\nimport csv\nimport os\nfrom collections import Counter\n\n\n# In[ ]:\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=str, required=True, help=':the url of your model file')\nparser.add_argument('--weights', type=str, required=True, help=':the url of your weights file')\nparser.add_argument('--img_list_root', type=str, required=True, help=\":the original iamges' folder\")\nparser.add_argument('--output_csv', type=str, required=True, help=':where you want to output csv file')\nparser.add_argument('--output_image_root', type=str, required=True, help='where you want to output classified image files')\nargs = parser.parse_args()\nargs.img_list_root = args.img_list_root+'/'\nargs.output_image_root = args.output_image_root+'/'\n\n# model = '/home/bigdata/caffe-segnet/Segnet/Example_Models/segnet_model_driving_webdemo.prototxt'\n# weights = '/home/bigdata/caffe-segnet/Segnet/Models/caffemodel/segnet_weights_driving_webdemo.caffemodel'\n# img_list_root = '/home/bigdata/caffe-segnet/Segnet/testdata/'\n# output_csv_root = '/home/bigdata/temp.csv'\n# output_classified_image_root = '/home/bigdata/caffe-segnet/Segnet/temp/'\n\n\n# In[ ]:\n\n\n# initialize the net\nnet = caffe.Net(args.model, args.weights, caffe.TEST)\n\n\n# In[ ]:\n\n\n# fit the picture for caffe\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\ntransformer.set_transpose('data', (2,0,1))\ntransformer.set_raw_scale('data', 255)\ntransformer.set_channel_swap('data', (2,1,0))\n\n\n# In[ ]:\n\n\n# define the colour of each class and the label_colours which index correspond with the class name\nSky = [128,128,128]\nBuilding = [128,0,0]\nPole = [192,192,128]\nRoad_marking = 
[255,69,0]\nRoad = [128,64,128]\nPavement = [60,40,222]\nTree = [128,128,0]\nSignSymbol = [192,128,128]\nFence = [64,64,128]\nCar = [64,0,128]\nPedestrian = [64,64,0]\nBicyclist = [0,128,192]\nRoad_Marking = [0,0,0]\nlabel_colours = np.array([Sky, Building, Pole, Road_Marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist])\n\n\n# In[ ]:\n\n\n# defien a list to store all image root\nimage_name_list = os.listdir(args.img_list_root)\ncsv_content = []\nimage_root_list = []\nfor name in image_name_list:\n image_root_list.append(args.img_list_root+name)\n csv_content.append([name.split('_')[1], name.split('_')[2], name.split('_')[3]])\n\n\n# In[ ]:\n\n\n# pocess for each image\nfor index, each_image in enumerate(image_root_list):\n net = caffe.Net(args.model, args.weights, caffe.TEST)\n im = caffe.io.load_image(each_image)\n net.blobs['data'].data[...] = transformer.preprocess('data', im)\n print('image - '+repr(each_image)+' is processing....')\n out = net.forward()\n predicted = net.blobs['argmax'].data\n ind = np.squeeze(predicted[0,:,:,:])\n ind_temp = np.reshape(ind, -1)\n ind_dic = Counter(ind_temp.tolist())\n # ----------------------------------------------\n # the code blow is used for save classified image \n r = ind.copy()\n b = ind.copy()\n g = ind.copy()\n for l in range(0,11):\n r[ind == l] = label_colours[l, 0]\n g[ind == l] = label_colours[l, 1]\n b[ind == l] = label_colours[l, 2]\n rgb = np.zeros((ind.shape[0], ind.shape[1], 3))\n rgb[:,:,0] = r/255.0\n rgb[:,:,1] = g/255.0\n rgb[:,:,2] = b/255.0\n io.imsave(args.output_image_root+repr(index)+'.jpg',rgb)\n # ----------------------------------------------\n for each_class in range(12):\n if each_class in ind_dic.keys():\n csv_content[index].append(ind_dic[each_class])\n else:\n csv_content[index].append(0)\n print('image - ', index, ' process finished....')\n\n\n# In[ ]:\n\n\n# write the content into csv file\nwith open(args.output_csv, 'w') as f:\n writer = csv.writer(f)\n 
writer.writerows(csv_content)\n\n","sub_path":"batch_segmentation.py","file_name":"batch_segmentation.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"4718494","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport pandas as pd\nimport plotly.figure_factory as ff\nfrom geopy.geocoders import Nominatim\nimport datetime\n\nlt=pd.read_csv('export.csv', sep=';', \n error_bad_lines=False,header=None)\nlt.head()\nlt.fillna(0)\nfor c in [1,2,3]:\n lt[c]=lt[c].str.strip().str.lower()\nlt[5]=lt[5].str.strip().replace('(', '').replace(')', '')\n\nlt[0]=pd.to_datetime(lt[0])\n#lt=lt[(lt[0].dt.year==2018)&(lt[0] =datetime.date(2018, slider[0] ,1)) & (lt[0]=datetime.date(2018, slider[0] ,1)) & (lt[0]=datetime.date(2018, slider[0] ,1)) & (lt[0]\", places[finalPath[i]], end = \"\")\n subdest.append(places[finalPath[i]])\n \n # TSP optimization using mip\n else:\n model = Model()\n x = [[model.add_var(var_type=BINARY) for j in V] for i in V]\n y = [model.add_var() for i in V]\n\n # objective function: minimize the distance\n model.objective = minimize(xsum(c[i][j]*x[i][j] for i in V for j in V))\n\n # constraint : leave each point only once\n for i in V:\n model += xsum(x[i][j] for j in V - {i}) == 1\n\n # constraint : enter each point only once\n for i in V:\n model += xsum(x[j][i] for j in V - {i}) == 1\n\n # subtour elimination\n for (i, j) in product(V - {n}, V - {n}):\n if i != j:\n model += y[i] - (n+1)*x[i][j] >= y[j]-n\n\n # optimizing\n status = model.optimize(max_seconds=30)\n print(status)\n\n print(\"=========================================================\")\n print(\"Tour\", str(tour+1), \":\")\n print(\"Subgraph Matrix:\")\n printMatrix(c, dest)\n print(\"\")\n\n # checking if a solution was found\n if model.num_solutions:\n print('route with total distance found: ', model.objective_value)\n print(startCity, end = \"\")\n nc = n\n subdest = []\n while True:\n nc = [i for i in V if 
x[nc][i].x >= 0.99][0]\n print(\" ->\", places[nc], end = \"\")\n subdest.append(places[nc])\n if nc == n:\n break \n else:\n print(model.objective_bound) \n\n print(\"\")\n print(\"\")\n\n # visualize the graph\n visualize(G, startCity, subdest, nodes, tour)\n\n plt.show()\n\n\ndef eucledian(point1, point2):\n return (math.sqrt((point1.x-point2.x)**2 + (point1.y-point2.y)**2))","sub_path":"src/mtsp.py","file_name":"mtsp.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"499559151","text":"from util import get_num_lines, get_pos2idx_idx2pos, index_sequence, get_vocab, embed_indexed_sequence, \\\n get_word2idx_idx2word, get_embedding_matrix, write_predictions, get_performance_VUAverb_val\nfrom util import TextDatasetWithGloveElmoSuffix as TextDataset\nfrom util import evaluate\nfrom model import RNNSequenceModel\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nimport csv\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport random\n\nprint(\"PyTorch version:\")\nprint(torch.__version__)\nprint(\"GPU Detected:\")\nprint(torch.cuda.is_available())\nusing_GPU = True\n\n\"\"\"\n1. Data pre-processing\n\"\"\"\n\n'''\n1.2 TroFi\nget raw dataset as a list:\n Each element is a triple:\n a sentence: string\n a index: int: idx of the focus verb\n a label: int 1 or 0\n'''\nraw_trofi = []\n\nwith open('../data/TroFi/TroFi_formatted_all3737.csv') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n sentence = line[1]\n label_seq = [0] * len(sentence.split())\n pos_seq = [0] * len(label_seq)\n verb_idx = int(line[2])\n verb_label = int(line[3])\n label_seq[verb_idx] = verb_label\n pos_seq[verb_idx] = 1 # idx2pos = {0: 'words that are not focus verbs', 1: 'focus verb'}\n raw_trofi.append([sentence.strip(), label_seq, pos_seq])\n\n\nprint('TroFi dataset division: ', len(raw_trofi))\n\n\n\n\"\"\"\n2. Data preparation\n\"\"\"\n'''\n2. 1\nget vocabulary and glove embeddings in raw dataset \n'''\n# vocab is a set of words\nvocab = get_vocab(raw_trofi)\n# two dictionaries. : 0, : 1\nword2idx, idx2word = get_word2idx_idx2word(vocab)\n# glove_embeddings a nn.Embeddings\nglove_embeddings = get_embedding_matrix(word2idx, idx2word, normalization=False)\n# elmo_embeddings\n# set elmos_trofi=None to exclude elmo vectors. 
Also need to change the embedding_dim in later model initialization\nelmos_trofi = h5py.File('../elmo/TroFi3737.hdf5', 'r')\n\n\n'''\n2. 2\nembed the datasets\n'''\nrandom.seed(0)\nrandom.shuffle(raw_trofi)\n\n# second argument is the post sequence, which we don't need\nembedded_trofi = [[embed_indexed_sequence(example[0], example[2], word2idx,\n glove_embeddings, elmos_trofi, None),\n example[2], example[1]]\n for example in raw_trofi]\n\n\n\n'''\n2. 3 10-fold cross validation\n'''\n# separate the embedded_sentences and labels into 2 list, in order to pass into the TextDataset as argument\nsentences = [example[0] for example in embedded_trofi]\nposs = [example[1] for example in embedded_trofi]\nlabels = [example[2] for example in embedded_trofi]\n# ten_folds is a list of 10 tuples, each tuple is (list_of_embedded_sentences, list_of_corresponding_labels)\nten_folds = []\nfold_size = int(3737 / 10)\nfor i in range(10):\n ten_folds.append((sentences[i * fold_size:(i + 1) * fold_size],\n poss[i * fold_size:(i + 1) * fold_size],\n labels[i * fold_size:(i + 1) * fold_size]))\n\nidx2pos = {0: 'words that are not focus verbs', 1: 'focus verb'}\n\noptimal_f1s = []\noptimal_ps = []\noptimal_rs = []\noptimal_accs = []\npredictions_all = []\nfor i in range(10):\n '''\n 2. 
3\n set up Dataloader for batching\n '''\n training_sentences = []\n training_labels = []\n training_poss = []\n for j in range(10):\n if j != i:\n training_sentences.extend(ten_folds[j][0])\n training_poss.extend(ten_folds[j][1])\n training_labels.extend(ten_folds[j][2])\n training_dataset_trofi = TextDataset(training_sentences, training_poss, training_labels)\n val_dataset_trofi = TextDataset(ten_folds[i][0], ten_folds[i][1], ten_folds[i][2])\n\n # Data-related hyperparameters\n batch_size = 10\n # Set up a DataLoader for the training, validation, and test dataset\n train_dataloader_trofi = DataLoader(dataset=training_dataset_trofi, batch_size=batch_size, shuffle=True,\n collate_fn=TextDataset.collate_fn)\n val_dataloader_trofi = DataLoader(dataset=val_dataset_trofi, batch_size=batch_size, shuffle=False,\n collate_fn=TextDataset.collate_fn)\n \"\"\"\n 3. Model training\n \"\"\"\n '''\n 3. 1 \n set up model, loss criterion, optimizer\n '''\n # Instantiate the model\n # embedding_dim = glove + elmo + suffix indicator\n # dropout1: dropout on input to RNN\n # dropout2: dropout in RNN; would be used if num_layers=1\n # dropout3: dropout on hidden state of RNN to linear layer\n RNNseq_model = RNNSequenceModel(num_classes=2, embedding_dim=300+1024, hidden_size=300,\n num_layers=1, bidir=True,\n dropout1=0.5, dropout2=0, dropout3=0.2)\n # Move the model to the GPU if available\n if using_GPU:\n RNNseq_model = RNNseq_model.cuda()\n # Set up criterion for calculating loss\n loss_criterion = nn.NLLLoss()\n # Set up an optimizer for updating the parameters of the rnn_clf\n rnn_optimizer = optim.Adam(RNNseq_model.parameters(), lr=0.001)\n # Number of epochs (passes through the dataset) to train the model for.\n num_epochs = 10\n\n '''\n 3. 
2\n train model\n '''\n train_loss = []\n val_loss = []\n performance_matrix = None\n val_f1 = []\n val_p = []\n val_r = []\n val_acc = []\n train_f1 = []\n # A counter for the number of gradient updates\n num_iter = 0\n model_index = 0\n comparable = []\n for epoch in range(num_epochs):\n print(\"Starting epoch {}\".format(epoch + 1))\n for (__, example_text, example_lengths, labels) in train_dataloader_trofi:\n example_text = Variable(example_text)\n example_lengths = Variable(example_lengths)\n labels = Variable(labels)\n if using_GPU:\n example_text = example_text.cuda()\n example_lengths = example_lengths.cuda()\n labels = labels.cuda()\n # predicted shape: (batch_size, seq_len, 2)\n predicted = RNNseq_model(example_text, example_lengths)\n batch_loss = loss_criterion(predicted.view(-1, 2), labels.view(-1))\n rnn_optimizer.zero_grad()\n batch_loss.backward()\n rnn_optimizer.step()\n num_iter += 1\n # Calculate validation and training set loss and accuracy every 200 gradient updates\n if num_iter % 200 == 0:\n avg_eval_loss, performance_matrix = evaluate(idx2pos, val_dataloader_trofi, RNNseq_model,\n loss_criterion, using_GPU)\n val_loss.append(avg_eval_loss)\n val_p.append(performance_matrix[1][0])\n val_r.append(performance_matrix[1][1])\n val_f1.append(performance_matrix[1][2])\n val_acc.append(performance_matrix[1][3])\n print(\"Iteration {}. Validation Loss {}.\".format(num_iter, avg_eval_loss))\n# avg_eval_loss, performance_matrix = evaluate(idx2pos, train_dataloader_trofi, RNNseq_model,\n# loss_criterion, using_GPU)\n# train_loss.append(avg_eval_loss)\n# train_f1.append(performance_matrix[1][1])\n# print(\"Iteration {}. 
Training Loss {}.\".format(num_iter, avg_eval_loss))\n print(\"Training done for fold {}\".format(i))\n\n \"\"\"\n 3.3\n plot the training process: MET F1 and losses for validation and training dataset\n \"\"\"\n# plt.figure(0)\n# plt.title('F1 for TroFI dataset on fold ' + str(i))\n# plt.xlabel('iteration (unit:200)')\n# plt.ylabel('F1')\n# plt.plot(val_f1, 'g')\n# # plt.plot(train_f1, 'b')\n# plt.legend(['Validation F1', 'Training F1'], loc='upper right')\n# plt.show()\n\n# plt.figure(1)\n# plt.title('Loss for TroFi dataset on fold ' + str(i))\n# plt.xlabel('iteration (unit:200)')\n# plt.ylabel('Loss')\n# plt.plot(val_loss, 'g')\n# # plt.plot(train_loss, 'b')\n# plt.legend(['Validation loss', 'Training loss'], loc='upper right')\n# plt.show()\n\n \"\"\"\n store the best f1\n \"\"\"\n print('val_f1: ', val_f1)\n idx = 0\n if math.isnan(max(val_f1)):\n optimal_f1s.append(max(val_f1[6:]))\n idx = val_f1.index(optimal_f1s[-1])\n optimal_ps.append(val_p[idx])\n optimal_rs.append(val_r[idx])\n optimal_accs.append(val_acc[idx])\n else:\n optimal_f1s.append(max(val_f1))\n idx = val_f1.index(optimal_f1s[-1])\n optimal_ps.append(val_p[idx])\n optimal_rs.append(val_r[idx])\n optimal_accs.append(val_acc[idx])\n\n\n\"\"\"\nprint out the performance\nplot the performance on each fold\n\"\"\"\nprint('F1 on TroFi by 10-fold = ', optimal_f1s)\nprint('Precision on TroFi = ', np.mean(np.array(optimal_ps)))\nprint('Recall on TroFi = ', np.mean(np.array(optimal_rs)))\nprint('F1 on TroFi = ', np.mean(np.array(optimal_f1s)))\nprint('Accuracy on TroFi = ', np.mean(np.array(optimal_accs)))\n# plt.figure(2)\n# plt.title('F1 for TroFi dataset on ten folds')\n# plt.xlabel('fold')\n# plt.ylabel('F1')\n# plt.plot(optimal_ps,'r')\n# plt.plot(optimal_rs,'b')\n# plt.plot(optimal_f1s,'g')\n# plt.plot(optimal_accs,'c')\n# plt.plot([np.mean(np.array(optimal_f1s))] * 10, 'y')\n# plt.legend(['precision for each fold', 'recall for each fold', 'F1 for each fold', 'accuracy for each fold', 'Average 
F1'], loc='upper right')\n# plt.show()\n","sub_path":"sequence/main_trofi.py","file_name":"main_trofi.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"362943621","text":"#!/usr/bin/env python\n\nimport sys, os\nimport json, urllib\nfrom urllib.parse import quote_plus\nfrom urllib.request import urlopen\nimport asyncio\nimport requests\nfrom PIL import Image, ImageFont, ImageDraw\n\nrequest = 'ケーキ'\nurl = 'http://jisho.org/api/v1/search/words?keyword=' + quote_plus(request)\nwith urlopen(url) as f:\n fdata = json.loads(f.read().decode('utf-8'))\n\ndata = fdata['data']\n#print(data)\n\n\"\"\"print(data[0]['japanese'][0]['word'] + '(' + data[0]['japanese'][0]['reading'] + ')\\n')\nprint(data[0]['senses'][0]['parts_of_speech'][0])\nprint('1. ' + data[0]['senses'][0]['english_definitions'][0])\nprint('2. ' + data[0]['senses'][1]['english_definitions'][0])\nprint('3. ' + data[0]['senses'][2]['english_definitions'][0])\"\"\"\n\nimageDir = './gen/'\nblack = (33, 33, 33, 255)\ngrey = (117, 117, 117, 255)\nwhite = (255, 255, 255, 255)\nfontTitle = ImageFont.truetype(imageDir + 'NotoSansCJKjp-Medium.otf', 36, encoding='utf-8')\nfontMain = ImageFont.truetype(imageDir + 'Roboto-Medium.ttf', 14, encoding='utf-8')\nfontFuri = ImageFont.truetype(imageDir + 'NotoSansCJKjp-Medium.otf', 12, encoding='utf-8')\nfontTags = ImageFont.truetype(imageDir + 'NotoSansCJKjp-Regular.otf', 10, encoding='utf-8')\nfontSmall = ImageFont.truetype(imageDir + 'Roboto-Medium.ttf', 10, encoding='utf-8')\n\ntemplate = Image.open(imageDir + 'jisho.png')\nbase = template\n\nif (data[0]['is_common']):\n base = Image.alpha_composite(base, Image.open(imageDir + 'jisho_cw.png'))\n common = True\nelse:\n common = False\n\ntxt = Image.new('RGBA', base.size, (255,255,255,0))\nd = ImageDraw.Draw(txt)\n\nif (data[0]['tags'] != []):\n if (data[0]['tags'][0][:8] == 'wanikani'):\n tag = 'wanikani level ' + data[0]['tags'][0][8:]\n else:\n tag = data[0]['tags'][0]\n w, h = fontTags.getsize(tag)\n x = (100 - w) / 2\n if (common):\n base = Image.alpha_composite(base, Image.open(imageDir + 'jisho_b2.png'))\n d.text((275 + x, 52), tag, font=fontTags, 
fill=white)\n else:\n base = Image.alpha_composite(base, Image.open(imageDir + 'jisho_b1.png'))\n d.text((275 + x, 28), tag, font=fontTags, fill=white)\n\nif ('word' in data[0]['japanese'][0]):\n d.text((24, 22), data[0]['japanese'][0]['word'], font=fontTitle, fill=black)\n w, h = fontTitle.getsize(data[0]['japanese'][0]['word'])\n W, H = fontFuri.getsize(data[0]['japanese'][0]['reading'])\n x = (w - W) / 2\n d.text((24 + x, 16), data[0]['japanese'][0]['reading'], font=fontFuri, fill=black)\nelse:\n d.text((24, 22), data[0]['japanese'][0]['reading'], font=fontTitle, fill=black)\n\ni = 0\nwordType = ''\nfor x in data[0]['senses'][0]['parts_of_speech']:\n if (i > 0):\n wordType += ', '\n wordType += x\n i += 1\nd.text((24, 77), wordType, font=fontSmall, fill=grey)\n\nix = 1\ni = 0\ndesc = '\\n'\ndescN = '\\n'\nfor x in data[0]['senses']:\n if (ix > 6):\n break\n if (i > 0):\n desc += '\\n'\n descN += '\\n'\n\n if (x['parts_of_speech'] != [] and ix > 1):\n if (ix > 5):\n break\n ii = 0\n wordType = ''\n for y in x['parts_of_speech']:\n if (ii > 0):\n wordType += ', '\n wordType += y\n ii += 1\n d.text((24, 77 + (ix * 17)), wordType, font=fontSmall, fill=grey)\n desc += '\\n'\n descN += '\\n'\n ix += 1\n\n descN += str(i + 1) + '.'\n descL = ''\n ii = 0\n for y in x['english_definitions']:\n if (ii > 0):\n descL += '; '\n descL += y\n ii += 1\n\n if (len(descL) > 48):\n desc += descL[:48] + '...'\n else:\n desc += descL\n\n i += 1\n ix += 1\n\nd.text((24, 72), descN, font=fontMain, fill=grey)\nd.text((40, 72), desc, font=fontMain, fill=black)\n\n\nout = Image.alpha_composite(base, txt)\nout.save(imageDir + 'output/output.png', 'PNG')\n","sub_path":"jisho.py","file_name":"jisho.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"631952177","text":"import calendar\nimport collections\nimport gzip\nimport hashlib\nimport logging\nimport resource\nimport time\nimport zlib\nfrom copy import copy\nfrom csv import DictWriter\nfrom datetime import date, datetime, timedelta\nfrom enum import Enum, IntEnum\nfrom inspect import isclass\nfrom io import BytesIO, StringIO\nfrom itertools import chain\nfrom threading import Lock\nfrom types import MappingProxyType\nfrom uuid import uuid4\n\nimport pytz\nimport simplejson as json\nfrom _csv import reader\nfrom redis_lock import Lock as RedisLock\nfrom six import add_metaclass\n\nfrom hsredshift.analytics.filters import (\n\tget_filter, is_filter_param, lookup_filter_for_bind_param\n)\nfrom hsredshift.analytics.queries import RedshiftCatalogue\nfrom hsredshift.analytics.scheduling import (\n\tQueryRefreshPriority, RedshiftQueryMetaLockTimeoutError\n)\nfrom hsredshift.utils.encoders import HSRedshiftJSONEncoder\nfrom hsredshift.utils.sql import (\n\tQueryStatementGenerator, get_engine_from_environ,\n\tis_in_flight, run_redshift_background_statement\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass QueryDisplayVisual(IntEnum):\n\t\"\"\"\n\t\t# TABLE Result Sets Should be Structured As Follows:\n\t\ttable_result = {\n\t\t\t\"render_as\": \"list_table\",\n\t\t\t\"series\": {\n\t\t\t\t\t\"metadata\": {\n\t\t\t\t\t\t\"DRUID\": {},\n\t\t\t\t\t\t\"HUNTER\": {},\n\t\t\t\t\t\t\"MAGE\": {},\n\t\t\t\t\t\t\"PALADIN\": {},\n\t\t\t\t\t\t\"PRIEST\": {},\n\t\t\t\t\t\t\"ROGUE\": {},\n\t\t\t\t\t\t\"SHAMAN\": {},\n\t\t\t\t\t\t\"WARLOCK\": {},\n\t\t\t\t\t\t\"WARRIOR\": {},\n\t\t\t\t\t},\n\t\t\t\t\t\"data\": {\n\t\t\t\t\t\t\"DRUID\": [\n\t\t\t\t\t\t\t{},\n\t\t\t\t\t\t\t{},\n\t\t\t\t\t\t\t# ...\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"HUNTER\": [],\n\t\t\t\t\t\t\"MAGE\": [],\n\t\t\t\t\t\t\"PALADIN\": [],\n\t\t\t\t\t\t\"PRIEST\": [],\n\t\t\t\t\t\t\"ROGUE\": [],\n\t\t\t\t\t\t\"SHAMAN\": [],\n\t\t\t\t\t\t\"WARLOCK\": [],\n\t\t\t\t\t\t\"WARRIOR\": 
[],\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t}\n\n\t\t# CHART Result Sets Should Be Structured As Follows:\n\t\tchart_result = {\n\t\t\t\"render_as\": \"line_chart\",\n\t\t\t\"series\": [\n\t\t\t\t{\n\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\"metadata\": {\n\t\t\t\t\t\t\"is_winrate_data\": True,\n\t\t\t\t\t\t\"num_data_points\": 100\n\t\t\t\t\t},\n\t\t\t\t\t\"data\": [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"x\": None,\n\t\t\t\t\t\t\t\"y\": None\n\t\t\t\t\t\t},\n\t\t\t\t\t\t# ....\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t\"\"\"\n\tCHART = 0\n\tTABLE = 1\n\n\nclass QueryRefreshInterval(Enum):\n\tNEVER = (0, -1)\n\tHOURLY = (1, 3600)\n\tEVERY_TWO_HOURS = (2, 7200)\n\tEVERY_THREE_HOURS = (3, 10800)\n\tEVERY_SIX_HOURS = (4, 21600)\n\tDAILY = (5, 86400)\n\n\tdef __init__(self, index, max_seconds):\n\t\tself.index = index\n\t\tself.max_seconds = max_seconds\n\n\nclass InvalidOrMissingQueryParameterError(Exception):\n\tdef __init__(self, message, query_name, parameter_name, value=None):\n\t\tsuper(InvalidOrMissingQueryParameterError, self).__init__(message)\n\t\tself.query_name = query_name\n\t\tself.parameter_name = parameter_name\n\t\tself.value = value\n\n\nclass RedshiftQueryMetaContextManager:\n\t\"\"\"A Reentrant Context Manager for synchronizing write access across Lambdas\"\"\"\n\tdef __init__(self, parameterized_query, global_context=False):\n\t\tself.global_context = global_context\n\t\tself.parameterized_query = parameterized_query\n\t\tself.cache = self.parameterized_query.catalogue.cache\n\t\tself.namespace = self.parameterized_query.catalogue.s3_unload_namespace\n\n\t\tif self.global_context:\n\t\t\tself.cache_key_meta = self.parameterized_query.unload_key\n\t\telse:\n\t\t\tself.cache_key_meta = self.parameterized_query.cache_key_meta\n\n\t\tself.lock_name = \"%s_%s\" % (self.namespace, self.cache_key_meta)\n\t\tself.serial_meta_access = self.parameterized_query.catalogue.serialize_meta_access\n\t\tself.cache_ttl_seconds = 
self.parameterized_query.query.cache_ttl_seconds\n\t\tself.meta = None\n\n\t\tif self.serial_meta_access:\n\t\t\t# This ensures no race conditions mutating the meta dict between Lambdas\n\t\t\tself.lock = RedisLock(\n\t\t\t\tself.cache,\n\t\t\t\tself.lock_name,\n\t\t\t\texpire=60,\n\t\t\t\tauto_renewal=True\n\t\t\t)\n\t\telse:\n\t\t\t# For testing this allows use of FakeRedis\n\t\t\tself.lock = Lock()\n\n\t\tself.depth = 0\n\t\tself.acquired = False\n\n\tdef __enter__(self):\n\t\t\"\"\"Blocks until we have exclusive write access to the query meta dict\n\n\t\tRaises RedshiftQueryMetaLockTimeoutError if the lock could not be\n\t\tacquired within the configured timeout. This might happen when an\n\t\tUNLOADed result set is taking a long time to be inserted into Redis.\n\t\t\"\"\"\n\n\t\tif not self.acquired:\n\t\t\tself.acquired = self.lock.acquire(\n\t\t\t\ttimeout=self.parameterized_query.query.meta_lock_wait_seconds\n\t\t\t)\n\t\t\tif not self.acquired:\n\t\t\t\traise RedshiftQueryMetaLockTimeoutError()\n\n\t\t\tif self.cache.exists(self.cache_key_meta):\n\t\t\t\tself.meta = json.loads(self.cache.get(self.cache_key_meta))\n\t\t\telse:\n\t\t\t\tself.meta = {}\n\n\t\tself.depth += 1\n\t\treturn self.meta\n\n\tdef __exit__(self, *exc):\n\t\tself.depth -= 1\n\t\tif self.depth == 0:\n\t\t\tmeta_val = json.dumps(\n\t\t\t\tself.meta,\n\t\t\t\tcls=HSRedshiftJSONEncoder,\n\t\t\t\tseparators=(\",\", \":\"),\n\t\t\t)\n\t\t\tself.cache.set(self.cache_key_meta, meta_val, ex=self.cache_ttl_seconds)\n\t\t\tself.lock.release()\n\t\t\tself.acquired = False\n\n\t\treturn False\n\n\nclass ParameterizedRedshiftQuery(object):\n\n\tdef __init__(self, query, supplied_parameters):\n\t\tself.query = query\n\t\tself.supplied_parameters = supplied_parameters\n\t\tself.verify_required_parameters(supplied_parameters)\n\n\t\tself.has_premium_values = False\n\n\t\tself.final_bind_params = {}\n\n\t\tfor param_name, bind in query.bind_params().items():\n\t\t\tfinal_val = None\n\n\t\t\t# Attempt to 
come up with a final_val for this param\n\t\t\t# First check whether it was provided directly, e.g. card_id\n\t\t\tif param_name in supplied_parameters:\n\t\t\t\tval_from_args = supplied_parameters[param_name]\n\t\t\t\tfinal_val = self._convert_val_to_bind_type(val_from_args, bind)\n\n\t\t\t# Second, check whether it was provided as a Filter Enum\n\t\t\tfilter = lookup_filter_for_bind_param(param_name)\n\t\t\tif final_val is None and filter:\n\t\t\t\tfilter_member = None\n\t\t\t\tfilter_name = filter.filter_name()\n\t\t\t\tif filter_name in supplied_parameters:\n\t\t\t\t\tsupplied_params_filter_val = supplied_parameters[filter_name]\n\t\t\t\t\tif supplied_params_filter_val in filter.__members__:\n\t\t\t\t\t\tfilter_member = filter[supplied_params_filter_val]\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = \"Invalid member %s for filter %s for query: %s\" % (\n\t\t\t\t\t\t\tsupplied_params_filter_val, filter_name, self.query.name\n\t\t\t\t\t\t)\n\t\t\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, filter_name)\n\n\t\t\t\tif filter_member and not self.query.is_supported_filter_member(filter, filter_member):\n\t\t\t\t\tmsg = \"%s is not supported or enabled for query: %s\" % (\n\t\t\t\t\t\tfilter_member.name, self.query.name\n\t\t\t\t\t)\n\t\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, filter_name)\n\n\t\t\t\tif filter_member is None:\n\t\t\t\t\tfilter_member = self.query.get_default_value_for_filter(filter)\n\n\t\t\t\tif self.query.is_premium or self.query.filter_member_is_premium(filter, filter_member):\n\t\t\t\t\tself.has_premium_values = True\n\n\t\t\t\tfinal_val = filter.resolve_bind_param(param_name, filter_member)\n\n\t\t\t# If we don't have a value then check if there is a default\n\t\t\tif final_val is None and bind.value:\n\t\t\t\tfinal_val = bind.value\n\n\t\t\t# If there is no default then check whether it's required (raise exception if it is)\n\t\t\tif final_val is None and bind.required:\n\t\t\t\tmsg = \"Required 
argument %s has not been provided\" % param_name\n\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, param_name)\n\n\t\t\tif final_val is not None:\n\t\t\t\tself.final_bind_params[param_name] = final_val\n\n\tdef verify_required_parameters(self, supplied_parameters):\n\t\tfor param in self.query.required_parameters:\n\t\t\tif param not in supplied_parameters:\n\t\t\t\tmsg = \"Required param %s was not provided\" % param\n\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, param)\n\n\tdef _convert_val_to_bind_type(self, val, bind):\n\t\tconverter = bind.type.python_type\n\t\tif converter == date:\n\t\t\treturn datetime.strptime(val, \"%Y-%m-%d\").date()\n\t\telse:\n\t\t\treturn converter(val)\n\n\t@property\n\tdef catalogue(self):\n\t\treturn self.query.catalogue\n\n\t@property\n\tdef is_global(self):\n\t\treturn self.query.global_query\n\n\t@property\n\tdef is_personalized(self):\n\t\treturn self.query.is_personalized\n\n\t@property\n\tdef is_backfillable(self):\n\t\treturn self.query.is_backfillable\n\n\t@property\n\tdef fully_qualified_parameters(self):\n\t\tresult = {}\n\t\tfor available_param in self.query.available_parameters:\n\t\t\tif available_param in self.supplied_parameters:\n\t\t\t\tresult[available_param] = self.supplied_parameters[available_param]\n\t\t\telif is_filter_param(available_param):\n\t\t\t\tdefault_val = self.query.get_default_value_for_filter(\n\t\t\t\t\tget_filter(available_param)\n\t\t\t\t)\n\t\t\t\tresult[available_param] = str(default_val.name)\n\t\t\telse:\n\t\t\t\tresult[available_param] = \"NOT_PROVIDED\"\n\t\treturn result\n\n\t@property\n\tdef supplied_filters_dict(self):\n\t\t# This is mostly useful for capturing metrics related to what is being requested.\n\t\t# It's useful to be able to separate params that are filters and have a small bounded\n\t\t# range of values. 
And those that have an unbounded range for use with tools like\n\t\t# Influx that treat tags and fields differently.\n\t\tresults = {}\n\t\tfor k, v in self.fully_qualified_parameters.items():\n\t\t\tif is_filter_param(k):\n\t\t\t\tresults[k] = str(v)\n\n\t\treturn results\n\n\t@property\n\tdef supplied_non_filters_dict(self):\n\t\tresults = {}\n\t\tfor k, v in self.fully_qualified_parameters.items():\n\t\t\tif not is_filter_param(k):\n\t\t\t\tresults[k] = str(v)\n\n\t\treturn results\n\n\t@property\n\tdef executable_sql(self):\n\t\tengine = self.catalogue.engine\n\t\tcompiled_statement = self.query.stmt.params(\n\t\t\tself.final_bind_params\n\t\t).compile(bind=engine)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\treturn q.query\n\n\tdef generate_unload_key(self, prefix=\"\"):\n\t\tnamespace = self.catalogue.s3_unload_namespace\n\t\treturn \"%s/%s/%s/%s_\" % (namespace, self.query_name, self.unload_key, str(prefix))\n\n\tdef generate_unload_location(self, prefix=\"\"):\n\t\ts3_bucket = self.catalogue.s3_unload_bucket\n\t\treturn \"s3://%s/%s\" % (s3_bucket, self.generate_unload_key(prefix))\n\n\tdef executable_unload_statement(self, prefix=\"\"):\n\t\tengine = self.catalogue.engine\n\t\taws_access_key_id = self.catalogue.aws_access_key_id\n\t\taws_secret_access_key = self.catalogue.aws_secret_access_key\n\n\t\tcompiled_statement = self.query.stmt.params(\n\t\t\tself.final_bind_params\n\t\t).compile(bind=engine)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\tunload_location = self.generate_unload_location(prefix)\n\n\t\tunload_template = \"\"\"\n\t\t\tUNLOAD ('{select}') TO '{unload_location}'\n\t\t\tACCESS_KEY_ID '{aws_access_key_id}'\n\t\t\tSECRET_ACCESS_KEY '{aws_secret_access_key}'\n\t\t\tMANIFEST\n\t\t\tGZIP\n\t\t\tDELIMITER AS ','\n\t\t\tMAXFILESIZE AS 10 MB\n\t\t\tADDQUOTES\n\t\t\tNULL AS '_N_'\n\t\t\tPARALLEL OFF\n\t\t\tALLOWOVERWRITE\n\t\t\"\"\"\n\n\t\tstmt = unload_template.format(\n\t\t\tselect=q.query.replace(\"'\", 
r\"\\'\"),\n\t\t\tunload_location=unload_location,\n\t\t\taws_access_key_id=aws_access_key_id,\n\t\t\taws_secret_access_key=aws_secret_access_key\n\t\t)\n\n\t\treturn stmt\n\n\tdef __str__(self):\n\t\treturn self.cache_key\n\n\tdef __repr__(self):\n\t\treturn str(self)\n\n\tdef __eq__(self, other):\n\t\tif isinstance(other, ParameterizedRedshiftQuery):\n\t\t\treturn self.cache_key == other.cache_key\n\t\treturn NotImplemented\n\n\tdef schedule_refresh(self, priority=None):\n\t\trefresh_priority = priority or self.query.refresh_priority\n\t\tif refresh_priority == QueryRefreshPriority.IMMEDIATE:\n\t\t\tself.refresh_result(run_async=True)\n\t\telse:\n\t\t\tself.catalogue.scheduler._schedule_refresh(self, refresh_priority)\n\n\tdef preschedule_refresh(self):\n\t\tif self.will_be_stale_at:\n\t\t\tself.catalogue.scheduler._preschedule_refresh(self, self.will_be_stale_at)\n\n\t@property\n\tdef will_be_stale_at(self):\n\t\tif self.query.refresh_interval == QueryRefreshInterval.NEVER:\n\t\t\treturn None\n\t\telse:\n\t\t\ttd = timedelta(seconds=self.query.refresh_interval.max_seconds)\n\t\t\treturn self.result_as_of + td\n\n\t@property\n\tdef refresh_as_of_key(self):\n\t\tprefix = self.query.cache_key_prefix + \":refresh_as_of\"\n\t\toverride_vals = {}\n\t\tfor exclusion in self.query.exclude_from_global_permutations_key:\n\t\t\toverride_vals[exclusion] = \"*\"\n\n\t\treturn self._generate_cache_key([], override_vals=override_vals, prefix=prefix)\n\n\t@property\n\tdef unload_key(self):\n\t\tprefix = self.query.cache_key_prefix + \":unload=True\"\n\t\toverride_vals = {}\n\t\tfor exclusion in self.query.exclude_from_global_permutations_key:\n\t\t\toverride_vals[exclusion] = \"*\"\n\n\t\treturn self._generate_cache_key([], override_vals=override_vals, prefix=prefix)\n\n\t@property\n\tdef refresh_as_of(self):\n\t\tcache = self.query.catalogue.cache\n\t\tas_of_ts = cache.get(self.refresh_as_of_key)\n\t\tif as_of_ts:\n\t\t\treturn 
datetime.utcfromtimestamp(float(as_of_ts))\n\t\telse:\n\t\t\treturn None\n\n\t@property\n\tdef cache_key(self):\n\t\treturn self._generate_cache_key([])\n\n\tdef _generate_cache_key(self, exclusions, override_vals={}, prefix=None):\n\t\tif prefix:\n\t\t\telements = [prefix]\n\t\telse:\n\t\t\telements = [self.query.cache_key_prefix]\n\n\t\tfor available_param in self.query.available_parameters:\n\t\t\tif available_param in exclusions:\n\t\t\t\tcontinue\n\t\t\tif available_param in override_vals:\n\t\t\t\telement = \"%s=%s\" % (available_param, override_vals[available_param])\n\t\t\t\telements.append(element)\n\t\t\telif available_param in self.supplied_parameters:\n\t\t\t\telement = \"%s=%s\" % (available_param, self.supplied_parameters[available_param])\n\t\t\t\telements.append(element)\n\t\t\telif available_param in self.query.required_parameters:\n\t\t\t\traise ValueError(\"Required cache_key_element not found in supplied_parameters: %s\" % available_param)\n\t\t\telse:\n\t\t\t\tif is_filter_param(available_param):\n\t\t\t\t\tfilter = get_filter(available_param)\n\t\t\t\t\tdefault_member = self.query.get_default_value_for_filter(filter)\n\t\t\t\t\telement = \"%s=%s\" % (available_param, default_member.name)\n\t\t\t\telse:\n\t\t\t\t\tdefault_value = self.query.get_default_value(available_param)\n\t\t\t\t\telement = \"%s=%s\" % (available_param, default_value)\n\t\t\t\telements.append(element)\n\n\t\treturn \":\".join(elements)\n\n\t@property\n\tdef cache_key_meta(self):\n\t\treturn self.cache_key + \":meta\"\n\n\t@property\n\tdef query_name(self):\n\t\treturn self.query.name\n\n\t@property\n\tdef params_cache_key(self):\n\t\treturn self.cache_key + \":params\"\n\n\tdef evict_cache(self):\n\t\tcache = self.catalogue.cache\n\t\tcache.delete(self.cache_key)\n\t\tcache.delete(self.params_cache_key)\n\t\tcache.delete(self.refresh_as_of_key)\n\t\tcache.delete(self.cache_key_meta)\n\n\tdef evict_all_from_cache(self):\n\t\treturn self.query.evict_all_from_cache()\n\n\tdef 
mark_stale(self):\n\t\twith self.get_meta_context() as meta:\n\t\t\tmeta[\"is_stale\"] = True\n\n\tdef mark_all_stale(self):\n\t\treturn self.query.mark_all_stale()\n\n\t@property\n\tdef result_as_of(self):\n\t\tmeta = self.read_only_meta\n\t\tas_of_ts = meta.get(\"as_of\", None)\n\t\tif as_of_ts and not self.cached_records_are_invalid:\n\t\t\treturn datetime.utcfromtimestamp(float(as_of_ts))\n\t\telse:\n\t\t\treturn None\n\n\t@property\n\tdef read_only_meta(self):\n\t\tif self.catalogue.cache.exists(self.cache_key_meta):\n\t\t\treturn MappingProxyType(\n\t\t\t\tjson.loads(self.catalogue.cache.get(self.cache_key_meta))\n\t\t\t)\n\t\telse:\n\t\t\treturn MappingProxyType(dict())\n\n\t@property\n\tdef read_only_global_meta(self):\n\t\tif self.catalogue.cache.exists(self.unload_key):\n\t\t\treturn MappingProxyType(\n\t\t\t\tjson.loads(self.catalogue.cache.get(self.unload_key))\n\t\t\t)\n\t\telse:\n\t\t\treturn MappingProxyType(dict())\n\n\tdef get_meta_context(self):\n\t\tif not hasattr(self, \"_meta_context\"):\n\t\t\tself._meta_context = RedshiftQueryMetaContextManager(self)\n\t\treturn self._meta_context\n\n\tdef get_global_meta_context(self):\n\t\tif not hasattr(self, \"_global_meta_context\"):\n\t\t\tself._global_meta_context = RedshiftQueryMetaContextManager(\n\t\t\t\tself,\n\t\t\t\tglobal_context=True\n\t\t\t)\n\t\treturn self._global_meta_context\n\n\t@property\n\tdef cache_is_populated(self):\n\t\tif self.refresh_as_of and not self.cached_records_are_invalid:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t@property\n\tdef cached_records_are_invalid(self):\n\t\t# This is true when the query SQL is changed.\n\t\t# Any result records in the cache are considered invalid\n\t\tmeta = self.read_only_meta\n\t\tif meta:\n\t\t\treturn meta.get(\"query_hash\", None) != self.query.query_hash\n\t\telse:\n\t\t\treturn False\n\n\t@property\n\tdef result_available(self):\n\t\tcache = self.query.catalogue.cache\n\t\tif self.cached_records_are_invalid:\n\t\t\t# If the 
query_hash has changed then the records are for an old version\n\t\t\t# of the query and are not usable\n\t\t\treturn False\n\n\t\treturn cache.exists(self.cache_key)\n\n\t@property\n\tdef result_is_stale(self):\n\t\tif self.cached_records_are_invalid:\n\t\t\treturn True\n\n\t\tmeta = self.read_only_meta\n\t\tif not meta or meta.get(\"is_stale\", False):\n\t\t\treturn True\n\n\t\tcurrent_dt = datetime.utcnow()\n\t\tcached_as_of_dt = self.result_as_of\n\t\tif not cached_as_of_dt:\n\t\t\treturn True\n\n\t\tseconds_stale = (current_dt - cached_as_of_dt).total_seconds()\n\t\tif self.query.refresh_interval == QueryRefreshInterval.NEVER:\n\t\t\treturn False\n\t\telif seconds_stale > self.query.refresh_interval.max_seconds:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t@property\n\tdef response_payload_type(self):\n\t\tif self.query.as_csv:\n\t\t\treturn \"text/csv\"\n\t\telse:\n\t\t\treturn \"application/json\"\n\n\t@property\n\tdef response_payload_data(self):\n\t\treturn self._get_response_payload(as_json=True)\n\n\t@property\n\tdef response_payload(self):\n\t\treturn self._get_response_payload(as_json=False)\n\n\t@property\n\tdef response_payload_json(self):\n\t\treturn self._get_response_payload(as_json=True)\n\n\tdef _get_response_payload(self, as_json=False):\n\t\tcache = self.query.catalogue.cache\n\t\tval = cache.get(self.cache_key)\n\t\tif val is None:\n\t\t\tif self.query.raw_series_data:\n\t\t\t\treturn None\n\n\t\t\tres = dict(\n\t\t\t\trender_as=self.query.display_visual.name.lower(),\n\t\t\t\tseries=None,\n\t\t\t)\n\n\t\t\tif as_json:\n\t\t\t\treturn json.dumps(res)\n\t\t\telse:\n\t\t\t\treturn res\n\n\t\tval = zlib.decompress(val).decode(\"utf-8\")\n\n\t\tif as_json or self.query.as_csv:\n\t\t\treturn val\n\t\telse:\n\t\t\treturn json.loads(val)\n\n\tdef refresh_result(self, wlm_queue=None, executor_class=None, run_async: bool=False):\n\t\tif run_async:\n\t\t\treturn self._refresh_result_async(wlm_queue, executor_class)\n\t\telse:\n\t\t\treturn 
self._refresh_result_synchronous(wlm_queue, executor_class)\n\n\tdef _refresh_result_synchronous(self, wlm_queue=None, executor_class=None):\n\t\tif executor_class is None:\n\t\t\texecutor_class = RedshiftCursorQueryExecutor\n\n\t\texecutor = executor_class(self.query)\n\t\tengine = self.query.catalogue.engine\n\t\tcache = self.query.catalogue.cache\n\n\t\tas_of_ts = executor.execute(engine, self, cache, wlm_queue)\n\t\t# Record in the cache the last time the results where refreshed.\n\t\t# This is useful for global queries to be able to determine if a parameter permutation\n\t\t# That is not in cache is missing because the data needs a refresh\n\t\t# Or because that permutation is not valid.\n\t\tcache.set(\n\t\t\tself.refresh_as_of_key,\n\t\t\tas_of_ts,\n\t\t\tex=self.query.cache_ttl_seconds\n\t\t)\n\n\t\treturn as_of_ts\n\n\t@property\n\tdef in_flight(self):\n\t\tengine = self.catalogue.engine\n\t\twith self.get_global_meta_context() as meta:\n\t\t\tquery_handle = meta.get(\"query_handle\", None)\n\t\t\tlogger.info(\"Checking in flight status for: %s\" % query_handle)\n\t\t\twith engine.connect() as conn:\n\t\t\t\tresult = is_in_flight(conn, query_handle)\n\t\t\t\tlogger.info(\"is_in_flight=%s\" % str(result))\n\t\t\t\treturn result\n\n\tdef _update_in_flight_status(self, meta):\n\t\tif \"query_heartbeat\" not in meta or \"query_handle\" not in meta:\n\t\t\tmsg = \"query_heartbeat or query_handle missing from meta dict\"\n\t\t\traise RuntimeError(msg)\n\n\t\tquery_heartbeat = meta[\"query_heartbeat\"]\n\t\tquery_handle = meta[\"query_handle\"]\n\t\tnow_ts = calendar.timegm(datetime.utcnow().utctimetuple())\n\t\tengine = self.catalogue.engine\n\n\t\ttime_delta_seconds = now_ts - query_heartbeat\n\t\tif time_delta_seconds >= self.query.ASYNC_HEARTBEAT_INTERVAL:\n\t\t\twith engine.connect() as conn:\n\t\t\t\tif is_in_flight(conn, query_handle):\n\t\t\t\t\t# The query is still running\n\t\t\t\t\tmeta[\"query_heartbeat\"] = now_ts\n\t\t\t\telse:\n\t\t\t\t\t# The 
query either failed or just finished\n\t\t\t\t\t# Even if we clean up inflight here, the resolution lambda should\n\t\t\t\t\t# resolve without errors.\n\t\t\t\t\tself.cleanup_in_flight_data(meta)\n\t\t\t\t\t# Unload timeout alert\n\t\t\t\t\tif meta.get(\"unload_running\", False) and \"unload_start\" in meta:\n\t\t\t\t\t\tunload_start = meta[\"unload_start\"]\n\t\t\t\t\t\tunload_delta_seconds = now_ts - unload_start\n\t\t\t\t\t\tif unload_delta_seconds > 300: # Lambdas time out after 5 minutes\n\t\t\t\t\t\t\tmeta[\"unload_running\"] = False\n\t\t\t\t\t\t\tmsg = \"Unload timed out: '%s' took more than 300 seconds\"\n\t\t\t\t\t\t\traise RuntimeError(msg % self.unload_key)\n\n\t\t\t\t\t# Informational duration alert\n\t\t\t\t\tif \"most_recent_duration\" in meta:\n\t\t\t\t\t\tmost_recent_duration = meta[\"most_recent_duration\"]\n\t\t\t\t\t\tcurrent_duration = now_ts - meta[\"query_start\"]\n\t\t\t\t\t\tif current_duration > (most_recent_duration * 10):\n\t\t\t\t\t\t\tmsg = \"Query handle %s duration is 10x the previous run\"\n\t\t\t\t\t\t\tlogger.warning(msg % meta[\"query_handle\"])\n\n\tdef refresh_results_from_unload_manifest(self, manifest_key):\n\t\tlogger.info(\"About to aquire meta context\")\n\t\tacquire_start = time.time()\n\t\twith self.get_global_meta_context() as meta:\n\t\t\tacquire_end = time.time()\n\t\t\tlogger.info(\n\t\t\t\t\"Acquiring context took: %i seconds\" % int(acquire_end - acquire_start)\n\t\t\t)\n\t\t\tnow = calendar.timegm(datetime.utcnow().utctimetuple())\n\t\t\tif \"query_start\" in meta:\n\t\t\t\tstart_ts = meta[\"query_start\"]\n\t\t\t\tend_ts = now\n\t\t\t\tmeta[\"query_end\"] = end_ts\n\t\t\t\tduration = end_ts - start_ts\n\t\t\t\tmeta[\"most_recent_duration\"] = duration\n\t\t\t\tlogger.info(\"Query Execution Duration: %s seconds\" % duration)\n\t\t\telse:\n\t\t\t\tstart_ts = now\n\n\t\t\t# Register that we're working on unloading\n\t\t\tmeta[\"unload_start\"] = now\n\t\t\tmeta[\"unload_running\"] = True\n\n\t\t\tcursor = 
S3ManifestCursor(\n\t\t\t\tself,\n\t\t\t\tmanifest_key,\n\t\t\t\tstart_ts,\n\t\t\t\tfetch_size=self.query.fetch_size\n\t\t\t)\n\t\t\tcache = self.catalogue.cache\n\t\t\tlogger.info(\"About to begin updating cache\")\n\t\t\tcache_update_start = time.time()\n\n\t\t\ttry:\n\t\t\t\tself.query.update_cache(self, cursor, cache)\n\t\t\tfinally:\n\t\t\t\tmeta[\"unload_running\"] = False\n\t\t\t\tself.cleanup_in_flight_data(meta)\n\n\t\t\tcache_update_end = time.time()\n\t\t\tcache_update_duration = cache_update_end - cache_update_start\n\t\t\tlogger.info(\n\t\t\t\t\"Updating Cache Took: %s seconds\" % str(round(cache_update_duration, 2))\n\t\t\t)\n\n\t@property\n\tdef most_recent_duration(self):\n\t\tmeta = self.read_only_global_meta\n\t\tif \"most_recent_duration\" in meta:\n\t\t\treturn meta[\"most_recent_duration\"]\n\t\telse:\n\t\t\treturn None\n\n\t@property\n\tdef most_recent_query_handle(self):\n\t\tmeta = self.read_only_global_meta\n\t\tif \"query_handle\" in meta:\n\t\t\treturn meta[\"query_handle\"]\n\t\telse:\n\t\t\treturn None\n\n\tdef cleanup_in_flight_data(self, meta):\n\t\tmeta[\"in_flight\"] = False\n\n\tdef _refresh_result_async(self, wlm_queue=None, executor_class=None):\n\t\tif not self.catalogue.aws_configured:\n\t\t\tmsg = \"An AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and S3_UNLOAD_BUCKET are required.\"\n\t\t\traise RuntimeError(msg)\n\t\tlogger.info(\"Async refreshing query: %s\" % self.unload_key)\n\n\t\tif executor_class is None:\n\t\t\texecutor_class = RedshiftAsyncQueryExecutor\n\n\t\texecutor = executor_class(self.query)\n\t\tengine = self.query.catalogue.engine\n\t\tcache = self.query.catalogue.cache\n\n\t\twith self.get_global_meta_context() as meta:\n\t\t\tif meta.get(\"in_flight\", False):\n\t\t\t\tlogger.info(\"Query is in_flight will update in_flight status\")\n\t\t\t\t# This query is currently running on the cluster\n\t\t\t\t# Update the in_flight 
statistics\n\t\t\t\tself._update_in_flight_status(meta)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Query is not in_flight - will execute UNLOAD statement\")\n\n\t\t\t\t# Launch the query, which should return quickly.\n\t\t\t\tas_of_ts = executor.execute(engine, self, cache, meta)\n\n\t\t\t\t# Record in the cache the last time the results where refreshed.\n\t\t\t\t# This is useful for global queries to be able to determine if a\n\t\t\t\t# parameter permutation that is not in cache is missing because\n\t\t\t\t# the data needs a refresh or because that permutation is not valid.\n\t\t\t\tcache.set(\n\t\t\t\t\tself.refresh_as_of_key,\n\t\t\t\t\tas_of_ts,\n\t\t\t\t\tex=self.query.cache_ttl_seconds\n\t\t\t\t)\n\n\t\t\treturn meta[\"query_start\"]\n\n\tdef generate_params_with_vals(self, override_vals):\n\t\tcopied_supplied_params = copy(self.supplied_parameters)\n\t\tcopied_supplied_params.update(override_vals)\n\t\treturn ParameterizedRedshiftQuery(self.query, copied_supplied_params)\n\n\nclass RedshiftQueryMeta(type):\n\t\"\"\"A Metaclass to provide automatic registration of all queries in the hsredshift.analytics.library package\"\"\"\n\tdef __init__(self, name, bases, dict):\n\t\ttype.__init__(self, name, bases, dict)\n\t\t# Filter out the Query Base Class\n\t\tif name != \"BaseRedshiftQuery\":\n\t\t\tRedshiftCatalogue.register(self)\n\n\n@add_metaclass(RedshiftQueryMeta)\nclass BaseRedshiftQuery(object):\n\tname = None\n\tenabled = True\n\tis_premium = False\n\tis_personalized = False\n\tis_backfillable = False\n\tglobal_query = False\n\traw_series_data = False\n\tas_csv = False\n\tuses_archetypes = False\n\tfetch_size = 10000\n\t# 30 Day TTL (30 * 24 * 60 * 60)\n\tcache_ttl_seconds = 2592000\n\trefresh_interval = QueryRefreshInterval.EVERY_THREE_HOURS\n\trefresh_priority = QueryRefreshPriority.MEDIUM\n\tmeta_lock_wait_seconds = 10\n\tcache_warming_enabled = True\n\tstmt = None\n\tdisplay_visual = None\n\trequired_parameters = []\n\tavailable_parameters = 
[]\n\texclude_from_global_permutations_key = []\n\t# MIN_PILOTS protects privacy and protects from huge result sets\n\tMIN_PILOTS = 10\n\t# MIN_ELIGIBLE_GAMES protects from having having too many results\n\tMIN_ELIGIBLE_GAMES = 400\n\t# MIN_STANDARD_GAMES & MIN_WILD_GAMES ensure we have enough data for statistical significance\n\t# in the context of whatever filters might be further restricting the eligible games\n\tMIN_STANDARD_GAMES = 400\n\tMIN_WILD_GAMES = 200\n\tASYNC_HEARTBEAT_INTERVAL = 60\n\tAPPLICATION_PREFIX = \"RedshiftQuery\"\n\n\tdef __init__(self, catalogue):\n\t\tself.catalogue = catalogue\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\tdef build_full_params(self, partial_params):\n\t\ttransformed_params = self._transform_params(partial_params)\n\t\treturn ParameterizedRedshiftQuery(self, transformed_params)\n\n\tdef _transform_params(self, partial_params):\n\t\tresult = partial_params\n\t\tfor available_param in self.available_parameters:\n\t\t\tif is_filter_param(available_param):\n\t\t\t\tfilter = get_filter(available_param)\n\t\t\t\tresult = filter.transform_supplied_params(result)\n\t\treturn result\n\n\tdef bind_params(self):\n\t\tresult = {}\n\t\tfor param_name, bind in self.stmt.compile().binds.items():\n\t\t\tresult[param_name] = bind\n\t\treturn result\n\n\tdef get_default_value(self, available_param):\n\t\traise NotImplementedError(\"Must be implemented by queries exposing non-required, non-filter parameters\")\n\n\tdef get_default_value_for_filter(self, filter):\n\t\t# Queries whose default value deviates from the norm must sub-class this\n\t\treturn filter.get_default_member()\n\n\tdef get_supported_filter_members(self, filter):\n\t\t# Return the set that have is_enabled = True\n\t\t# Sub-classes can override this method on an individual basis to expose additional filters\n\t\treturn [member for member in filter.__members__.values() if member.is_enabled]\n\n\tdef is_supported_filter_member(self, filter, member):\n\t\treturn member in 
self.get_supported_filter_members(filter)\n\n\tdef filter_member_is_premium(self, filter, filter_member):\n\t\treturn filter_member.is_premium\n\n\t@property\n\tdef query_hash(self):\n\t\tif not hasattr(self, \"_query_hash\"):\n\t\t\tself._query_hash = hashlib.md5(str(self.stmt).encode(\"utf-8\")).hexdigest()\n\t\treturn self._query_hash\n\n\t@property\n\tdef cache_key_prefix(self):\n\t\t\"\"\"Useful for SCAN prefix* to discover what is cached for this query\"\"\"\n\t\treturn \"%s:%s\" % (self.APPLICATION_PREFIX, self.name)\n\n\tdef as_result_set(self):\n\t\treturn ResultSetRedshiftQueryExecutor(self)\n\n\tdef get_available_non_filter_parameters(self):\n\t\treturn [param for param in self.available_parameters if not is_filter_param(param)]\n\n\tdef evict_all_from_cache(self):\n\t\tcount = 0\n\t\tfor permutation in self.generate_cachable_parameter_permutations():\n\t\t\tparameterized_query = self.build_full_params(permutation)\n\t\t\tparameterized_query.evict_cache()\n\t\t\tcount += 1\n\t\treturn count\n\n\tdef mark_all_stale(self):\n\t\tcount = 0\n\t\tfor permutation in self.generate_cachable_parameter_permutations():\n\t\t\tparameterized_query = self.build_full_params(permutation)\n\t\t\tparameterized_query.mark_stale()\n\t\t\tcount += 1\n\t\treturn count\n\n\tdef generate_cachable_parameter_permutations(self):\n\t\tresult = []\n\n\t\tfor parameter_permutation in self.generate_supported_filter_permutations():\n\t\t\tfor non_filter_param in self.get_available_non_filter_parameters():\n\t\t\t\tparameter_permutation[non_filter_param] = \"*\"\n\t\t\tresult.append(parameter_permutation)\n\n\t\treturn result\n\n\tdef generate_personalized_parameter_permutation_bases(self):\n\t\tif not self.is_personalized:\n\t\t\traise RuntimeError(\"Can only generate cachable permutation bases for personalized queries.\")\n\t\treturn self.generate_supported_filter_permutations(exclude=[\"Region\"])\n\n\tdef generate_supported_filter_permutations(self, exclude=[]):\n\t\tfilters = 
[]\n\t\tfor param in self.available_parameters:\n\t\t\tif is_filter_param(param) and param not in exclude:\n\t\t\t\tfilters.append(get_filter(param))\n\n\t\tpermutations = []\n\t\tself._generate_permutations(permutations, [], filters)\n\t\treturn permutations\n\n\tdef _generate_permutations(self, accumulator, members, filters):\n\t\tif len(filters):\n\t\t\tnext_filter = filters.pop()\n\t\t\tfor member in self.get_supported_filter_members(next_filter):\n\t\t\t\tmembers.append(member)\n\t\t\t\tself._generate_permutations(accumulator, members, filters)\n\t\t\t\tmembers.pop()\n\t\t\tfilters.append(next_filter)\n\t\telse:\n\t\t\tfinal_permutation = {}\n\t\t\tfor m in members:\n\t\t\t\tfinal_permutation[m.filter_name()] = m.name\n\t\t\taccumulator.append(final_permutation)\n\n\tdef execute(self, engine, params):\n\t\tresult = self.as_result_set().execute(engine, params, as_json=False)\n\t\tresponse_payload = self.to_response_payload(result, params)\n\t\treturn response_payload\n\n\tdef _prepare_param_overrides(self, non_filter_params, current_non_filter_param_vals):\n\t\tresult = {}\n\t\tfor param, val in zip(non_filter_params, current_non_filter_param_vals):\n\t\t\tfilter_enum = get_filter(param)\n\t\t\tif filter_enum:\n\t\t\t\tfilter_member = filter_enum.from_int(int(val))\n\t\t\t\tresult[param] = filter_member.name\n\t\t\telse:\n\t\t\t\tresult[param] = val\n\t\treturn result\n\n\tdef update_cache(self, params, cursor, cache, pipeline=True, pipeline_batch_size=10000):\n\t\t# cursor is an iterable of row records from the query statement.\n\t\t# cache is a python redis client.\n\t\t# The order by in the query will be preserved in the cursor.\n\t\tatomic_data_units = 0\n\t\tif not self.global_query:\n\t\t\t# This is a query like trending_decks, or cards_included\n\t\t\t# Or it is a query for personalized stats\n\t\t\t# The entire result set will get stored in a single value\n\t\t\trecord_set = []\n\t\t\tfor row in cursor:\n\t\t\t\tatomic_data_units += 
len(row)\n\t\t\t\trecord_set.append(row)\n\n\t\t\tself._send_record_set_to_cache(cache, cursor.as_of, params, record_set)\n\t\t\tmetric_fields = {\n\t\t\t\t\"units\": atomic_data_units,\n\t\t\t}\n\t\t\tmetric_fields.update(\n\t\t\t\tparams.supplied_non_filters_dict\n\t\t\t)\n\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\"redshift_data_units\",\n\t\t\t\tmetric_fields,\n\t\t\t\tquery_name=params.query_name,\n\t\t\t\t**params.supplied_filters_dict\n\t\t\t)\n\t\telse:\n\t\t\tnon_filter_params = self.exclude_from_global_permutations_key\n\t\t\tcurrent_non_filter_param_vals = None\n\t\t\trecord_set = []\n\t\t\tcurrent_pipeline_batch = 0\n\t\t\tcache_or_pipeline = cache\n\t\t\tif pipeline:\n\t\t\t\tcache_or_pipeline = cache.pipeline(transaction=True)\n\n\t\t\tfor row in cursor:\n\t\t\t\tatomic_data_units += len(row)\n\t\t\t\trow_vals = []\n\t\t\t\tfor non_filter_param in non_filter_params:\n\t\t\t\t\trow_val = row.get(non_filter_param.lower(), None)\n\t\t\t\t\tif not row_val:\n\t\t\t\t\t\traise ValueError(\"a column name must match the non_filter_param\")\n\t\t\t\t\trow_vals.append(row_val)\n\n\t\t\t\tif not current_non_filter_param_vals:\n\t\t\t\t\tcurrent_non_filter_param_vals = row_vals\n\n\t\t\t\tif row_vals == current_non_filter_param_vals:\n\t\t\t\t\trecord_set.append(row)\n\t\t\t\telse:\n\t\t\t\t\tcurrent_val_params = params.generate_params_with_vals(\n\t\t\t\t\t\tdict(self._prepare_param_overrides(\n\t\t\t\t\t\t\tnon_filter_params,\n\t\t\t\t\t\t\tcurrent_non_filter_param_vals\n\t\t\t\t\t\t))\n\t\t\t\t\t)\n\t\t\t\t\tself._send_record_set_to_cache(\n\t\t\t\t\t\tcache_or_pipeline,\n\t\t\t\t\t\tcursor.as_of,\n\t\t\t\t\t\tcurrent_val_params,\n\t\t\t\t\t\trecord_set,\n\t\t\t\t\t)\n\t\t\t\t\tcurrent_pipeline_batch += 1\n\n\t\t\t\t\tmetric_fields = {\n\t\t\t\t\t\t\"units\": 
atomic_data_units,\n\t\t\t\t\t}\n\t\t\t\t\tmetric_fields.update(\n\t\t\t\t\t\tcurrent_val_params.supplied_non_filters_dict\n\t\t\t\t\t)\n\t\t\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\t\t\"redshift_data_units\",\n\t\t\t\t\t\tmetric_fields,\n\t\t\t\t\t\tquery_name=current_val_params.query_name,\n\t\t\t\t\t\t**current_val_params.supplied_filters_dict\n\t\t\t\t\t)\n\t\t\t\t\tatomic_data_units = 0\n\n\t\t\t\t\t# Reset the record_set buffer for the next chunk of records\n\t\t\t\t\trecord_set = [row]\n\t\t\t\t\tcurrent_non_filter_param_vals = row_vals\n\n\t\t\t\tif pipeline:\n\t\t\t\t\tif current_pipeline_batch >= pipeline_batch_size:\n\t\t\t\t\t\t# The execute statement will reset the pipeline's state\n\t\t\t\t\t\tcache_or_pipeline.execute()\n\t\t\t\t\t\tcurrent_pipeline_batch = 0\n\n\t\t\tif len(record_set):\n\t\t\t\t# Flush the last result to cache\n\t\t\t\tcurrent_val_params = params.generate_params_with_vals(\n\t\t\t\t\tdict(self._prepare_param_overrides(\n\t\t\t\t\t\tnon_filter_params,\n\t\t\t\t\t\tcurrent_non_filter_param_vals\n\t\t\t\t\t))\n\t\t\t\t)\n\t\t\t\tself._send_record_set_to_cache(\n\t\t\t\t\tcache_or_pipeline,\n\t\t\t\t\tcursor.as_of,\n\t\t\t\t\tcurrent_val_params,\n\t\t\t\t\trecord_set,\n\t\t\t\t)\n\t\t\t\tmetric_fields = {\n\t\t\t\t\t\"units\": atomic_data_units,\n\t\t\t\t}\n\t\t\t\tmetric_fields.update(\n\t\t\t\t\tcurrent_val_params.supplied_non_filters_dict\n\t\t\t\t)\n\t\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\t\"redshift_data_units\",\n\t\t\t\t\tmetric_fields,\n\t\t\t\t\tquery_name=current_val_params.query_name,\n\t\t\t\t\t**current_val_params.supplied_filters_dict\n\t\t\t\t)\n\n\t\t\tif pipeline:\n\t\t\t\t# Commit any outstanding pipeline work (noop if nothing was set())\n\t\t\t\t# This will also release the connection if bound\n\t\t\t\tcache_or_pipeline.execute()\n\n\tdef _send_record_set_to_cache(self, cache, as_of, params, record_set):\n\t\tas_of_datetime = None\n\t\tif as_of:\n\t\t\tas_of_datetime = 
datetime.utcfromtimestamp(as_of).replace(tzinfo=pytz.utc)\n\n\t\tchart_series = self.to_chart_series(params, record_set)\n\t\ttry:\n\t\t\tself.validate_chart_series(chart_series, params.supplied_filters_dict)\n\t\texcept UserWarning as w:\n\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\"redshift_query_warnings\",\n\t\t\t\t{\n\t\t\t\t\t\"message\": str(w),\n\t\t\t\t},\n\t\t\t\tquery_name=params.query_name,\n\t\t\t\t**params.supplied_filters_dict\n\t\t\t)\n\n\t\tif self.raw_series_data:\n\t\t\tresult = chart_series\n\t\telse:\n\t\t\tresult = {\n\t\t\t\t\"render_as\": self.display_visual.name.lower(),\n\t\t\t\t\"series\": chart_series,\n\t\t\t\t\"as_of\": as_of_datetime,\n\t\t\t}\n\n\t\tserialized_result = self.serialize_data_for_cache(result)\n\n\t\tcompressed_val = zlib.compress(serialized_result.encode(\"utf-8\"))\n\n\t\twith params.get_meta_context() as meta:\n\t\t\tmeta[\"as_of\"] = as_of\n\t\t\tmeta[\"is_stale\"] = False\n\t\t\tmeta[\"query_hash\"] = self.query_hash\n\n\t\tcache.set(params.cache_key, compressed_val, ex=self.cache_ttl_seconds)\n\n\tdef serialize_data_for_cache(self, data):\n\t\tif self.as_csv:\n\t\t\tcolumn_names = self.csv_column_names()\n\t\t\toutput = StringIO()\n\t\t\tcsvwriter = DictWriter(output, fieldnames=column_names)\n\t\t\tcsvwriter.writeheader()\n\t\t\tcsvwriter.writerows(data)\n\t\t\treturn output.getvalue()\n\t\telse:\n\t\t\treturn json.dumps(\n\t\t\t\tdata,\n\t\t\t\tcls=HSRedshiftJSONEncoder,\n\t\t\t\tseparators=(\",\", \":\"),\n\t\t\t)\n\n\tdef csv_column_names(self):\n\t\treturn [c.name for c in self.stmt.columns]\n\n\tdef csv_column_converters(self):\n\t\tresult = collections.OrderedDict()\n\t\tfor c in self.stmt.columns:\n\t\t\tresult[c.name] = c.type.python_type\n\t\treturn result\n\n\tdef to_chart_series(self, params, result_set):\n\t\treturn result_set\n\n\tdef validate_chart_series(self, chart_series, params={}):\n\t\t# This can be optionally implemented by subclasses to validate the output of\n\t\t# to_chart_series. 
UserWarnings will be caught and sent to Influx.\n\t\tpass\n\n\tdef example_parameters(self):\n\t\t# This should be implemented by subclasses\n\t\t# In order to use the LocalQueryRunner testing tool\n\t\tpass\n\n\tdef to_response_payload(self, result_set, params):\n\t\tchart_series_data = self.to_chart_series(params, result_set)\n\n\t\tresult = {\n\t\t\t\"render_as\": self.display_visual.name.lower(),\n\t\t\t\"series\": chart_series_data\n\t\t}\n\n\t\treturn result\n\n\t@classmethod\n\tdef local_query(cls, locals):\n\t\tfor loc in locals.values():\n\t\t\tif isclass(loc) and issubclass(loc, cls) and loc != cls:\n\t\t\t\treturn loc()\n\n\tdef to_executable_sql(self, args, engine=None, echo=True):\n\t\tif not engine:\n\t\t\tengine = get_engine_from_environ(echo=echo)\n\n\t\tparams = self.build_full_params(args)\n\t\tcompiled_statement = self.stmt.params(params.final_bind_params).compile(bind=engine)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\treturn q.query\n\n\tdef execute_with_args(self, args, engine=None, echo=True, as_table_rows=False, limit=None, print_duration=True):\n\t\tif not engine:\n\t\t\tengine = get_engine_from_environ(echo=echo)\n\n\t\tparams = self.build_full_params(args)\n\t\tif as_table_rows:\n\t\t\tstart = time.time()\n\t\t\tresult = self.as_result_set().execute(engine, params, as_json=False)\n\t\t\tend = time.time()\n\t\t\tduration = round(end - start, 2)\n\t\t\tif print_duration:\n\t\t\t\tprint(\"Duration Seconds: %s\" % duration)\n\t\t\tcolumn_formatters = self._create_column_formatters(result)\n\t\t\tcolumn_names = [column_formatters[c.name].format(c.name) for c in self.stmt.columns]\n\n\t\t\toutput = BytesIO()\n\t\t\tcsvwriter = DictWriter(output, fieldnames=column_names)\n\t\t\tcsvwriter.writeheader()\n\t\t\tif limit is not None:\n\t\t\t\tcount = 0\n\t\t\t\tfor row in result:\n\t\t\t\t\tif count >= limit:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tformatter = lambda k, v: column_formatters[k].format(str(v))\n\t\t\t\t\tformatted_row = 
{formatter(k, k): formatter(k, v) for k, v in row.items()}\n\t\t\t\t\tcsvwriter.writerow(formatted_row)\n\t\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tfor row in result:\n\t\t\t\t\tformatter = lambda k, v: column_formatters[k].format(str(v))\n\t\t\t\t\tformatted_row = {formatter(k, k): formatter(k, v) for k, v in row.items()}\n\t\t\t\t\tcsvwriter.writerow(formatted_row)\n\t\t\treturn output.getvalue()\n\t\telse:\n\t\t\tstart = time.time()\n\t\t\tresult = self.execute(engine, params)\n\t\t\tend = time.time()\n\t\t\tduration = round(end - start, 2)\n\t\t\tif print_duration:\n\t\t\t\tprint(\"Duration Seconds: %s\" % duration)\n\t\t\treturn json.dumps(result)\n\n\tdef _create_column_formatters(self, results):\n\t\tcolumn_widths = collections.defaultdict(int)\n\t\tfor row in results:\n\t\t\tfor col, val in row.items():\n\t\t\t\tcolumn_widths[col] = max(column_widths[col], len(str(val)), len(str(col)))\n\n\t\treturn {k: \"{:>%is}\" % (v) for k, v in column_widths.items()}\n\n\nclass ResultSetRedshiftQueryExecutor:\n\tdef __init__(self, query):\n\t\tself._query = query\n\n\tdef execute(self, engine, params, wlm_queue=None, as_json=True):\n\t\tgroup = wlm_queue if wlm_queue else self._query.catalogue._wlm_queue\n\t\tconn = engine.connect()\n\t\tconn.execute(\"SET QUERY_GROUP TO '%s';\" % group)\n\t\tcompiled_statement = self._query.stmt.params(params.final_bind_params).compile(bind=conn)\n\n\t\tstart_time = time.time()\n\t\tresult = conn.execute(compiled_statement)\n\t\tend_time = time.time()\n\t\tduration = round(end_time - start_time, 2)\n\t\tlogger.debug(\"Query Runtime: %s\" % duration)\n\n\t\tif as_json:\n\t\t\treturn json.dumps(\n\t\t\t\t(dict(row.items()) for row in result),\n\t\t\t\tcls=HSRedshiftJSONEncoder,\n\t\t\t\tseparators=(\",\", \":\"),\n\t\t\t\titerable_as_array=True\n\t\t\t)\n\t\telse:\n\t\t\treturn [dict(row.items()) for row in result]\n\n\nclass PostgresQueryExecutor:\n\tdef __init__(self, query):\n\t\tself._query = query\n\t\tself.as_of = None\n\n\tdef 
execute(self, engine, params, cache, wlm_queue=None):\n\t\tconn = engine.connect()\n\t\tcompiled_statement = self._query.stmt.params(params.final_bind_params).compile(bind=conn)\n\t\tas_of = datetime.utcnow()\n\t\t# wrap the ResultProxy to return rows as dicts (like LazyRedshiftCursor)\n\t\tcursor = DictCursorProxy(conn.execute(compiled_statement))\n\t\tcursor.as_of = calendar.timegm(as_of.utctimetuple())\n\n\t\tself._query.update_cache(params, cursor, cache)\n\n\t\treturn cursor.as_of\n\n\nclass DictCursorProxy(object):\n\tdef __init__(self, cursor):\n\t\tself.cursor = cursor\n\t\tif not cursor.returns_rows:\n\t\t\traise ValueError(\"cursor must return rows\")\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\trow = self.cursor.fetchone()\n\t\tif row is None:\n\t\t\traise StopIteration()\n\t\treturn dict(row.items())\n\n\nclass RedshiftCursorQueryExecutor:\n\tdef __init__(self, query):\n\t\tself._query = query\n\t\tself.cursor_handle = \"%s_cursor_%s\" % (query.name, str(uuid4())[:7])\n\t\tself.as_of = None\n\n\tdef _make_final_stmt(self, engine, conn, params):\n\t\tcompiled_statement = self._query.stmt.params(params.final_bind_params).compile(bind=conn)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\treturn \"DECLARE %s CURSOR FOR %s\" % (self.cursor_handle, q.query)\n\n\tdef execute(self, engine, params, cache, wlm_queue=None):\n\t\tgroup = wlm_queue if wlm_queue else \"analytics\"\n\t\tconn = engine.connect()\n\t\tdeclare_statement = self._make_final_stmt(engine, conn, params)\n\n\t\tself.as_of = datetime.utcnow()\n\t\tconn.execute(\"SET QUERY_GROUP TO '%s';\" % group)\n\t\tconn.execute(\"BEGIN;\")\n\t\tconn.execute(str(declare_statement))\n\n\t\tcursor = LazyRedshiftCursor(self, conn, self._query.fetch_size)\n\t\tself._query.update_cache(params, cursor, cache)\n\n\t\tconn.execute(\"CLOSE %s;\" % self.cursor_handle)\n\t\tconn.execute(\"COMMIT;\")\n\t\tconn.close()\n\n\t\treturn cursor.as_of\n\n\nclass 
RedshiftAsyncQueryExecutor:\n\t\"\"\"Uses UNLOAD command to make query asynchronous.\"\"\"\n\n\tdef __init__(self, query):\n\t\tself._query = query\n\n\tdef _make_async_query_handle(self, params):\n\t\tnamespace = params.catalogue.s3_unload_namespace\n\t\treturn \"%s-unload-%s\" % (namespace, str(uuid4())[:7])\n\n\tdef execute(self, engine, params, cache, meta):\n\t\tas_of = calendar.timegm(datetime.utcnow().utctimetuple())\n\t\tunload_location = params.generate_unload_location(prefix=str(as_of))\n\t\tstmt = params.executable_unload_statement(prefix=str(as_of))\n\t\thandle = self._make_async_query_handle(params)\n\n\t\tlogger.info(\"Query handle for %s is %s\" % (params.unload_key, handle))\n\n\t\tredshift_pid = run_redshift_background_statement(stmt, handle, engine)\n\n\t\tmeta[\"in_flight\"] = True\n\t\tmeta[\"redshift_pid\"] = redshift_pid\n\t\tmeta[\"unload_location\"] = unload_location\n\t\tmeta[\"query_start\"] = as_of\n\t\tmeta[\"query_heartbeat\"] = as_of\n\t\tmeta[\"query_handle\"] = handle\n\n\t\treturn as_of\n\n\nclass LazyRedshiftCursor(object):\n\tdef __init__(self, executor, conn, fetch_size=1000):\n\t\tself._executor = executor\n\t\tself._cursor_handle = executor.cursor_handle\n\t\tself._conn = conn\n\t\tself._fetch_size = fetch_size\n\t\tself._buffer = []\n\n\t@property\n\tdef as_of(self):\n\t\treturn calendar.timegm(self._executor.as_of.utctimetuple())\n\n\tdef _fill_buffer(self):\n\t\tfetch_stmt = \"FETCH FORWARD %i FROM %s;\" % (self._fetch_size, self._cursor_handle)\n\t\tresult = self._conn.execute(fetch_stmt)\n\t\tself._buffer.extend([dict(row.items()) for row in result])\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\tif not self._buffer:\n\t\t\tself._fill_buffer()\n\n\t\tif self._buffer:\n\t\t\treturn self._buffer.pop(0)\n\t\telse:\n\t\t\traise StopIteration()\n\n\nclass LazyZippedStreamingBodyReader(object):\n\tdef __init__(self, streaming_body, name, is_gzipped=True):\n\t\tself.name = name\n\t\tself.is_gzipped = 
is_gzipped\n\t\tself.streaming_body = streaming_body\n\t\tif self.is_gzipped:\n\t\t\tself.data = self.streaming_gzip_decompress(streaming_body)\n\t\telse:\n\t\t\tself.data = self.read_in_chunks(streaming_body)\n\t\tself.started = False\n\t\tself._buffer = b\"\"\n\n\tdef read_in_chunks(self, streaming_body):\n\t\tfor chunk in iter(lambda: streaming_body.read(65536), b\"\"):\n\t\t\tyield chunk\n\n\tdef streaming_gzip_decompress(self, streaming_body):\n\t\t# offset 32 to skip the header\n\t\tdec = zlib.decompressobj(32 + zlib.MAX_WBITS)\n\t\tfor chunk in self.read_in_chunks(streaming_body):\n\t\t\trv = dec.decompress(chunk)\n\t\t\tif rv:\n\t\t\t\tyield rv\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\tif not self._buffer or self._buffer.find(b\"\\n\") == -1:\n\t\t\tif not self.started:\n\t\t\t\tlogger.info(\"Starting: %s\" % self.name)\n\t\t\t\tself.started = True\n\t\t\tself._buffer += next(self.data, b\"\")\n\n\t\tif self._buffer:\n\t\t\tline, partition, remainder = self._buffer.partition(b\"\\n\")\n\t\t\tself._buffer = remainder\n\t\t\treturn line.decode(\"utf8\")\n\t\telse:\n\t\t\traise StopIteration()\n\n\nclass ZippedStreamingBodyReader(object):\n\tdef __init__(self, streaming_body, name):\n\t\tself.name = name\n\t\tself.streaming_body = streaming_body\n\t\tself._lines = collections.deque([])\n\n\tdef __iter__(self):\n\t\tlogger.info(\"Starting: %s\" % self.name)\n\t\tdata = gzip.decompress(self.streaming_body.read())\n\t\ttext = data.decode(\"utf8\")\n\t\tself._lines.extend(text.split(\"\\n\"))\n\t\treturn self\n\n\tdef __next__(self):\n\t\trow = self._lines.popleft()\n\t\tif row:\n\t\t\treturn row\n\t\telse:\n\t\t\traise StopIteration()\n\n\nclass S3ManifestCursor(object):\n\tdef __init__(self, params, manifest_key, as_of, fetch_size=10000):\n\t\tself.params = params\n\t\tself.s3 = self.params.catalogue.s3\n\t\tself.as_of = as_of\n\t\tself.bucket = params.catalogue.s3_unload_bucket\n\t\tself.manifest_key = 
manifest_key\n\t\tmanifest_object = self.s3.get_object(\n\t\t\tBucket=self.bucket,\n\t\t\tKey=manifest_key\n\t\t)\n\t\tself.manifest = json.loads(manifest_object[\"Body\"].read().decode(\"utf8\"))\n\t\tself.readers = []\n\n\t\tcolumn_converters = params.query.csv_column_converters()\n\t\tself.converters = list(column_converters.values())\n\t\tself.column_keys = list(column_converters.keys())\n\t\tfor k, v in enumerate(self.converters):\n\t\t\tif issubclass(v, datetime):\n\t\t\t\tself.converters[k] = lambda v: \\\n\t\t\t\t\tdatetime.strptime(v, \"%Y-%m-%d %H:%M:%S\").replace(tzinfo=pytz.UTC)\n\t\t\telif issubclass(v, date):\n\t\t\t\tself.converters[k] = lambda v: datetime.strptime(v, \"%Y-%m-%d\").date()\n\n\t\tself.start_ts = int(time.time())\n\n\t\t# We are using the following logic so that we can stream through\n\t\t# The results on S3 using a fixed amount of memory.\n\t\tfor entry in self.manifest[\"entries\"]:\n\t\t\ttokens = entry[\"url\"].split(\"/\")\n\t\t\tkey = \"/\".join(tokens[-4:])\n\t\t\tentry_object = self.s3.get_object(\n\t\t\t\tBucket=self.bucket,\n\t\t\t\tKey=key\n\t\t\t)\n\t\t\tr = ZippedStreamingBodyReader(entry_object[\"Body\"], key)\n\t\t\tself.readers.append(r)\n\n\t\tself.dict_reader = reader(chain.from_iterable(self.readers))\n\n\t\tself._fetch_size = fetch_size\n\t\tself._buffer = collections.deque([])\n\t\tself._buffer_fill_count = 0\n\n\tdef _fill_buffer(self):\n\t\tself._buffer_fill_count += 1\n\t\tlogger.info(\"Buffer Fill %i\" % self._buffer_fill_count)\n\t\tlogger.info(\"Elapsed Seconds: %i\" % int(time.time() - self.start_ts))\n\t\tmem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\t\tlogger.info(\"Memory Usage: %s MB\" % str(round(mem / 1024, 2)))\n\n\t\treader = self.dict_reader\n\t\tnext_line = next(reader, None)\n\t\tcurrent_buffer_size = 0\n\t\ttarget_size = self._fetch_size - 1\n\t\twhile next_line and current_buffer_size < target_size:\n\t\t\tself._buffer.append(next_line)\n\t\t\tcurrent_buffer_size += 
1\n\t\t\tnext_line = next(reader, None)\n\n\t\tif next_line:\n\t\t\tself._buffer.append(next_line)\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef _restore_types(self, next_result):\n\t\tresult = {}\n\t\tfor i, v in enumerate(next_result):\n\t\t\tk = self.column_keys[i]\n\t\t\tif v == \"_N_\":\n\t\t\t\tresult[k] = None\n\t\t\telse:\n\t\t\t\tresult[k] = self.converters[i](v)\n\n\t\treturn result\n\n\tdef __next__(self):\n\t\tif not self._buffer:\n\t\t\tself._fill_buffer()\n\n\t\tif self._buffer:\n\t\t\tnext_result = self._buffer.popleft()\n\t\t\t# Restore types so output is identical to sqlalchemy cursor\n\t\t\treturn self._restore_types(next_result)\n\t\telse:\n\t\t\traise StopIteration()\n","sub_path":"hsredshift/analytics/library/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":46571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"93270055","text":"class DistinctError(ValueError):\n \"\"\"如果向distinctdict添加重复值,则引发错误。\"\"\"\n\n\nclass distinctdict(dict):\n \"\"\"不接受重复值的字典\"\"\"\n\n def __setitem__(self, key, value):\n if value in self.values():\n if (\n (key in self and self[key] != value) or\n key not in self\n ):\n raise DistinctError(\"This value already exits for different key\")\n super().__setitem__(key, value)\n\n\nclass Folder(list):\n def __init__(self, name):\n self.name = name\n\n def dir(self, nesting=0):\n offset = \" \" * nesting\n print('%s%s/' % (offset, self.name))\n\n for element in self:\n if hasattr(element, 'dir'):\n element.dir(nesting + 1)\n else:\n print(\"%s %s\" % (offset, element))\n\n\n# 访问超类中的方法\nclass Mama:\n def says(self):\n print('do you homework')\n\n\nclass Sister(Mama):\n def says(self):\n Mama.says(self) # super().says()\n print('and clean you bedroom')\n","sub_path":"01.grammer/use_class.py","file_name":"use_class.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"405201992","text":"from os import system\n\ngit = 'git init'\npoetry = 'poetry install'\n\n\ndef try_except(command, message):\n try:\n print(f\"+ {command}\")\n system(command)\n except Exception as e:\n print(f\"{message}: {e}\")\n\n\ntry_except(git, 'git exception')\ntry_except(poetry, 'poetry exception')\n","sub_path":"hooks/post_gen_project.py","file_name":"post_gen_project.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"502049364","text":"# Measuring leaf growth on newborn oak trees\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# Fonts:\ncsfont = {'fontname':'Charter', 'fontweight':'regular'}\nhfont = {'fontname':'Charter', 'fontweight':'bold'}\nifont = {'fontname':'Charter', 'fontweight':'regular', 'style':'italic'}\n\n# Colours:\nplant1Colour = '#18990c'\nplant2Colour = '#62b539'\nplant3Colour = '#93c75e'\n\nplant4Colour = '#67aeff'\nplant5Colour = '#3192ff'\nplant6Colour = '#0078ff'\n\ntextColour = '#818a8b'\n\ninterpColour = '#db2727'\n\nfont_axes = 12\nfont_labels = 22\nfont_title = 18\nfont_text = 14\n\n# Time:\nn_days = 21\ntime = range(1, n_days)\n\n# Leaf length in centimeters:\nl1p1e = np.array([4.4, 5.8, 6.8, 7.9, 8.7, 9.4, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6])\nl1p2e = np.array([1.4, 2.1, 2.6, 3.6, 4.7, 6.0, 7.0, 8.1, 9.1, 9.8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])\nl2p2e = np.array([2.5, 3.0, 3.4, 3.8, 4.0, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1])\nl1p3e = np.array([3.8, 5.6, 6.7, 7.9, 9.1, 10.0, 10.3, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4])\n\nl1p1w = np.array([3.6, 4.2, 4.6, 5.2, 5.7, 6.0, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1])\nl2p1w = np.array([2.0, 2.3, 2.7, 3.2, 4.0, 4.8, 5.0, 4.9, 6.0, 6.2, 6.3, 6.3, 6.4, 6.4, 6.4, 6.4, 6.4, 6.4, 6.4, 6.4])\nl3p1w = np.array([1.6, 1.8, 2.2, 2.7, 3.2, 4.1, 4.3, 5.5, 5.4, 5.7, 6.0, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1])\n\nl1p2w = np.array([4.2, 4.9, 5.3, 6.0, 6.7, 7.6, 8.0, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 9.0, 9.1, 9.1, 9.1, 9.1, 9.1, 9.1])\nl2p2w = np.array([2.2, 2.5, 2.8, 3.3, 4.0, 4.8, 5.2, 5.7, 6.0, 6.3, 6.7, 7.0, 7.3, 7.3, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4])\n\nl1p3w = np.array([2.1, 2.5, 2.8, 3.1, 3.4, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5])\nl2p3w = np.array([2.6, 3.2, 3.7, 4.5, 5.3, 6.3, 6.5, 7.0, 7.2, 7.2, 7.3, 7.3, 
7.3, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3])\nl3p3w = np.array([1.2, 1.5, 2.0, 2.4, 3.0, 4.2, 4.4, 5.3, 6.2, 6.7, 7.1, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4])\n\n# Mark the results that were interpolated (missing measurements):\ninterp_meas = np.array([8.5, 8.6, 8.7, 8.8])\ninterp_time = np.array([9, 10, 11, 12])\n\n# Collect results to find the largest growth:\ncollected = np.array([l1p1e, l1p2e, l2p2e, l1p3e, l1p2w, l2p2w])\nn_leafs, n_meas = np.shape(collected)\n\nlargest_growth = 0\n\nfor i in range(0, n_leafs):\n\n for j in range(0, n_meas-1):\n\n if j != 0:\n growth = collected[i][j] - collected[i][j-1]\n\n if growth > largest_growth:\n largest_growth = growth\n start_length = collected[i][j-1]\n end_length = collected[i][j]\n\nprint('Largest growth observed: ' + str(largest_growth) + ' from ' + str(start_length) + 'cm to ' + str(end_length) + 'cm in one day.')\n\n# Plot graph:\nfigure = plt.figure(figsize=(6, 7))\nfigureSubplot = figure.add_subplot(1,1,1)\nplt.plot(time, l1p1e, color=plant1Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l1p2e, color=plant2Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l2p2e, color=plant2Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l1p3e, color=plant3Colour, linestyle='-', linewidth=2.0)\n\nplt.plot(time, l1p1w, color=plant4Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l2p1w, color=plant4Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l3p1w, color=plant4Colour, linestyle='-', linewidth=2.0)\n\nplt.plot(time, l1p2w, color=plant5Colour, linestyle='-', linewidth=2.0, zorder=0)\nplt.plot(time, l2p2w, color=plant5Colour, linestyle='-', linewidth=2.0)\n\nplt.plot(time, l1p3w, color=plant6Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l2p3w, color=plant6Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l3p3w, color=plant6Colour, linestyle='-', linewidth=2.0)\n\nplt.scatter(interp_time, interp_meas, color=interpColour, s=5, linewidth=2.0, zorder=1)\n\nplt.title(r'Leaf growth on newborn 
oak trees', fontsize=font_title, **hfont, color=textColour)\nplt.xlabel(r'Day', fontsize=font_labels, **csfont, color=textColour)\nplt.ylabel(r'Length in cm', fontsize=font_labels, **csfont, color=textColour)\n#plt.text(12, 4.3, 'secondary branch', **ifont, fontsize=font_text, color=plant2Colour)\nplt.text(20, 1.2, 'East orientation', **csfont, fontsize=font_text, color=plant1Colour, horizontalalignment='right')\nplt.text(20, 0.6, 'West orientation', **csfont, fontsize=font_text, color=plant6Colour, horizontalalignment='right')\nplt.text(16, 8, 'Missing measurements,\\nresults interpolated', **csfont, fontsize=font_text-6, color=interpColour, horizontalalignment='right')\nplt.xlim([0, time[-1] + 1]), plt.xticks(time)\nplt.ylim([0, 10]), plt.yticks(range(1, 12))\n\nfigureSubplot.spines['bottom'].set_color(textColour)\nfigureSubplot.spines['top'].set_color(textColour)\nfigureSubplot.spines['left'].set_color(textColour)\nfigureSubplot.spines['right'].set_color(textColour)\nfigureSubplot.tick_params(axis='x', colors=textColour)\nfigureSubplot.tick_params(axis='y', colors=textColour)\n\n# Set the tick labels font\nfor label in (figureSubplot.get_xticklabels()):\n label.set_fontname('Charter')\n label.set_fontweight('regular')\n label.set_fontsize(font_axes)\n\nfor label in (figureSubplot.get_yticklabels()):\n label.set_fontname('Charter')\n label.set_fontweight('regular')\n label.set_fontsize(font_axes)\n\n# Save plot:\nfilename = 'measuring-leaf-growth.png'\nplt.savefig(filename, dpi = 300, bbox_inches='tight')\n","sub_path":"measuring-leaf-growth/measuring-leaf-growth.py","file_name":"measuring-leaf-growth.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"163889466","text":"import os\nfrom urllib.request import urlopen\nimport json\n\nfor i in range(999):\n html = 'http://www.eshop.unicom.local/eshop/front/order/orderInfo.do?id=' + str(i)\n response = urlopen(html).read()\n text = json.loads(response)\n print(i)\n name = './order/' + str(i) + '.txt'\n fopen = open(name, 'a')\n fopen.write(str(text))\n fopen.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"113880803","text":"# -------------------------------------------------------------------------- #\n# ---------------------------------------------------------------- HEADER -- #\n\"\"\"\n@copyright: 2018 Kludgeworks LLC\n\n@description: tools related to hierarchical relationships between nodes\n\n@author: Ed Whetstone\n\n@applications: NUKE\n\"\"\"\n\n# -------------------------------------------------------------------------- #\n# --------------------------------------------------------------- IMPORTS -- #\n# internal\nfrom vfx_utils.plutonium.core import decorators\n\n# domain\nimport nuke\n\n# -------------------------------------------------------------------------- #\n# ------------------------------------------------------------ DECORATORS -- #\ndef main_tree(func):\n \"\"\"decorates functions which take node arguments representing the\n main comp tree. Replaces nodes=None with a list of nuke Nodes.\"\"\"\n return decorators.defaults_factory('nodes', (main_comp, [], {}), func)\n\ndef full_tree(func):\n \"\"\"decorates functions which require all nodes connected to main\n tree, including those that don't contribute to the image, or are\n connected via expression links. 
Replaces nodes=None with a list\n of nuke Nodes.\"\"\"\n return decorators.defaults_factory('nodes', (full_comp, [], {}), func)\n\n# -------------------------------------------------------------------------- #\n# ------------------------------------------------------------- FUNCTIONS -- #\n\n# ------------------------------------------------ Input / Output Helpers -- #\n@decorators.selected_node\ndef direct_outputs(node=None, pipe=None):\n \"\"\"returns a list of nodes that node outputs to directly\n (not via expressions)\n \"\"\"\n depend_nodes = node.dependent(nuke.INPUTS)\n if not pipe:\n return [n for n in depend_nodes if n.Class() != 'Viewer']\n else:\n return [n for n in depend_nodes if n.Class() != 'Viewer'\n and n.input(pipe) == node]\n\n@decorators.selected_node\ndef direct_inputs(node=None):\n \"\"\"returns a list of nodes output to node directly\n (not via expressions)\"\"\"\n depend_nodes = node.dependencies(nuke.INPUTS)\n return [d for d in depend_nodes if d.Class() != 'Viewer']\n\n@decorators.selected_node\ndef exp_outputs(node=None):\n \"\"\"returns a list of nodes that node outputs through expressions\"\"\"\n depend_nodes = node.dependent(nuke.EXPRESSIONS)\n return [d for d in depend_nodes if d.Class() != 'Viewer']\n\n@decorators.selected_node\ndef exp_inputs(node=None):\n \"\"\"returns a list of nodes that this node recieves information from\n through expressions\"\"\"\n depend_nodes = node.dependencies(nuke.EXPRESSIONS)\n return depend_nodes\n\n# --------------------------------------------------------- Above / Below -- #\n@decorators.selected_nodes\ndef up(nodes=None, pipe=None):\n \"\"\"returns a list of nodes one level up from the given node(s)\"\"\"\n try:\n list(nodes)\n except:\n nodes = [nodes]\n if not pipe:\n return [in_node for n in nodes for in_node in direct_inputs(n)]\n else:\n return [n.input(pipe) for n in nodes if n.input(pipe)]\n\n@decorators.selected_nodes\ndef down(nodes=None, pipe=None):\n \"\"\"returns a list of nodes one 
level down from the given node(s)\"\"\"\n try:\n list(nodes)\n except:\n nodes = [nodes]\n return [out_node for n in list(nodes)\n for out_node in list(direct_outputs(n, pipe=pipe))]\n\n@decorators.selected_nodes\ndef above(nodes=None, dist_return=False, pipe=None):\n \"\"\"returns a list of all nodes that are up-chain from the\n given node(s)\"\"\"\n try:\n list(nodes)\n except TypeError:\n nodes = [nodes]\n above_nodes = []\n distances = []\n dist = 0\n while len(nodes) > 0:\n new_nodes = up(nodes, pipe=pipe)\n dist += 1\n new = filter(lambda a: a not in above_nodes, new_nodes)\n if len(new) == 0:\n break\n for n in new:\n if n not in above_nodes:\n above_nodes.append(n)\n distances.append(dist)\n nodes = new\n if dist_return:\n above_nodes = zip(above_nodes, distances)\n return above_nodes\n\n@decorators.selected_nodes\ndef below(nodes=None, dist_return=False, pipe=None):\n \"\"\"returns a list of all nodes that are up-chain from the\n given node(s)\"\"\"\n try:\n list(nodes)\n except:\n nodes = [nodes]\n below_nodes = []\n distances = []\n dist = 0\n while len(nodes) > 0:\n new_nodes = down(nodes, pipe=pipe)\n new = filter(lambda a: a not in below_nodes, new_nodes)\n if len(new) == 0:\n break\n for n in new:\n if n not in below_nodes:\n below_nodes.append(n)\n distances.append(dist)\n nodes = new\n dist += 1\n if dist_return:\n below_nodes = zip(below_nodes, distances)\n return below_nodes\n\n# ------------------------------------------------- Hierarchical Distance -- #\n# TODO: this section needs updating\ndef first_common_descent(nodes=None, pipe=None):\n \"\"\"returns the first down-chain node common to the given nodes\n (NOT IMPLEMENTED!)\n \"\"\"\n nodes = nodes if nodes else nuke.selectedNodes()\n node_lists = [below(n, pipe=pipe) for n in nodes]\n if len(node_lists) > 1:\n int_set = set(node_lists[0])\n for nl in node_lists[1:]:\n int_set.intersection_update(nl)\n tup_list = [(len(above(n, pipe=pipe)), n) for n in int_set]\n tup_list.sort()\n if 
tup_list:\n return tup_list[0][1]\n else:\n return []\n else:\n return []\n\ndef dist_between(anode, bnode, pipe=None):\n \"\"\"returns the connected distance between two nodes\"\"\"\n a_node_above = above(anode, pipe=pipe)\n b_node_above = above(bnode, pipe=pipe)\n dist = None\n if anode in b_node_above:\n above_tups = above(bnode, dist_return=True, pipe=pipe)\n above_tups.sort(key=lambda x: x[1])\n for distTup in above_tups:\n if anode == distTup[0]:\n dist = distTup[1]\n elif bnode in a_node_above:\n above_tups = above(anode, dist_return=True, pipe=pipe)\n above_tups.sort(key=lambda x: x[1])\n for distTup in above(anode, dist_return=True, pipe=pipe):\n if bnode == distTup[0]:\n dist = distTup[1]\n else:\n common_desc = first_common_descent((anode, bnode))\n if common_desc:\n distA = dist_between(anode, common_desc, pipe=pipe)\n distB = dist_between(bnode, common_desc, pipe=pipe)\n dist = distA + distB\n else:\n dist = None\n return dist\n\ndef nodes_between(anode, bnode, pipe=None):\n \"\"\"returns the list of nodes connecting two nodes\n (NOT IMPLEMENTED!)\n \"\"\"\n a_node_above = above(anode, pipe=pipe)\n b_node_above = above(bnode, pipe=pipe)\n nodes_btwn = None\n if anode in b_node_above:\n nodes_btwn = list(set(below(anode, pipe=pipe)) & set(b_node_above))\n elif bnode in a_node_above:\n nodes_btwn = list(set(below(bnode, pipe=pipe)) & set(a_node_above))\n else:\n common_desc = first_common_descent((anode, bnode), pipe=pipe)\n if common_desc:\n nodes_a = nodes_between(anode, common_desc, pipe=pipe)\n nodes_b = nodes_between(bnode, common_desc, pipe=pipe)\n nodes_btwn = nodes_a.extend(nodes_b)\n return nodes_btwn\n else:\n nodes_btwn = None\n return nodes_btwn\n\n# ------------------------------------------------------- Script Specific -- #\ndef main_comp():\n \"\"\"returns a list of all nodes in the largest tree in the comp,\n assumed to be the main comp\"\"\"\n all_nodes = nuke.allNodes('Write')\n above_nodes = []\n above_nodes = [(above(n), n) for n in 
all_nodes]\n main_comp = max(above_nodes, key=lambda x: len(x[0]))\n main_comp_nodes = main_comp[0]\n main_comp_nodes.reverse()\n main_comp_nodes.append(main_comp[1])\n return main_comp_nodes\n\ndef full_comp():\n \"\"\"return all the nodes connected to the comp, even if they don't contribute\n to the final output.\"\"\"\n main_tree_nodes = main_comp()\n all_nodes_ = nuke.allNodes()\n culled_nodes = [n for n in all_nodes_ if n not in main_tree_nodes]\n non_comp_nodes = [n for n in culled_nodes\n if any((abv in main_tree_nodes)\n for abv in above(nodes=n))]\n for node in list(non_comp_nodes):\n non_comp_nodes.extend(above(node))\n return list(set(non_comp_nodes))\n\n# ------------------------------------------------------- Sorting Methods -- #\ndef sorted_hierarchy(nodes):\n \"\"\"sorts the given nodes by hierarchical order. Assumes all nodes\n are in the same chain\"\"\"\n def sorter(node):\n return len([n for n in above(node) if n in nodes])\n return sorted(nodes, key=sorter)\n\n# ------------------------------------------------------- Group Hierarchy -- #\ndef parent(node=None):\n return nuke.toNode('.'.join(node.fullName().split('.')[:-1])) or nuke.root()\n","sub_path":"plutonium/core/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"542401139","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = \"courses_app\"\nurlpatterns = [\n url(r'^create_comment/(?P\\d+)$',views.create_comment),\n url(r'^courses/destroy/destroy_course/(?P\\d+)$', views.destroy_course),\n url(r'^destroy_course/(?P\\d+)$', views.destroy_course),\n url(r'^courses/destroy/(?P\\d+)$', views.destroy_course),\n url(r'^create$', views.create_course),\n url(r'^$', views.index)\n ]\n","sub_path":"apps/courses_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"519884420","text":"#COMP 363 Assignment 11\n#Thomas Walsh\n\ntargetWord = ''\ngivenWord = ''\ntargetIndex = 0\ngivenIndex = 0\nsolution = []\n\ndef dynamic():\n global targetWord\n global givenWord\n global targetIndex\n global givenIndex\n global solution\n #Sets up the solution list of lists\n for x in range(targetIndex + 1):\n #Makes the right number of lists\n tempy = []\n for y in range(givenIndex + 1):\n #Adds the right number of zeros\n tempy.append(0)\n solution.append(tempy)\n #Checks each letter in target word\n for targetNum in range(targetIndex + 1):\n #Checks every letter in given word\n for givenNum in range(givenIndex + 1):\n #given word is empty\n if givenNum == 0:\n solution[targetNum][givenNum] = targetNum\n #target word is empty\n elif targetNum == 0:\n solution[targetNum][givenNum] = givenNum\n #letters match\n elif targetWord[targetNum - 1] == givenWord[givenNum - 1]:\n solution[targetNum][givenNum] = solution[targetNum - 1][givenNum - 1]\n #letters don't match\n else:\n #Checks to see whats easiest replacement, insertion, or removal\n solution[targetNum][givenNum] = 1 + min(solution[targetNum - 1][givenNum - 1], solution[targetNum][givenNum - 1], solution[targetNum - 1][givenNum])\n \ndef main():\n global targetWord\n global givenWord\n global targetIndex\n global givenIndex\n global solution\n targetWord = 'loyola'\n givenWord = 'crayola'\n targetIndex = len(targetWord)\n givenIndex = len(givenWord)\n dynamic()\n print(solution[targetIndex][givenIndex])\n\nmain()\n","sub_path":"COMP363A11/WALSH_11.1B.py","file_name":"WALSH_11.1B.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"577342684","text":"class WorldNode:\n def __init__(self, name):\n self.name = name\n self.children = {}\n self.parent = None\n self.depth = 0\n self.text = \"\"\n\n @property\n def display_name(self):\n return self.name.strip(\"#\")\n\n def add_child(self, node):\n if node.name in self.children:\n print(\"{} already exists.\".format(node.name))\n self.children[node.name] = node\n node.parent = self\n node.depth = self.depth + 1","sub_path":"toolkits/md2xml/src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"167011755","text":"from werkzeug.contrib import fixers\nfrom werkzeug.datastructures import ResponseCacheControl\nfrom werkzeug.http import parse_cache_control_header\nfrom werkzeug.test import Client\nfrom werkzeug.test import create_environ\nfrom werkzeug.wrappers import Request\nfrom werkzeug.wrappers import Response\n\n\n@Request.application\ndef path_check_app(request):\n return Response(\n \"PATH_INFO: %s\\nSCRIPT_NAME: %s\"\n % (request.environ.get(\"PATH_INFO\", \"\"), request.environ.get(\"SCRIPT_NAME\", \"\"))\n )\n\n\nclass TestServerFixer(object):\n def test_cgi_root_fix(self):\n app = fixers.CGIRootFix(path_check_app)\n response = Response.from_app(\n app, dict(create_environ(), SCRIPT_NAME=\"/foo\", PATH_INFO=\"/bar\")\n )\n assert response.get_data() == b\"PATH_INFO: /bar\\nSCRIPT_NAME: \"\n\n def test_cgi_root_fix_custom_app_root(self):\n app = fixers.CGIRootFix(path_check_app, app_root=\"/baz/\")\n response = Response.from_app(\n app, dict(create_environ(), SCRIPT_NAME=\"/foo\", PATH_INFO=\"/bar\")\n )\n assert response.get_data() == b\"PATH_INFO: /bar\\nSCRIPT_NAME: baz\"\n\n def test_path_info_from_request_uri_fix(self):\n app = fixers.PathInfoFromRequestUriFix(path_check_app)\n for key in \"REQUEST_URI\", \"REQUEST_URL\", \"UNENCODED_URL\":\n env = dict(create_environ(), SCRIPT_NAME=\"/test\", PATH_INFO=\"/?????\")\n env[key] = \"/test/foo%25bar?drop=this\"\n response = Response.from_app(app, env)\n assert response.get_data() == b\"PATH_INFO: /foo%bar\\nSCRIPT_NAME: /test\"\n\n def test_header_rewriter_fix(self):\n @Request.application\n def application(request):\n return Response(\"\", headers=[(\"X-Foo\", \"bar\")])\n\n application = fixers.HeaderRewriterFix(\n application, (\"X-Foo\",), ((\"X-Bar\", \"42\"),)\n )\n response = Response.from_app(application, create_environ())\n assert response.headers[\"Content-Type\"] == \"text/plain; charset=utf-8\"\n assert \"X-Foo\" not in response.headers\n assert response.headers[\"X-Bar\"] == 
\"42\"\n\n\nclass TestBrowserFixer(object):\n def test_ie_fixes(self):\n @fixers.InternetExplorerFix\n @Request.application\n def application(request):\n response = Response(\"binary data here\", mimetype=\"application/vnd.ms-excel\")\n response.headers[\"Vary\"] = \"Cookie\"\n response.headers[\"Content-Disposition\"] = \"attachment; filename=foo.xls\"\n return response\n\n c = Client(application, Response)\n response = c.get(\n \"/\",\n headers=[\n (\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\")\n ],\n )\n\n # IE gets no vary\n assert response.get_data() == b\"binary data here\"\n assert \"vary\" not in response.headers\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n assert response.headers[\"content-type\"] == \"application/vnd.ms-excel\"\n\n # other browsers do\n c = Client(application, Response)\n response = c.get(\"/\")\n assert response.get_data() == b\"binary data here\"\n assert \"vary\" in response.headers\n\n cc = ResponseCacheControl()\n cc.no_cache = True\n\n @fixers.InternetExplorerFix\n @Request.application\n def application(request):\n response = Response(\"binary data here\", mimetype=\"application/vnd.ms-excel\")\n response.headers[\"Pragma\"] = \", \".join(pragma)\n response.headers[\"Cache-Control\"] = cc.to_header()\n response.headers[\"Content-Disposition\"] = \"attachment; filename=foo.xls\"\n return response\n\n # IE has no pragma or cache control\n pragma = (\"no-cache\",)\n c = Client(application, Response)\n response = c.get(\n \"/\",\n headers=[\n (\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\")\n ],\n )\n assert response.get_data() == b\"binary data here\"\n assert \"pragma\" not in response.headers\n assert \"cache-control\" not in response.headers\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n\n # IE has simplified pragma\n pragma = (\"no-cache\", \"x-foo\")\n cc.proxy_revalidate = True\n response = 
c.get(\n \"/\",\n headers=[\n (\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\")\n ],\n )\n assert response.get_data() == b\"binary data here\"\n assert response.headers[\"pragma\"] == \"x-foo\"\n assert response.headers[\"cache-control\"] == \"proxy-revalidate\"\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n\n # regular browsers get everything\n response = c.get(\"/\")\n assert response.get_data() == b\"binary data here\"\n assert response.headers[\"pragma\"] == \"no-cache, x-foo\"\n cc = parse_cache_control_header(\n response.headers[\"cache-control\"], cls=ResponseCacheControl\n )\n assert cc.no_cache\n assert cc.proxy_revalidate\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n","sub_path":"tests/contrib/test_fixers.py","file_name":"test_fixers.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"589303521","text":"# _*_ coding:utf-8 _*_\nfrom PIL import Image, ImageSequence\n\nimgname = 'C:\\工作\\gif动图逆序播放\\科比投篮动图.'\nim = Image.open(imgname + '.gif')\n# 初始化列表\nsequence = []\n#在图像序列中遍历所有帧\ni= 1\nfor f in ImageSequence.Iterator(im):\n sequence.append(f.copy())\n f.save(imgname + '分解'+ str(i) + '.png')#文件名需要有后缀,知道什么格式\n i += 1\n# 将图像序列逆转\nsequence.reverse()\n\n#几张单的gif组合成动态图片\n#im.save(out, save_all = True, append_images=[im1, im2......]),这边的im只需要是Image对象即可\nsequence[0].save(r'C:\\工作\\gif动图逆序播放\\动图逆序.gif', save_all=True, append_images=sequence[0:], duration=30)#sequence[0]为Image对象,[1][2]…都可以\n","sub_path":"gif逆序播放.py","file_name":"gif逆序播放.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"134264317","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\nimport reverse_geocoder as rg\nimport sqlite3\n\ntry:\n csvfilename = sys.argv[1]\nexcept IndexError:\n print(\"Usage %s \" % sys.argv[0])\n exit(1)\n\nconn = sqlite3.connect('videos.db')\nc = conn.cursor()\nc.execute(\"CREATE TABLE IF NOT EXISTS videos (longitude float, latitude float, time datetime, id text PRIMARY KEY, country text, state text, city text)\")\n\n# first pass: build database\nwith open(csvfilename, newline='') as csvfile:\n for i, row in enumerate(csv.reader(csvfile)):\n if (i!=0):\n c.execute(\"REPLACE INTO videos (longitude, latitude, time, id) VALUES (?,?,?,?)\", row)\n conn.commit()\n\n# second pass: local geocode\nrows = [row for row in c.execute(\"SELECT latitude,longitude,id FROM videos ORDER BY id\")]\ngeos = rg.search([(row[0], row[1]) for row in rows ])\n\n# third pass: save geocoding\nfor r, row in enumerate(rows):\n g = geos[r]\n if g['admin1'] == 'Washington, D.C.':\n g['admin1'] = \"DC\"\n g['name'] = \"Washington\"\n c.execute(\"UPDATE videos SET country=?, state=?, city=? WHERE id=?\", ( g['cc'], g['admin1'], g['name'], row[2] ))\n conn.commit()\n print(g['cc'], g['admin1'], g['name'], row[2])","sub_path":"geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"166096124","text":" \n\"\"\"\nModule estiamates HousingModel using Simualted Method of Moments\nMinimisation performed using Cross-Entropy method (see Kroese et al)\n\nScript must be run using Mpi: \n\nExample (on Gadi):\n\nmodule load python3/3.7.4\nmodule load openmpi/4.0.2\n\nalias mpython='mpiexec -np 480 `which python3`'\n \nmpython SMM.py\n\n\"\"\"\n\n# import packages\n\nimport numpy as np\nimport time\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n\nfrom collections import defaultdict\nfrom numpy import genfromtxt\nimport csv\nimport time\nimport dill as pickle \nfrom randparam import rand_p_generator\nimport copy\nimport sys\nimport pandas as pd\n\nfrom profiles_moments import genprofiles_operator, gen_moments, sortmoments\nfrom housing_functions import housingmodel_function_factory\nfrom egg_basket import HousingModel, housingmodel_operator_factory\nfrom retiree_operators import housing_model_retiree_func_factory\n\nfrom pyina import mpi\nworld = mpi.world\n\nfrom mpi4py import MPI as MPI4py\ncomm = MPI4py.COMM_WORLD\n\nfrom pyina.mpi_pool import parallel_map\n\nimport gc\n\n\ndef gen_format_moments(TS1,TS2, moments_data):\n\t\"\"\"Gen simulated moments, labels\n\t\tand sorts simulated and data mooments\n\t\tand generates numpy arrays\"\"\"\n\n\tmoments_male \t= gen_moments(copy.copy(TS1),copy.copy(TS2)) \n\tmoments_female \t= gen_moments(copy.copy(TS1),copy.copy(TS2)) \n\tmoments_female \t= moments_female.add_suffix('_female')\n\tmoments_male \t= moments_male.add_suffix('_male')\n\tmoments_sim_sorted \t= sortmoments(moments_male,\\\n\t\t\t\t\t\t\t\t\t\t moments_female)\n\n\tmoments_sim_sorted \t\t= pd.concat([moments_male[\"Age_wave10_male\"]\\\n\t\t\t\t\t\t\t\t.reset_index().iloc[:,1],\\\n\t\t\t\t\t\t\t\tmoments_sim_sorted],\\\n\t\t\t\t\t\t\t\taxis =1) \n\tmoments_sim_sorted \t\t= moments_sim_sorted.rename(columns =\\\n\t\t\t\t\t\t\t {'Age_wave10_male':'Age_wave10'})\n\n\tmoments_data.columns \t= 
moments_sim_sorted.columns\n\n\t\n\tmoments_sim_sorted =\\\n\t\tmoments_sim_sorted\\\n\t\t.loc[:,moments_sim_sorted.columns.str.endswith('_male')] \n\t\n\tmoments_sim_array = np.array(np.ravel(moments_sim_sorted))\n\n\tmoments_sim_array[np.isnan(moments_sim_array)] = 0\n\n\tmoments_data =\\\n\tmoments_data.loc[:,moments_data.columns.str.endswith('_male')] \n\n\tmoments_data_array = np.array(np.ravel(moments_data))\n\n\treturn moments_sim_array, moments_data_array\n\ndef gen_RMS(parameters,lambdas,\\\n\t\t\tsurvival,\\\n\t\t\tmoments_data,\\\n\t\t\tvol_cont_points,\\\n\t\t\trisk_share_points, TSN,U):\n\t\"\"\"\n\tGenerate root mean square error \n\tbetween simulated moments for HousingModel \n\tand data moments \n\n\t\"\"\"\n\t# define functions \n\n\tfunctions = {}\n\n\tfunctions['u'], functions['uc'], functions['uh'], functions['b'], \\\n\tfunctions['b_prime'], functions['y'],functions['yvec'], functions['DB_benefit'], \\\n\tfunctions['adj_p'], functions['adj_v'], functions['adj_pi'],\\\n\tfunctions['uc_inv'],functions['uh_inv'],\\\n\t\t= housingmodel_function_factory(parameters,\\\n\t\t\t\t\t\t\t\t\t\t lambdas,\\\n\t\t\t\t\t\t\t\t\t\t normalisation)\n\n\t# Create housing model \n\tog = HousingModel(functions, parameters, survival,\\\n\t\t\t\t\t\t\t\t\t\tvol_cont_points,\\\n\t\t\t\t\t\t\t\t\t\trisk_share_points)\n\t\n\t# solve model \n\tgen_R_pol = housing_model_retiree_func_factory(og)\n\n\tsolve_LC_model = housingmodel_operator_factory(og,gen_R_pol)\n\n\tpolicies \t= (solve_LC_model())\n\n\t# generate time series \n\tgenerate_TSDF = genprofiles_operator(og)\n\n\tdel og\n\tgc.collect() \n\n\tTS1, TS2 = generate_TSDF(U,TSN, *policies)\n\n\t# generate and sort moments\n\n\tmoments_sim_array, moments_data_array \\\n\t= gen_format_moments(TS1, TS2, moments_data)\n\n\tdel TS1\n\tdel TS2 \n\tgc.collect()\n\n\tdeviation = (moments_sim_array\\\n\t\t\t\t\t\t\t\t[~np.isnan(moments_data_array)]\\\n\t\t\t\t\t\t\t\t - moments_data_array\\\n\t\t\t\t\t\t\t\t 
[~np.isnan(moments_data_array)])\n\n\tnorm = np.sum(np.square(moments_data_array[~np.isnan(moments_data_array)]))\n\n\n\n\tN_err = len(deviation)\n\n\treturn 1-np.sqrt((1/N_err)*np.sum(np.square(deviation))/norm)\n\ndef gen_param_moments(parameter_list_dict, param_random_bounds,\\\n\t\t\t\t\t\t selected, weights):\n\n\t\"\"\" Estiamate params of a sampling distribution\n\n\tParameters\n\t----------\n\tparameter_list_dict: Dict\n\t\t\t\t\t\t Dictionary with all paramameters\n\t\t\t\t\t\t with ID keys\n\tselected : 2D-array\n\t\t\t\t\t\t set of elite paramters IDs and errors\n\n\tReturns\n\t-------\n\n\tmeans\n\n\tcov\n\t\"\"\"\n\n\tsample_params = []\n\n\tfor i in range(len(selected)):\n\t\trand_params_i = []\n\t\tfor key in param_random_bounds.keys():\n\t\t\trand_params_i.append(\\\n\t\t\t\tparameter_list_dict[int(selected[i,0])][key])\n\t\t\n\t\tsample_params.append(rand_params_i)\n\n\tsample_params = np.array(sample_params)\n\tmeans = np.average(sample_params, weights = weights, axis=0)\n\tcov = np.cov(sample_params, aweights =weights, rowvar=0)\n\n\treturn means, cov\n\n\nif __name__ == \"__main__\":\n\n\n\n\tnormalisation = np.array([1E-5, 100])\n\tparam_deterministic = {}\n\tparam_random_bounds = {}\n\n\tsettings_folder = '/home/141/as3442/Retirementeggs/settings'\n\n\n\t# un-pack model settings \n\n\twith open('{}/parameters_EGM_base.csv'.format(settings_folder),\\\n\t\tnewline='') as pscfile:\n\t\treader = csv.DictReader(pscfile)\n\t\tfor row in reader:\n\t\t\tparam_deterministic[row['parameter']] = np.float64(row['value'])\n\n\twith open('{}/random_param_bounds.csv'\\\n\t\t.format(settings_folder), newline='') as pscfile:\n\t\treader_ran = csv.DictReader(pscfile)\n\t\tfor row in reader_ran:\n\t\t\tparam_random_bounds[row['parameter']] = np.float64([row['LB'],\\\n\t\t\t\trow['UB']])\n\n\tlambdas = genfromtxt('{}/lambdas_male.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[1:]\n\tsurvival = 
genfromtxt('{}/survival_probabilities_male.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[0:]\n\tvol_cont_points = genfromtxt('{}/vol_cont_points.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[1:]\n\trisk_share_points = genfromtxt('{}/risk_share_points.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[1:]\n\t\n\n\t# load and prepare data moments \n\tmoments_data = pd.read_csv('{}/moments_data.csv'\\\n\t\t\t\t\t.format(settings_folder))\n\tmoments_data = moments_data.drop('Unnamed: 0', axis=1) \n\n\t# run SMM estimation \n\n\ttol \t= 1E-8\n\tTSN \t= 150\n\tN_elite = 45\n\td \t= 3\n\n\tstart = time.time()\n\t# pick previous parameters settings and means\n\tgamma_XEM \t= pickle.load(open(\"/scratch/pv33/gamma_XEM.smms\",\"rb\"))\n\tS_star \t\t= pickle.load(open(\"/scratch/pv33/S_star.smms\",\"rb\"))\n\tt \t\t\t= pickle.load(open(\"/scratch/pv33/t.smms\",\"rb\"))\n\n\tsampmom \t= pickle.load(open(\"/scratch/pv33/latest_means_iter.smms\",\"rb\"))\n\n\t# generate new parameter sample (each worker generates a random sample)\n\n\tif t ==0:\n\t\tinitial =0\n\telse:\n\t\tinitial = 0\n\n\tparameters = rand_p_generator(param_deterministic,\\\n\t\t\t\t\t\t\t\tparam_random_bounds, deterministic = 0,\\\n\t\t\t\t\t\t\t\tinitial =initial,\\\n\t\t\t\t\t\t\t\tparam_random_means = sampmom[0],\\\n\t\t\t\t\t\t\t\tparam_random_cov = sampmom[1])\n\n\tt = t+1\n\n\tindexed_errors = None\n\tparameter_list = None\n\n\t# eval model on each worker \n\tif world.rank ==0:\n\t\tprint(\"Distributng iter {}\".format(t))\n\t\terrors_ind = [0,0]\n\telse:\n\t\tdef SMM_objective():\n\t\t\t\"\"\"SMM objective to be maximised \n\t\t\tas function of params\"\"\"\n\t\t\tparameters_all = parameters\n\t\t\tU = pickle.load(open(\"/scratch/pv33/seed_U.smms\",\"rb\")) \n\t\t\t#U = np.random.rand(6,100,TSN,100) \n\t\t\tRMS = 
gen_RMS(parameters_all,lambdas,\\\n\t\t\t\t\t\tsurvival,\\\n\t\t\t\t\t\tmoments_data,\\\n\t\t\t\t\t\tvol_cont_points,\\\n\t\t\t\t\t\trisk_share_points,TSN,U)\n\n\t\t\treturn [parameters_all['ID'], RMS]\n\t\terrors_ind = SMM_objective()\n\t\tdel SMM_objective\n\t\tgc.collect()\n\n\tcomm.Barrier()\n\tindexed_errors \t= comm.gather(errors_ind, root=0)\n\tparameter_list \t= comm.gather(parameters, root=0)\n\n\t# master does calculations\n\t\n\tif world.rank ==0:\n\t\tparameter_list_dict = dict([(param['ID'], param)\\\n\t\t\t\t\t\t\t for param in parameter_list[1:]])\n\t\tindexed_errors_arr = np.array(indexed_errors[1:])\n\t\tindexed_errors_arr = indexed_errors_arr[np.argsort(\\\n\t\t\t\t\t\t\t\t\t-indexed_errors_arr[:,1])]\n\t\tnumber_N \t\t\t\t= len(indexed_errors_arr) - np.sum(np.isnan(indexed_errors_arr[:,1]))\n\n\n\t\telite_errors_indexed = indexed_errors_arr[0: N_elite]\n\n\t\tweights \t\t\t\t= np.exp((elite_errors_indexed[:,1] - np.min(elite_errors_indexed[:,1]))\\\n\t\t\t\t\t\t\t\t\t\t/ (np.max(elite_errors_indexed[:,1]) -np.min(elite_errors_indexed[:,1])))\n\n\n\t\tgamma_XEM = np.append(gamma_XEM,\\\n\t\t\t\t\t\t\t\t\t elite_errors_indexed[-1, 1])\n\t\tS_star = np.append(S_star,\\\n\t\t\t\t\t\t\t\t\t elite_errors_indexed[0, 1])\n\n\t\terror_gamma = gamma_XEM[d +t-1] \\\n\t\t\t\t\t\t\t\t\t- gamma_XEM[d +t -2]\n\t\terror_S = S_star[int(d +t-1)]\\\n\t\t\t\t\t\t\t\t\t- S_star[int(d +t -2)]\n\n\t\tprint(\"...iteration {} on {} cores, elite_gamma error are {} and elite S error are {}\"\\\n\t\t\t.format(t, number_N, error_gamma, error_S))\n\n\t\tconvg = int(np.abs(max(S_star[-d:]) - min(S_star[-d:]))< tol)\n\n\t\tprint(\"...stop_error is {}, convergence is {}\".format(np.abs(max(S_star[-d:]) - min(S_star[-d:])), convg))\n\n\t\tmeans, cov = gen_param_moments(parameter_list_dict,\\\n\t\t\t\t\t\t\t\tparam_random_bounds,\\\n\t\t\t\t\t\t\t\telite_errors_indexed, weights)\n\n\t\tconvg_cov\t\t= int(np.abs(np.max(cov))< tol )\n\t\tprint(\"...cov error is {}, convergence is 
{}\".format((np.abs(np.max(cov))), convg_cov))\n\n\t\tpickle.dump([means, cov],\\\n\t\t\t\t\t\topen(\"/scratch/pv33/latest_means_iter.smms\",\"wb\"))\n\t\tpickle.dump(gamma_XEM,\\\n\t\t\t\t\t\topen(\"/scratch/pv33/gamma_XEM.smms\",\"wb\"))\n\t\tpickle.dump(S_star,\\\n\t\t\t\t\t\topen(\"/scratch/pv33/S_star.smms\",\"wb\"))\n\n\t\tpickle.dump(t,\\\n\t\t\t\t\t\topen(\"/scratch/pv33/t.smms\",\"wb\"))\n\t\t\n\t\tprint(\"...generated and saved sampling moments\")\n\t\tprint(\"...time elapsed: {} minutes\".format((time.time()-start)/60))\n\n","sub_path":"eggsandbaskets/smm/smm.py","file_name":"smm.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"107195588","text":"# Окно с событиями\nfrom tkinter import *\n\n# Функция для события\ndef button1Click() :\n Display.config(text=\"Это радует!\")\ndef button2Click() :\n Display.config(text=\"Это огорчает!\")\n\n# Основная программа\nWindow = Tk()\nDisplay = Label(Window, text=\"Привет, как дела?\")\nDisplay.grid(row=0, column=1)\nButton1 = Button(Window, text=\"Хорошо\", command=button1Click)\nButton2 = Button(Window, text=\"Плохо\", command=button2Click)\nButton1.grid(row=2, column=0, padx=10, pady=10)\nButton2.grid(row=2, column=2, padx=10, pady=10)\nWindow.mainloop()\n\n","sub_path":"python_for_kids/book/Examples/window6.py","file_name":"window6.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"68720793","text":"categories = ['brand', 'seats', 'color', 'price']\n\navailable_cars = {\n\t\n\t'impreza' : {\n\t'brand' : 'subaru',\n\t'seats' : 5,\n\t'color' : 'red',\n\t'price' : 40,\n\t},\n\n\t'mustang' : {\n\t'brand' : 'ford',\n\t'seats' : 2,\n\t'color' : 'white',\n\t'price' : 55,\n\t},\n\n\t'sonata' : {\n\t'brand' : 'hyundai',\n\t'seats' : 5,\n\t'color' : 'silver',\n\t'price' : 35,\n\t},\n}\n\n\n\nprompt = \"Welcome to my car rental service.\"\nprompt += \"\\nWhat are you looking for in a car?\"\nprompt += \"\\n\\n- - -\"\nprompt += \"\\n\\nBrand\"\nprompt += \"\\nSeats\"\nprompt += \"\\nColor\"\nprompt += \"\\nPrice\"\nprompt += \"\\n\\nPlease type which of these is your priority: \"\n\npriority = raw_input(prompt).lower()\n\n\nif priority in categories:\n\tprint(\"\\nWe will match you with vehicles that fit with your priorities.\")\n\tprint(\"Your top priority is: \" + priority.title() + \"\\n\")\n\t\n\tif priority == 'color':\n\t\tprint(\"We have cars available in these colors:\")\n\t\tfor car, aspect in available_cars.items():\n\t\t\tprint(\"\\t\" + aspect['color'].title())\n\t\tcolor_choice = raw_input(\"\\nWhich of these colors do you prefer?\\nPlease enter your choice: \").lower()\n\t\tfor car, aspect in available_cars.items():\n\t\t\tif color_choice in aspect['color']:\n\t\t\t\tprint(\"\\nBased on your priorities, we recommend this car:\\n\" + \n\t\t\t\t\taspect['brand'].title() + \" \" + car.title() + \", with the following characteristics:\\n\" + \n\t\t\t\t\t\"Seats: \" + str(aspect['seats']) + \"\\n\" +\n\t\t\t\t\t\"Color: \" + aspect['color'].title() + \"\\n\" +\n\t\t\t\t\t\"Price: $\" + str(aspect['price']) + \" per day\"\n\t\t\t\t\t)\n\t\t\n\tif priority == 'brand':\n\t\tprint(\"We have cars available from these brands:\")\n\t\tfor car, aspect in available_cars.items():\n\t\t\tprint(\"\\t\" + aspect['brand'].title())\n\t\tbrand_choice = raw_input(\"\\nWhich of these brands do you prefer?\\nPlease enter your choice: 
\").lower()\n\t\tfor car, aspect in available_cars.items():\n\t\t\tif brand_choice in aspect['brand']:\n\t\t\t\tprint(\"\\nBased on your priorities, we recommend this car:\\n\" + \n\t\t\t\t\taspect['brand'].title() + \" \" + car.title() + \", with the following characteristics:\\n\" + \n\t\t\t\t\t\"Seats: \" + str(aspect['seats']) + \"\\n\" +\n\t\t\t\t\t\"Color: \" + aspect['color'].title() + \"\\n\" +\n\t\t\t\t\t\"Price: $\" + str(aspect['price']) + \" per day\"\n\t\t\t\t\t)\n\t\n\tif priority == 'seats':\n\t\tprint(\"We have cars available with these numbers of seats:\")\n\t\tfor car, aspect in available_cars.items():\n\t\t\tprint(\"\\t\" + str(aspect['seats']))\n\t\tseats_choice = raw_input(\"\\nHow many seats do you want to have?\\nPlease enter your choice: \").lower()\n\t\tprint(\"\\nBased on your priorities, there may be a number of a number of car that we recommend:\\n\")\n\t\t# for car, aspect in available_cars.items():\n\t\t# \tif int(seats_choice) in aspect['seats']:\n\t\t\t\t\n\n\n\n\nelse:\n\tprint(\"\\nYour top priority is: \" + priority.title() + \"\\nSorry, your priority does not match with any of our available cars.\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"23983755","text":"from spinn_utilities.progress_bar import ProgressBar\nfrom .scp_update_runtime_request import SCPUpdateRuntimeRequest\nfrom spinn_front_end_common.utilities.constants import SDP_PORTS\nfrom spinnman.processes import AbstractMultiConnectionProcess\n\n\nclass UpdateRuntimeProcess(AbstractMultiConnectionProcess):\n def __init__(self, connection_selector):\n AbstractMultiConnectionProcess.__init__(self, connection_selector)\n self._progress = None\n\n def receive_response(self, response): # @UnusedVariable\n if self._progress is not None:\n self._progress.update()\n\n def update_runtime(self, run_time, infinite_run, core_subsets, n_cores):\n self._progress = ProgressBar(n_cores, \"Updating run time\")\n for core_subset in core_subsets:\n for processor_id in core_subset.processor_ids:\n self._send_request(\n SCPUpdateRuntimeRequest(\n core_subset.x, core_subset.y, processor_id,\n run_time, infinite_run,\n SDP_PORTS.RUNNING_COMMAND_SDP_PORT.value),\n callback=self.receive_response)\n self._finish()\n self._progress.end()\n self.check_for_error()\n","sub_path":"spinn_front_end_common/utilities/scp/update_runtime_process.py","file_name":"update_runtime_process.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"202931919","text":"from face_predictor import *\n\nif __name__ == '__main__':\n\n\timg = 'image.jpg'\n\tres = []\n\n\tfor i, j, tile in img_tiles(img):\n\t\tif face_or_not(tile):\n\t\t\tres.append([i,j])\n\tprint(res)","sub_path":"face_test.py","file_name":"face_test.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"148768705","text":"import re\ndef Str_encode(s:str,rule='utf-8'):\n '''\n 将明文字符串按照rule的格式转化为01字符串\n :param s: 待编码字符串\n :param rule: 编码方案 默认utf-8\n :return: 字符串对应01字符串\n '''\n sc=s.encode(rule)\n bc=[bin(int(i))[2:].rjust(8,'0') for i in sc ]\n rtn=''.join(bc)\n return rtn\n\ndef Str_decode(s:str,rule='utf-8'):\n '''\n 将01字符串(不加任何标识符和纠错码)转化为对应的明文字符串(默认UTF-8)\n :param s:01字符串\n :return:解码原文\n '''\n if len(s)==0:\n return '>>内容为空<<'\n if len(s)%8!=0:\n raise SyntaxError('编码不是八的倍数')\n #至少是字节的倍数才能操作\n msg=re.sub(r'0x','',hex(int(s,2)))\n rtn=bytes.fromhex(msg).decode(rule)\n return rtn\n\nif __name__==\"__main__\":\n print(\"输入要转换的字符串:\")\n message=input()\n bit=Str_encode(message)\n print(bit)\n res = Str_decode(bit)\n print(re)\n","sub_path":"client/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"36721943","text":"# In[1]:\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport datetime\r\nfrom sklearn import preprocessing\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,ExtraTreesClassifier,AdaBoostClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.models import load_model\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.utils import to_categorical\r\n\r\n\r\ndef get_data():\r\n #loading processed data\r\n data = pd.read_pickle(\"music.pickle\")\r\n test = pd.read_pickle(\"test.pickle\")\r\n #Target Generation\r\n target=data['is_listened']\r\n del data['is_listened']\r\n test2=test.iloc[:,1:]\r\n data=pd.concat([data,test2],axis=0)\r\n data=data.fillna(0)\r\n\r\n #dummies creation\r\n dummie1 = pd.get_dummies(data['context_type'], prefix='context_type', prefix_sep='_')\r\n dummie2 = pd.get_dummies(data['platform_name'], prefix='platform_name', prefix_sep='_')\r\n dummie3 = pd.get_dummies(data['platform_family'], prefix='platform_family', prefix_sep='_')\r\n dummie4 = pd.get_dummies(data['listen_type'], prefix='listen_type', prefix_sep='_')\r\n dummie5 = pd.get_dummies(data['user_gender'], prefix='user_gender', prefix_sep='_')\r\n dummie6 = pd.get_dummies(data['genre_id'], prefix='genre_id', prefix_sep='_')\r\n dummie7 = pd.get_dummies(data['album_id'], prefix='album_id', prefix_sep='_')\r\n dummie8 = pd.get_dummies(data['release_date'], prefix='album_id', prefix_sep='_')\r\n dummie9 = pd.get_dummies(data['usergencluster'], prefix='usergencluster', prefix_sep='_')\r\n dummie10 = pd.get_dummies(data['artist_id'], prefix='artist_id', 
prefix_sep='_')\r\n dummie11 = pd.get_dummies(data['usermedcluster'], prefix='usermedcluster', prefix_sep='_')\r\n dummie12= pd.get_dummies(data['useralbcluster'], prefix='useralbcluster', prefix_sep='_')\r\n dummie13= pd.get_dummies(data['userartcluster'], prefix='userartcluster', prefix_sep='_')\r\n dummie14= pd.get_dummies(data['userdatecluster'], prefix='userdatecluster', prefix_sep='_')\r\n\r\n data=pd.concat([data,dummie1,dummie2,dummie3,dummie4,dummie5,dummie6,dummie7,dummie8,dummie9,dummie10,dummie11,dummie12,dummie13,dummie14],axis=1)\r\n\r\n # Generating normalised features\r\n min_max_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))\r\n data['user_age_cent'] = min_max_scaler.fit_transform(data['user_age'])\r\n data['media_duration_cent'] = min_max_scaler.fit_transform(data['media_duration'])\r\n data['Bcent'] = min_max_scaler.fit_transform(data['B'])\r\n data['Ccent'] = min_max_scaler.fit_transform(data['C'])\r\n data['Dcent'] = min_max_scaler.fit_transform(data['D'])\r\n data['Ecent'] = min_max_scaler.fit_transform(data['E'])\r\n data['Fcent'] = min_max_scaler.fit_transform(data['F'])\r\n data['Gcent'] = min_max_scaler.fit_transform(data['G'])\r\n \r\n return data,target\r\n \r\n \r\ndef get_model(n_cols):\r\n# Model\r\n model = Sequential()\r\n model.add(Dense(98 , activation = 'relu' , input_shape = (n_cols,)))\r\n model.add(Dense(98 , activation = 'relu'))\r\n model.add(Dense(56 , activation = 'relu'))\r\n model.add(Dense(56 , activation = 'relu'))\r\n model.add(Dense(56 , activation = 'relu'))\r\n model.add(Dense(42 , activation = 'relu'))\r\n model.add(Dense(14 , activation = 'relu'))\r\n model.add(Dense(2 , activation = 'softmax'))\r\n model.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])\r\n \r\n return model\r\n \r\ndef prediction(test_data):\r\n # test prediction\r\n preds = model.predict_proba(test_data, verbose=0)[:, 1]\r\n submission = pd.DataFrame(preds, columns=['is_listened'])\r\n 
submission.to_csv('Keras_21054_b2.csv')\r\n\r\n\r\ndata,target = get_data()\r\n# converting to matrix \r\npredictors = data.iloc[:, 31:633].as_matrix()# assign feature dataframe to predictors removing the target column\r\ntarget1 = to_categorical(target)# assign target dataframe to y\r\nn_cols = predictors.shape[1]\r\npredictors.shape\r\n\r\n# train model\r\nmodel = get_model(n_cols)\r\nearly_stopping_monitor = EarlyStopping(patience = 2)\r\nmodel.fit(predictors[0:7558834,:] , target1 , validation_split = 0.20 , epochs = 2 , callbacks = [early_stopping_monitor])\r\nprediction(predictors[7558834:, :])\r\n\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"153618313","text":"# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests to assure the FeeSchedule Service.\n\nTest-Suite to ensure that the FeeSchedule Service is working as expected.\n\"\"\"\n\nfrom typing import Dict\n\nfrom pay_api.services.payment_account import PaymentAccount as PaymentAccountService\nfrom pay_api.utils.enums import PaymentMethod\n\nfrom tests.utilities.base_test import (\n factory_payment_account, factory_premium_payment_account, get_auth_basic_user, get_auth_premium_user)\n\n\ndef test_account_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_payment_account()\n payment_account.save()\n business_info: Dict = {\n 'businessIdentifier': payment_account.corp_number,\n 'corpType': payment_account.corp_type_code\n }\n\n pa = PaymentAccountService.find_account(business_info, get_auth_basic_user(), 'PAYBC')\n\n assert pa is not None\n assert pa.id is not None\n assert pa.corp_number is not None\n assert pa.corp_type_code is not None\n\n\ndef test_direct_pay_account_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_payment_account(payment_method_code=PaymentMethod.DIRECT_PAY.value)\n payment_account.save()\n business_info: Dict = {\n 'businessIdentifier': payment_account.corp_number,\n 'corpType': payment_account.corp_type_code\n }\n\n pa = 
PaymentAccountService.find_account(business_info, get_auth_basic_user(), 'PAYBC')\n\n assert pa is not None\n assert pa.id is not None\n assert pa.corp_number is not None\n assert pa.corp_type_code is not None\n\n\ndef test_premium_account_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_premium_payment_account()\n payment_account.save()\n\n pa = PaymentAccountService.find_account({}, get_auth_premium_user(),\n payment_system='BCOL', payment_method=PaymentMethod.DRAWDOWN.value)\n\n assert pa is not None\n assert pa.id is not None\n\n\ndef test_account_invalid_lookup(session):\n \"\"\"Invalid account test.\"\"\"\n business_info: Dict = {\n 'businessIdentifier': '1234',\n 'corpType': 'CP'\n }\n\n p = PaymentAccountService.find_account(business_info, get_auth_basic_user(), 'PAYBC')\n\n assert p is not None\n assert p.id is None\n import pytest\n from pay_api.exceptions import BusinessException\n from pay_api.utils.errors import Error\n with pytest.raises(BusinessException) as excinfo:\n PaymentAccountService.find_account({}, get_auth_basic_user(), 'PAYBC')\n assert excinfo.value.code == Error.INVALID_CORP_OR_FILING_TYPE.name\n\n\ndef test_account_invalid_premium_account_lookup(session):\n \"\"\"Invalid account test.\"\"\"\n business_info: Dict = {\n }\n\n p = PaymentAccountService.find_account(business_info, get_auth_premium_user(), 'BCOL')\n\n assert p is not None\n assert p.id is None\n import pytest\n from pay_api.exceptions import BusinessException\n from pay_api.utils.errors import Error\n with pytest.raises(BusinessException) as excinfo:\n PaymentAccountService.find_account(business_info, {}, 'BCOL')\n assert excinfo.value.code == 
Error.INCOMPLETE_ACCOUNT_SETUP.name\n","sub_path":"pay-api/tests/unit/services/test_payment_account.py","file_name":"test_payment_account.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"198994363","text":"\"\"\"Shut up *print* statements in Python 2.x\"\"\"\nimport dis\nimport types\n \n \nALL_PRINT = 'PRINT_EXPR', 'PRINT_ITEM', 'PRINT_ITEM_TO', 'PRINT_NEWLINE', 'PRINT_NEWLINE_TO'\nNO_PRINTTO = 'PRINT_EXPR', 'PRINT_ITEM', 'PRINT_NEWLINE'\n \nneed_pop = 'PRINT_EXPR', 'PRINT_ITEM', 'PRINT_NEWLINE_TO'\nneed_2pop = 'PRINT_ITEM_TO',\n \n \nPOP_TOP = chr(dis.opmap['POP_TOP'])\n \n \ndef shutup(fn, print_to=1):\n code = fn.func_code\n code_ops = list(code.co_code)\n new_codestr = []\n print_ops = ALL_PRINT if print_to else NO_PRINTTO\n while code_ops:\n op = code_ops.pop(0)\n if dis.opname[ord(op)] not in print_ops:\n new_codestr.append(op)\n elif dis.opname[ord(op)] in need_pop:\n new_codestr.append(POP_TOP)\n elif dis.opname[ord(op)] in need_2pop:\n new_codestr.append(POP_TOP)\n new_codestr.append(POP_TOP)\n if op == dis.HAVE_ARGUMENT:\n new_codestr.append(code_ops.pop(0))\n new_code = types.CodeType(code.co_argcount, code.co_nlocals,\n code.co_stacksize, code.co_flags, \n ''.join(new_codestr), code.co_consts,\n code.co_names, code.co_varnames,\n code.co_filename, code.co_name,\n code.co_firstlineno, code.co_lnotab,\n code.co_freevars, code.co_cellvars)\n new_fn = types.FunctionType(new_code, fn.func_globals, fn.func_name,\n fn.func_defaults, fn.func_closure)\n if hasattr(fn, '__doc__'):\n new_fn.__doc__ = getattr(fn, '__doc__', None)\n return new_fn\n","sub_path":"shutup.py","file_name":"shutup.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"466082554","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"loss\"\"\"\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import functional as F\nfrom mindspore.nn.cell import Cell\nfrom mindspore._checkparam import Validator as validator\nfrom mindspore._checkparam import Rel\nfrom ... 
import context\n\n\nclass _Loss(Cell):\n \"\"\"\n Base class for other losses.\n \"\"\"\n def __init__(self, reduction='mean'):\n super(_Loss, self).__init__()\n if reduction is None:\n reduction = 'none'\n\n if reduction not in ('mean', 'sum', 'none'):\n raise ValueError(f\"reduction method for {reduction.lower()} is not supported\")\n\n self.average = True\n self.reduce = True\n if reduction == 'sum':\n self.average = False\n if reduction == 'none':\n self.reduce = False\n\n self.reduce_mean = P.ReduceMean()\n self.reduce_sum = P.ReduceSum()\n\n def get_axis(self, x):\n shape = F.shape(x)\n length = F.tuple_len(shape)\n perm = F.make_range(0, length)\n return perm\n\n def get_loss(self, x):\n if self.reduce and self.average:\n x = self.reduce_mean(x, self.get_axis(x))\n if self.reduce and not self.average:\n x = self.reduce_sum(x, self.get_axis(x))\n return x\n\n def construct(self, base, target):\n raise NotImplementedError\n\n\nclass L1Loss(_Loss):\n r\"\"\"\n L1Loss creates a criterion to measure the mean absolute error (MAE) between :math:`x` and :math:`y` by element,\n where :math:`x` is the input Tensor and :math:`y` is the target Tensor.\n\n For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,\n the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:\n\n .. math::\n L(x, y) = \\{l_1,\\dots,l_N\\}, \\quad \\text{with } l_n = \\left| x_n - y_n \\right|\n\n When argument reduction is 'mean', the mean value of :math:`L(x, y)` will be returned.\n When argument reduction is 'sum', the sum of :math:`L(x, y)` will be returned. :math:`N` is the batch size.\n\n Args:\n reduction (str): Type of reduction to apply to loss. 
The optional values are \"mean\", \"sum\", \"none\".\n Default: \"mean\".\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, loss float tensor.\n\n Examples:\n >>> loss = nn.L1Loss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n \"\"\"\n def __init__(self, reduction='mean'):\n super(L1Loss, self).__init__(reduction)\n self.abs = P.Abs()\n\n def construct(self, base, target):\n x = self.abs(base - target)\n return self.get_loss(x)\n\n\nclass MSELoss(_Loss):\n r\"\"\"\n MSELoss create a criterion to measures the mean squared error (squared L2-norm) between :math:`x` and :math:`y`\n by element, where :math:`x` is the input and :math:`y` is the target.\n\n For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,\n the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:\n\n .. math::\n L(x, y) = \\{l_1,\\dots,l_N\\}, \\quad \\text{with} \\quad l_n = (x_n - y_n)^2.\n\n When argument reduction is 'mean', the mean value of :math:`L(x, y)` will be returned.\n When argument reduction is 'sum', the sum of :math:`L(x, y)` will be returned. :math:`N` is the batch size.\n\n Args:\n reduction (str): Type of reduction to apply to loss. 
The optional values are \"mean\", \"sum\", \"none\".\n Default: \"mean\".\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, weighted loss float tensor.\n\n Examples:\n >>> loss = nn.MSELoss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n \"\"\"\n def construct(self, base, target):\n x = F.square(base - target)\n return self.get_loss(x)\n\n\nclass SmoothL1Loss(_Loss):\n r\"\"\"\n A loss class for learning region proposals.\n\n SmoothL1Loss can be regarded as modified version of L1Loss or a combination of L1Loss and L2Loss.\n L1Loss computes the element-wise absolute difference between two input Tensor while L2Loss computes the\n squared difference between two input Tensor. L2Loss often leads to faster convergence but it is less\n robust to outliers.\n\n Given two input :math:`x,\\ y` of length :math:`N`, the unreduced SmoothL1Loss can be described\n as follows:\n\n .. math::\n L_{i} =\n \\begin{cases}\n 0.5 (x_i - y_i)^2, & \\text{if } |x_i - y_i| < \\text{sigma}; \\\\\n |x_i - y_i| - 0.5, & \\text{otherwise. }\n \\end{cases}\n\n Here :math:`\\text{sigma}` controls the point where the loss function changes from quadratic to linear.\n Its default value is 1.0. :math:`N` is the batch size. This function returns an\n unreduced loss Tensor.\n\n Args:\n sigma (float): A parameter used to control the point where the function will change from\n quadratic to linear. 
Default: 1.0.\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, loss float tensor.\n\n Examples:\n >>> loss = nn.SmoothL1Loss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n \"\"\"\n def __init__(self, sigma=1.0):\n super(SmoothL1Loss, self).__init__()\n self.sigma = sigma\n self.smooth_l1_loss = P.SmoothL1Loss(self.sigma)\n\n def construct(self, base, target):\n return self.smooth_l1_loss(base, target)\n\n\nclass SoftmaxCrossEntropyWithLogits(_Loss):\n r\"\"\"\n Computes softmax cross entropy between logits and labels.\n\n Measures the distribution error between the probabilities of the input (computed with softmax function) and the\n target where the classes are mutually exclusive (only one class is positive) using cross entropy loss.\n\n Typical input into this function is unnormalized scores and target of each class.\n Scores Tensor :math:`x` is of shape :math:`(N, C)` and target Tensor :math:`t` is a\n Tensor of shape :math:`(N, C)` which contains one-hot labels of length :math:`C`.\n\n For each instance :math:`N_i`, the loss is given as:\n\n .. math::\n \\ell(x_i, t_i) = - \\log\\left(\\frac{\\exp(x_{t_i})}{\\sum_j \\exp(x_j)}\\right)\n = -x_{t_i} + \\log\\left(\\sum_j \\exp(x_i)\\right),\n where :math:`x_i` is a 1D score Tensor, :math:`t_i` is a scalar.\n\n Note:\n While the target classes are mutually exclusive, i.e., only one class is positive in the target, the predicted\n probabilities need not be exclusive. All that is required is that the predicted probability distribution\n of entry is a valid one.\n\n Args:\n is_grad (bool): Specifies whether calculate grad only. Default: True.\n sparse (bool): Specifies whether labels use sparse format or not. 
Default: False.\n reduction (Union[str, None]): Type of reduction to apply to loss. Support 'sum' or 'mean' If None,\n do not reduction. Default: None.\n smooth_factor (float): Label smoothing factor. It is a optional input. Default: 0.\n num_classes (int): The number of classes in the task. It is a optional input Default: 2.\n\n Inputs:\n - **logits** (Tensor) - Tensor of shape (N, C).\n - **labels** (Tensor) - Tensor of shape (N, ). If `sparse` is True, The type of\n `labels` is mindspore.int32. If `sparse` is False, the type of `labels` is same as the type of `logits`.\n\n Outputs:\n Tensor, a tensor of the same shape as logits with the component-wise\n logistic losses.\n\n Examples:\n >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)\n >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)\n >>> labels_np = np.ones([1,]).astype(np.int32)\n >>> labels = Tensor(labels_np)\n >>> loss(logits, labels)\n \"\"\"\n def __init__(self,\n is_grad=True,\n sparse=False,\n reduction=None,\n smooth_factor=0,\n num_classes=2):\n super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)\n self.is_grad = is_grad\n self.sparse = sparse\n validator.check_integer(\"num_classes\", num_classes, 1, Rel.GT, self.cls_name)\n validator.check_number_range(\"smooth_factor\", smooth_factor, 0, 1, Rel.INC_BOTH, self.cls_name)\n self.smooth_factor = smooth_factor\n self.num_classes = num_classes\n self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0 - self.smooth_factor, mstype.float32)\n self.off_value = Tensor(1.0 * self.smooth_factor / (self.num_classes - 1), mstype.float32)\n self.is_cpugpu = context.get_context('device_target') in [\"CPU\", \"GPU\"]\n\n if self.is_cpugpu:\n self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=self.is_grad)\n\n def construct(self, logits, labels):\n if self.is_cpugpu and self.sparse:\n x = self.sparse_softmax_cross_entropy(logits, 
labels)\n return x\n\n if self.sparse:\n labels = self.one_hot(labels, F.shape(logits)[-1], self.on_value, self.off_value)\n x = self.softmax_cross_entropy(logits, labels)[0]\n return self.get_loss(x)\n\n\nclass SoftmaxCrossEntropyExpand(Cell):\n r\"\"\"\n Computes softmax cross entropy between logits and labels. Implemented by expanded formula.\n\n This is a wrapper of several functions.\n\n .. math::\n \\ell(x_i, t_i) = -log\\left(\\frac{\\exp(x_{t_i})}{\\sum_j \\exp(x_j)}\\right),\n where :math:`x_i` is a 1D score Tensor, :math:`t_i` is the target class.\n\n Note:\n When argument sparse is set to True, the format of label is the index\n range from :math:`0` to :math:`C - 1` instead of one-hot vectors.\n\n Args:\n sparse(bool): Specifies whether labels use sparse format or not. Default: False.\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, a scalar tensor including the mean loss.\n\n Examples:\n >>> loss = nn.SoftmaxCrossEntropyExpand(sparse=True)\n >>> input_data = Tensor(np.ones([64, 512]), dtype=mindspore.float32)\n >>> label = Tensor(np.ones([64]), dtype=mindspore.int32)\n >>> loss(input_data, label)\n \"\"\"\n def __init__(self, sparse=False):\n super(SoftmaxCrossEntropyExpand, self).__init__()\n self.exp = P.Exp()\n self.reduce_sum = P.ReduceSum(keep_dims=True)\n self.onehot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.0, mstype.float32)\n self.div = P.Div()\n self.log = P.Log()\n self.sum_cross_entropy = P.ReduceSum(keep_dims=False)\n self.mul = P.Mul()\n self.mul2 = P.Mul()\n self.cast = P.Cast()\n self.reduce_mean = P.ReduceMean(keep_dims=False)\n self.sparse = sparse\n self.reduce_max = P.ReduceMax(keep_dims=True)\n self.sub = P.Sub()\n\n def construct(self, logit, label):\n logit_max = self.reduce_max(logit, -1)\n exp = self.exp(self.sub(logit, logit_max))\n exp_sum = 
self.reduce_sum(exp, -1)\n softmax_result = self.div(exp, exp_sum)\n if self.sparse:\n label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)\n\n softmax_result_log = self.log(softmax_result)\n loss = self.sum_cross_entropy((self.mul(softmax_result_log, label)), -1)\n loss = self.mul2(F.scalar_to_array(-1.0), loss)\n loss = self.reduce_mean(loss, -1)\n\n return loss\n","sub_path":"mindspore/nn/loss/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"230461691","text":"####set up####\n##load libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n##read in data\ndat = pd.read_csv(\"CVOE data 8_8.csv\")\n\n#make the 95% confidence intervals\ndat['diff'] = dat['Upper'].sub(dat['Lower']) #get the length of the bars\ndat['diff2'] = dat['diff'].div(2) #length from end of bar to cap\n\n##split the data into RT and Error groups\nerror_dat = dat[ dat['TYPE'] == \"ERROR\"]\n\nerror_dat['Average'] = error_dat['Average'].multiply(100) #Disregard the warnings here\nerror_dat['diff2'] = error_dat['diff2'].multiply(100)\n\n##split into groups bases on condition\nerror_dat_ya = error_dat[ error_dat[\"Conditon\"] == \"Younger\"]\nerror_dat_healthy = error_dat[ error_dat[\"Conditon\"] == \"Healthy\"]\nerror_dat_mci = error_dat[ error_dat[\"Conditon\"] == \"MCI\"]\n\n##now get only the variables needed\nerror_dat_ya2 = error_dat_ya[ error_dat_ya[\"VAR\"].isin([\"PURE\", \"ALT_SWITCH\", \"ALT_NS\", \"RAND_SWITCH\", \"RAND_NS\"])] \nerror_dat_healthy2 = error_dat_healthy[ error_dat_healthy[\"VAR\"].isin([\"PURE\", \"ALT_SWITCH\", \"ALT_NS\", \"RAND_SWITCH\", \"RAND_NS\"])] \nerror_dat_mci2 = error_dat_mci[ error_dat_mci[\"VAR\"].isin([\"PURE\", \"ALT_SWITCH\", \"ALT_NS\", \"RAND_SWITCH\", \"RAND_NS\"])] \n\n##now get averages and conf intervals\n##averages\nya_average = error_dat_ya2[\"Average\"]\nya_average2 = ya_average.tolist() #convert to list\n\nhealthy_average = error_dat_healthy2[\"Average\"]\nhealthy_average2 = healthy_average.tolist()\n\nmci_average = error_dat_mci2[\"Average\"]\nmci_average2 = mci_average.tolist()\n\n##conf intervals\nya_conf = error_dat_ya2[\"diff2\"]\nya_conf2 = ya_conf.tolist() #convert to list\n\nhealthy_conf = error_dat_healthy2[\"diff2\"]\nhealthy_conf2 = healthy_conf.tolist()\n\nmci_conf = error_dat_mci2[\"diff2\"]\nmci_conf2 = mci_conf.tolist()\n\n##set up the plot\nerror_fig = plt.figure()\nerror_fig.set_size_inches(10,15)\n\n####First, lets plot errors 
for pure, nonswitch, and switch trials\nbars1 = ya_average2\nbars2 = healthy_average2\nbars3 = mci_average2\n\n##set bar width\nbarwidth = 0.20 ##ax1\nbarwidth2 = 0.25 ##ax2\n\n#set bar position\nr1 = np.arange(len(bars1))\nr2 = [x + barwidth for x in r1]\nr3 = [x + barwidth for x in r2]\n\n##make the sub plots\n##ax1 will be pure vs ns vs s\n##ax2 will be local vs global\nax1 = error_fig.add_subplot(2, 1, 1)\nax2 = error_fig.add_subplot(2, 1, 2)\n\n##make the plot\nrects1 = ax1.bar(r1, bars1, width = barwidth, yerr = ya_conf2, capsize = 3, color = 'w', edgecolor = 'k',\n label ='Younger Adults')\n\nrects2 = ax1.bar(r2, bars2, width = barwidth, yerr = healthy_conf2, capsize = 3, color = 'silver', edgecolor = 'k',\n label = 'Healthy Older')\n\nrects3 = ax1.bar(r3, bars3, width = barwidth, yerr = mci_conf2, capsize = 3, color = 'dimgray', edgecolor = 'k',\n label = 'MCI Older')\n\n##Add labels, legend, and set tick marks\nax1.set_title('Mean Error Rates: Pure, Switch, and Non-Switch Trials', fontsize = 18)\nax1.set_ylabel('Mean % Error', fontsize = 16)\nax1.set_xlabel('Trial Type', fontsize = 16)\nax1.xaxis.labelpad = 7.5\nax1.set_xticks(r2)\nax1.tick_params(axis='x', which = 'major', pad = 2.5) #controls how far labels are from axis\nax1.set_xticklabels(('Pure', 'Nonswitch Alt Run', 'Nonswitch Rand', 'Switch Alt Run', 'Switch Rand'), fontsize = 10)\nbox = ax1.get_position()\nax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax1.legend(bbox_to_anchor=(1.04,0.5), loc=\"center left\", borderaxespad = 0, fontsize = 14)\nax1.set_ylim([0,25])\n\n####Now make the graph for local vs global costs####\n##get only the variables that are needed\nerror_dat_ya3 = error_dat_ya[ error_dat_ya[\"VAR\"].isin([\"ALT_GLOBAL\", \"RAND_GLOBAL\", \"ALT_LOCAL\", \"RAND_LOCAL\"])] \nerror_dat_healthy3 = error_dat_healthy[ error_dat_healthy[\"VAR\"].isin([\"ALT_GLOBAL\", \"RAND_GLOBAL\", \"ALT_LOCAL\", \"RAND_LOCAL\"])] \nerror_dat_mci3 = error_dat_mci[ 
error_dat_mci[\"VAR\"].isin([\"ALT_GLOBAL\", \"RAND_GLOBAL\", \"ALT_LOCAL\", \"RAND_LOCAL\"])]\n\n##Now get averages and conf intervals\n##averages\nya_average3 = error_dat_ya3[\"Average\"]\nya_average4 = ya_average3.tolist() #convert to list\n\nhealthy_average3 = error_dat_healthy3[\"Average\"]\nhealthy_average4 = healthy_average3.tolist()\n\nmci_average3 = error_dat_mci3[\"Average\"]\nmci_average4 = mci_average3.tolist()\n\n##get conf intervals\nya_conf3 = error_dat_ya3[\"diff2\"]\nya_conf4 = ya_conf3.tolist() #convert to list\n\nhealthy_conf3 = error_dat_healthy3[\"diff2\"]\nhealthy_conf4 = healthy_conf3.tolist()\n\nmci_conf3 = error_dat_mci3[\"diff2\"]\nmci_conf4 = mci_conf3.tolist()\n\n##make the bars\nbars4 = ya_average4\nbars5 = healthy_average4\nbars6 = mci_average4\n\n#set bar position\nr4 = np.arange(len(bars4)) + .5\nr5 = [x + barwidth2 for x in r4]\nr6 = [x + barwidth2 for x in r5]\n\n##make the plot\nrects4 = ax2.bar(r4, bars4, width = barwidth2, yerr = ya_conf4, capsize = 3, color = 'w', edgecolor = 'k',\n label ='Younger Adults')\n\nrects5 = ax2.bar(r5, bars5, width = barwidth2, yerr = healthy_conf4, capsize = 3, color = 'silver', edgecolor = 'k',\n label = 'Healthy Older')\n\nrects6 = ax2.bar(r6, bars6, width = barwidth2, yerr = mci_conf4, capsize = 3, color = 'dimgray', edgecolor = 'k',\n label = 'MCI Older')\n\n##Add labels, legend, and set tick marks\nax2.set_title('Mean Error Rates: Local and Global Switch Costs', fontsize = 18)\nax2.set_ylabel('Mean % Error', fontsize = 16)\nax2.set_xlabel('Cost Type', fontsize = 16)\nax2.xaxis.labelpad = 7.5\nax2.set_xticks(r5)\nax2.tick_params(axis='x', which = 'major', pad = 2.5) #controls how far labels are from axis\nax2.set_xticklabels(('Global Alt Run', 'Global Rand', 'Local Alt Run', 'Local Rand'), fontsize = 10)\nbox = ax2.get_position()\nax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax2.legend(bbox_to_anchor = (1.04,0.5), loc=\"center left\", borderaxespad = 0, fontsize = 
14)\nax2.set_ylim([-2, 15])\nplt.axhline(y = 0, color='k', linestyle='-')\n\n##save figure\nerror_fig.savefig('CVOE_mean_errors.pdf', dip = 10000)\n","sub_path":"CVOE/3 Presentations/CVOE bar charts Fixed.py","file_name":"CVOE bar charts Fixed.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"642573361","text":"from cister.db.models.cister import Base\nfrom cister.db.models.cister import Fleet\nfrom cister.db.models.cister import Location\nfrom cister.db.models.cister import DBSession\nfrom datetime import datetime\nfrom cister.db.views import BaseCisterView\nfrom cister.db.helpers import createBlobDetails\n\n\nclass AstroView(BaseCisterView):\n\n def __call__(self):\n dbsession = DBSession()\n location = \"A\"\n location = \"%s%s\" % (location, self.request.matchdict.get(\"galaxy\"))\n location = \"%s:%s\" % (location, self.request.matchdict.get(\"region\"))\n location = \"%s:%s\" % (location, self.request.matchdict.get(\"system\"))\n location = \"%s:%s\" % (location, self.request.matchdict.get(\"astro\"))\n\n astro = dbsession.query(Location).filter(Location.location==location).one()\n base = astro.base\n fleets = dbsession.query(Fleet).filter(Fleet.location==location)\n fleets = fleets.order_by(\"arrival-(unix_timestamp(now())-unix_timestamp(fleet.timestamp)) > 0 ASC, arrival ASC, size DESC\")\n fleets_count = fleets.count()\n if fleets_count == 0:\n fleets = []\n\n\n blobdetails = createBlobDetails(location, fleets)\n blobdetails = blobdetails.values()\n def blobsize_compare(x, y):\n xsum = x['sum']\n ysum = y['sum']\n return int(ysum - xsum)\n blobdetails = sorted(blobdetails, cmp=blobsize_compare)\n\n returnvalue = { 'location':location,\n 'astrolocation':astro,\n 'base':base,\n 'fleets':fleets,\n 'fleets_count':fleets_count,\n 'datetime':datetime,\n 'blobdetails':blobdetails\n }\n returnvalue.update(self.request.matchdict)\n return returnvalue\n\n","sub_path":"cister/db/views/map/astro.py","file_name":"astro.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"653069687","text":"\nimport os\nimport shutil\n\ndef main():\n ANDROID_HOME = os.environ['ANDROID_HOME']\n #JAVA_HOME = os.environ['JAVA_HOME']\n NDK_ROOT = os.environ['NDK_ROOT']\n \n #archs = [ \"armeabi\", \"armeabi-v7a\", \"arm64-v8a\", \"x86\", \"x86_64\" ]\n #archs = [ \"armeabi-v7a\", \"arm64-v8a\", \"x86\", \"x86_64\" ]\n #archs = [ \"arm64-v8a\", \"x86\" ]\n archs = [ \"arm64-v8a\" ]\n #archs = [ \"x86_64\" ]\n \n base_cmake_cmd = '\"' + ANDROID_HOME + '\\\\cmake\\\\3.6.4111459\\\\bin\\\\cmake\"' + ' -G\"Android Gradle - Ninja\" .. -DANDROID_NDK=\"' + NDK_ROOT + '\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_MAKE_PROGRAM=\"' + ANDROID_HOME + '\\\\cmake\\\\3.6.4111459\\\\bin\\\\ninja\" -DCMAKE_TOOLCHAIN_FILE=\"' + NDK_ROOT + '\\\\build\\\\cmake\\\\android.toolchain.cmake\" -DANDROID_NATIVE_API_LEVEL=9 -DANDROID_PLATFORM=android-24 -DANDROID_STL=c++_static -DANDROID_CPP_FEATURES=\"rtti exceptions\" -DANDROID_TOOLCHAIN=clang '\n \n base_build_cmd = '\"' + ANDROID_HOME + '\\\\cmake\\\\3.6.4111459\\\\bin\\\\ninja\" '\n \n # -DANDROID_ABI=armeabi-v7a\n \n # prepare assets\n os.system('DummyApp --prepare_assets android --norun')\n \n # build native part\n for arch in archs:\n if not os.path.exists(\"build-android-\" + arch):\n os.makedirs(\"build-android-\" + arch)\n os.chdir(\"build-android-\" + arch)\n os.system('\"' + base_cmake_cmd + '-DANDROID_ABI=' + arch + (' -DANDROID_ARM_NEON=TRUE' if arch == 'armeabi-v7a' else '') + '\"')\n os.chdir('..')\n \n os.chdir(\"build-android-\" + arch)\n ret = os.system('\"' + base_build_cmd + 'DummyApp' + '\"')\n \n if ret != 0:\n return\n \n os.chdir('..')\n \n if not os.path.exists(\"android\\\\lib\\\\\" + arch + \"\\\\\"):\n os.makedirs(\"android\\\\lib\\\\\" + arch + \"\\\\\")\n \n shutil.copy2(\"build-android-\" + arch + \"\\\\src\\\\DummyApp\\\\libDummyApp.so\", \"android\\\\lib\\\\\" + arch + \"\\\\\")\n \n # build java part\n os.system(\"android\\\\build.bat\")\n shutil.copy2(\"android\\\\bin\\\\DummyApp.apk\", 
\"DummyApp.apk\")\n \nmain()","sub_path":"build_android.py","file_name":"build_android.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"436398886","text":"import numpy as np\n\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport matplotlib.animation as animation\n\n\nclass animator:\n def __init__(self, X, Y, Z, sequence,\n fig=0, name=None, interval=1):\n self.fig = plt.figure(fig)\n if name != None:\n self.fig.canvas.set_window_title(name)\n \n ax = self.fig.gca(projection='3d')\n ax.plot_surface(X, Y, Z, rstride=1, cstride=1,\n cmap='viridis', edgecolor='none', alpha=0.75)\n\n self.top = np.max(Z) + np.abs(np.min(Z))\n self.bottom = np.min(Z) - np.abs(np.max(Z))\n cset = ax.contourf(X,Y,Z,zdir='z',offset=self.bottom,cmap=cm.coolwarm)\n ax.set_zlim(self.bottom, self.top)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n if name != None:\n ax.title.set_text(name)\n \n self.origin = ax.scatter3D([],[],[],color='red',marker='*')\n self.projected = ax.scatter3D([],[],[],color='black')\n\n self.X = X\n self.Y = Y\n self.Z = Z\n self.interval = interval\n self.sequence = sequence\n\n\n def animate(self,i):\n x,y = self.sequence[i]\n self.origin._offsets3d = (self.X[x,y], self.Y[x,y], self.Z[x,y])\n self.projected._offsets3d = (self.X[x,y], self.Y[x,y], self.bottom)\n\n def render(self):\n self.ani = animation.FuncAnimation(self.fig, self.animate,\n frames=np.arange(len(self.sequence)), interval=self.interval, blit=False, repeat=False)\n plt.show()\n","sub_path":"animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"66259974","text":"import logging\n\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QSystemTrayIcon, QMenu\n\nfrom gui.mainwindow import MainWindow\nfrom storage.settings_manager import SettingsManager, Settings\n\nlog = logging.getLogger(__file__)\n\n\nclass TrayIcon(QSystemTrayIcon):\n\n def __init__(self, icon: QIcon, parent: MainWindow):\n super().__init__(icon, parent)\n\n # setup UI\n self.setVisible(SettingsManager.get(Settings.TRAY_SHOW_ALWAYS))\n\n # variables\n # retrieve vars from class to print more informative log messages\n self._reasonNames = {value: name for name, value in vars(QSystemTrayIcon).items() if str(value).isnumeric()}\n self._mainWindow = parent\n self._trayMenu = self._createTrayMenu()\n self.setContextMenu(self._trayMenu)\n\n # signal and slots\n self.activated.connect(self._onMouseClicked)\n self._mainWindow.closed.connect(self.hide)\n self._mainWindow.trayed.connect(self.show)\n # self._mainWindow.raised.connect(self._onMainWindowRaised)\n\n def _createTrayMenu(self) -> QMenu:\n \"\"\"\n Create tray icon menu\n :return: menu\n \"\"\"\n menu = QMenu(self._mainWindow)\n menu.addAction(self._mainWindow.ui.actionExit)\n return menu\n\n @pyqtSlot()\n def _onMainWindowRaised(self):\n if not SettingsManager.get(Settings.TRAY_SHOW_ALWAYS):\n self.hide()\n\n @pyqtSlot(QSystemTrayIcon.ActivationReason)\n def _onMouseClicked(self, reason: QSystemTrayIcon.ActivationReason):\n \"\"\"\n Process mouse click events\n :param reason: click reason\n :return: None\n \"\"\"\n\n if reason == QSystemTrayIcon.Trigger:\n self._mainWindow.toggleWindow()\n elif reason == QSystemTrayIcon.MiddleClick:\n self._mainWindow.exit()\n else:\n log.warning(f\"Unknown QSystemTrayIcon mouse reason: 
{self._reasonNames[reason]}\")\n","sub_path":"vid2audio/gui/trayicon.py","file_name":"trayicon.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"458508167","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 30 10:52:17 2017\r\n\r\n@author: ITA\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis script determines the element that has the maximum value of Sigmaxx after\r\nthe printing has finished.\r\n\r\nV4: READS ALSO STRESSES AT TWO FRAMES OF THE LAST STEP (INITIAL AND FINAL)\r\n\r\nV5: FLAG TO CHOOSE IF BOTH FRAMES ARE SAVED\r\n\"\"\"\r\n#from part import *\r\n#from material import *\r\n#from section import *\r\n#from assembly import *\r\n#from step import *\r\n#from interaction import *\r\n#from load import *\r\n#from mesh import *\r\n#from optimization import *\r\n#from job import *\r\n#from sketch import *\r\n#from visualization import *\r\n#from connectorBehavior import *\r\n\r\n#import math\r\n#import numpy as np\r\nfrom odbAccess import *\r\nfrom abaqusConstants import *\r\n\r\nimport odbAccess\r\n\r\nfrom multiprocessing import Pool\r\nfrom functools import partial\r\n\r\nimport math\r\nimport numpy as np\r\n\r\nimport os, os.path\r\n\r\nclass SHAPE_FUNCTIONS:\r\n def __init__(self, DIM,NNODES):\r\n self.DIMENSION = DIM\r\n self.number_nodes = NNODES\r\n \r\n def natural_nodes(self, vecX, vecY, vecZ):\r\n self.X = vecX\r\n self.Y = vecY\r\n self.Z = vecZ\r\n \r\n def nodal_temperature(self,vec_TEMP):\r\n self.TEMP_NODAL = vec_TEMP\r\n \r\n def brick_element(self,r,s,t):\r\n oneO8 = 1.0/8.0\r\n one = 1.0\r\n \r\n N1 = oneO8 *(1-r)*(1-s)*(1-t)\r\n \r\n N2 = oneO8 *(1+r)*(1-s)*(1-t)\r\n \r\n N3 = oneO8 *(1+r)*(1+s)*(1-t)\r\n \r\n N4 = oneO8 *(1-r)*(1+s)*(1-t)\r\n \r\n N5 = oneO8 *(1-r)*(1-s)*(1+t)\r\n \r\n N6 = oneO8 *(1+r)*(1-s)*(1+t)\r\n \r\n N7 = oneO8 *(1+r)*(1+s)*(1+t)\r\n \r\n N8 = oneO8 *(1-r)*(1+s)*(1+t)\r\n \r\n vec_shape = np.zeros((1,8)) \r\n vec_shape[0] = [N1,N2,N3,N4,N5,N6,N7,N8]\r\n \r\n dN1dr = oneO8 *(-one)*(1-s)*(1-t)\r\n dN1ds = oneO8 *(1-r)*(-one)*(1-t)\r\n dN1dt = oneO8 *(1-r)*(1-s)*(-one)\r\n \r\n dN2dr = oneO8 *(one)*(1-s)*(1-t)\r\n dN2ds = oneO8 
*(1+r)*(-one)*(1-t)\r\n dN2dt = oneO8 *(1+r)*(1-s)*(-one)\r\n \r\n dN3dr = oneO8 *(one)*(1+s)*(1-t)\r\n dN3ds = oneO8 *(1+r)*(one)*(1-t)\r\n dN3dt = oneO8 *(1+r)*(1+s)*(-one)\r\n \r\n dN4dr = oneO8 *(-one)*(1+s)*(1-t)\r\n dN4ds = oneO8 *(1-r)*(one)*(1-t)\r\n dN4dt = oneO8 *(1-r)*(1+s)*(-one)\r\n \r\n dN5dr = oneO8 *(-one)*(1-s)*(1+t)\r\n dN5ds = oneO8 *(1-r)*(-one)*(1+t)\r\n dN5dt = oneO8 *(1-r)*(1-s)*(one)\r\n \r\n dN6dr = oneO8 *(one)*(1-s)*(1+t)\r\n dN6ds = oneO8 *(1+r)*(-one)*(1+t)\r\n dN6dt = oneO8 *(1+r)*(1-s)*(one)\r\n \r\n dN7dr = oneO8 *(one)*(1+s)*(1+t)\r\n dN7ds = oneO8 *(1+r)*(one)*(1+t)\r\n dN7dt = oneO8 *(1+r)*(1+s)*(one)\r\n \r\n dN8dr = oneO8 *(-one)*(1+s)*(1+t)\r\n dN8ds = oneO8 *(1-r)*(one)*(1+t)\r\n dN8dt = oneO8 *(1-r)*(1+s)*(one)\r\n \r\n MAT_diff = np.zeros((8,3))\r\n \r\n MAT_diff[0] = [ dN1dr , dN1ds , dN1dt ]\r\n MAT_diff[1] = [ dN2dr , dN2ds , dN2dt ]\r\n MAT_diff[2] = [ dN3dr , dN3ds , dN3dt ]\r\n MAT_diff[3] = [ dN4dr , dN4ds , dN4dt ]\r\n MAT_diff[4] = [ dN5dr , dN5ds , dN5dt ]\r\n MAT_diff[5] = [ dN6dr , dN6ds , dN6dt ]\r\n MAT_diff[6] = [ dN7dr , dN7ds , dN7dt ]\r\n MAT_diff[7] = [ dN8dr , dN8ds , dN8dt ]\r\n\r\n self.shapefun = vec_shape\r\n self.shape_diff = MAT_diff\r\n \r\n def JACOBIAN(self):\r\n# NODE_COORD.X = x coordinates \r\n if self.DIMENSION == 3:\r\n JACOBIAN = np.zeros((3,3))\r\n for i in range(0,3):\r\n somax = 0.0\r\n somay = 0.0\r\n somaz = 0.0\r\n for k in range(0,self.number_nodes):\r\n aux = self.X[k]*self.shape_diff[k][i]\r\n somax = somax + aux\r\n \r\n aux = self.Y[k]*self.shape_diff[k][i]\r\n somay = somay + aux\r\n \r\n aux = self.Z[k]*self.shape_diff[k][i]\r\n somaz = somaz + aux\r\n \r\n JACOBIAN[i,0] = somax\r\n JACOBIAN[i,1] = somay\r\n JACOBIAN[i,2] = somaz\r\n \r\n self.detJACOBIAN = np.linalg.det(JACOBIAN)\r\n \r\n \r\n def TEMPERATURE_RST(self):\r\n TEMP_RST = 0.0\r\n for i in range(0,len(self.TEMP_NODAL)):\r\n TEMP_RST = TEMP_RST + self.TEMP_NODAL[i]*self.shapefun[0][i]\r\n self.TEMP_RST = 
TEMP_RST\r\n \r\n def FUNCTION_INTEGRATED_RST(self,r,s,t):\r\n \r\n self.brick_element(r,s,t) #changes shapefun and diff shapefun\r\n \r\n self.JACOBIAN() #calculates Jacobian at new r,s,t\r\n \r\n self.TEMPERATURE_RST()\r\n \r\n f = self.TEMP_RST*abs(self.detJACOBIAN)\r\n \r\n return f\r\n#------------------------------------------------------------------------------\r\nclass GAUSS_POINTS:\r\n def __init__(self, npoints):\r\n self.points = npoints\r\n \r\n #W = weight\r\n #P = point\r\n if npoints == 2:\r\n self.W = [1.0,1.0]\r\n \r\n root3 = math.sqrt(3)\r\n self.P = [-1/root3, 1/root3]\r\n \r\n elif npoints == 1:\r\n self.W = [2.0]\r\n self.P = [0.0]\r\n \r\n elif npoints == 3:\r\n root3 = math.sqrt(3)\r\n root5 = math.sqrt(5)\r\n \r\n fiveOnine = 5./9.\r\n \r\n self.W = [fiveOnine, 8./9., fiveOnine]\r\n self.P = [-root3/root5,0.0, root3/root5]\r\n \r\n#==============================================================================\r\n# QUAD GAUSS 2D OR 3D\r\n#==============================================================================\r\ndef gauss_quad(DIM,func_name):\r\n Nx = 3\r\n Ny = 3\r\n Nz = 3\r\n \r\n X = GAUSS_POINTS(Nx)\r\n Y = GAUSS_POINTS(Ny)\r\n Z = GAUSS_POINTS(Nz)\r\n \r\n sum_gauss = 0.0\r\n \r\n if DIM == 2:\r\n for j in range(0,Ny):\r\n \r\n p_x2 = Y.P[j]\r\n w_x2 = Y.W[j]\r\n \r\n for i in range(0,Nx):\r\n p_x1 = X.P[i]\r\n w_x1 = X.W[i]\r\n \r\n point = (p_x1,p_x2)\r\n \r\n f = func_name(point)\r\n \r\n sum_gauss = sum_gauss + w_x1*f\r\n \r\n sum_gauss = sum_gauss * w_x2\r\n \r\n elif DIM == 3:\r\n for k in range(0,Nz):\r\n p_x3 = Z.P[k]\r\n w_x3 = Z.W[k]\r\n \r\n aux_gauss_j = 0.0\r\n for j in range(0,Ny):\r\n p_x2 = Y.P[j]\r\n w_x2 = Y.W[j]\r\n \r\n aux_gauss_i = 0.0\r\n for i in range(0,Nx):\r\n p_x1 = X.P[i]\r\n w_x1 = X.W[i]\r\n \r\n point = (p_x1,p_x2,p_x3)\r\n \r\n f = func_name(p_x1,p_x2,p_x3)\r\n \r\n aux_gauss_i = aux_gauss_i + w_x1*f\r\n \r\n aux_gauss_j = aux_gauss_j + aux_gauss_i * w_x2\r\n sum_gauss = sum_gauss + aux_gauss_j* 
w_x3\r\n \r\n \r\n return sum_gauss\r\n \r\n#------------------------------------------------------------------------------ \r\n#==============================================================================\r\n# WRITING VECTORS OF NODAL X, Y AND Z\r\n#==============================================================================\r\ndef rewrite_vec_pos(newnodes,ELE_TYPE):\r\n vecX = list()\r\n vecY = list()\r\n vecZ = list()\r\n for i in range(0,ELE_TYPE):\r\n node_i = newnodes[i]\r\n \r\n vecX.append(node_i[0])\r\n vecY.append(node_i[1])\r\n vecZ.append(node_i[2])\r\n \r\n return vecX,vecY,vecZ\r\n \r\n#==============================================================================\r\n# CALCULATE DISTANCE BETWEEN 2 POINTS\r\n#==============================================================================\r\ndef distance(coord1,coord2):\r\n aux = 0.0\r\n for i in range(0,len(coord1)):\r\n aux_i = (coord1[i] - coord2[i])**2\r\n aux = aux + aux_i\r\n \r\n dist = math.sqrt(aux)\r\n return dist\r\n \r\n#==============================================================================\r\n# CALCULATING NEW COORDINATES FOR EACH ELEMENT\r\n#==============================================================================\r\ndef ELEMENT_DATA(Instance,R_table,table_nodes,table_CONEC,ele_number,ELE_TYPE):\r\n #Instance = number of the instance\r\n\r\n translate = R_table[Instance - 1] #translation vector\r\n\r\n NODES = table_CONEC[ele_number - 1] #node number of the current element\r\n \r\n new_coordinates = list() #coordinates of each node of the current element\r\n \r\n for i in range(0,ELE_TYPE):\r\n \r\n node_i = int(NODES[i])\r\n \r\n coord_i = table_nodes[node_i - 1] #coordinate of node _i\r\n \r\n aux_coord = list()\r\n for j in range(0,3):\r\n aux = float(coord_i[j]) + translate[j]\r\n \r\n aux_coord.append(aux)\r\n \r\n new_coordinates.append(aux_coord)\r\n \r\n lx = distance(new_coordinates[1],new_coordinates[2])\r\n ly = distance(new_coordinates[1],new_coordinates[0])\r\n 
lz = distance(new_coordinates[0],new_coordinates[4])\r\n \r\n Vol = lx * ly * lz #element volume\r\n \r\n return new_coordinates, Vol\r\n \r\n#==============================================================================\r\n# READING INP FILE\r\n#==============================================================================\r\ndef read_inp(name_file_inp): \r\n file_read = open(name_file_inp, 'r');#READS THE INP FILE\r\n \r\n ELE_TYPE = 8 #NUMBER OF NODES PER ELEMENT\r\n #JUMPING LINES\r\n for i in range(0,9):\r\n file_read.readline()\r\n \r\n #READING NODES -----------------------------------------------------------\r\n aux_stop = \"t\";\r\n \r\n table_nodes = list()\r\n while aux_stop != \"*\":\r\n node_aux = file_read.readline()\r\n aux_stop = node_aux[0]\r\n node_aux = node_aux.replace(\",\", \"\")\r\n node_aux = node_aux.split();\r\n table_nodes.append(node_aux[1:4])\r\n \r\n del table_nodes[-1] #DELETE LAST ELEMENT\r\n NUMBER_NODES = len(table_nodes)\r\n \r\n #------------------------------------------------------------------------------\r\n #READING ELEMENTS--------------------------------------------------------------\r\n aux_stop = \"t\";\r\n \r\n table_CONEC = list()\r\n while aux_stop != \"*\":\r\n ele_aux = file_read.readline()\r\n aux_stop = ele_aux[0]\r\n ele_aux = ele_aux.replace(\",\", \"\")\r\n ele_aux = ele_aux.split();\r\n while len(ele_aux) < (ELE_TYPE + 1):\r\n ele_aux2 = file_read.readline()\r\n ele_aux2 = ele_aux2.replace(\",\", \"\")\r\n ele_aux2 = ele_aux2.split();\r\n ele_aux = ele_aux + ele_aux2\r\n \r\n table_CONEC.append(ele_aux[1:ELE_TYPE+1])\r\n \r\n \r\n del table_CONEC[-1] #DELETE LAST ELEMENT\r\n NUMBER_ELE = len(table_CONEC) #ELEMENTS PER INSTANCE\r\n \r\n #------------------------------------------------------------------------------\r\n #READING THE VECTOR R OF ASSEMBLY\r\n aux_stop = \"t\";\r\n \r\n while aux_stop != \"*Instance, name=I-1\":\r\n aux_stop = file_read.readline()\r\n aux_stop = aux_stop[0:19]\r\n \r\n aux_stop = 
\"*I\"\r\n R_table = list()\r\n LAYER_INSTANCE = list() #LIST THAT SAYS WHICH LAYER EACH INSTANCE IS AT\r\n LAYER_INSTANCE.append(1) #FIRST INSTANCE = LAYER 1\r\n aux_instance_layer = 0.0\r\n layer_cont = 1\r\n while aux_stop == \"*I\":\r\n file_read.readline()\r\n file_read.readline()\r\n \r\n aux_stop = file_read.readline()\r\n aux_stop = aux_stop[0:2]\r\n \r\n coord_R = file_read.readline()\r\n coord_R = coord_R.replace(\",\", \"\")\r\n coord_R = coord_R.split();\r\n \r\n for i in range(0,3):\r\n coord_R[i] = float(coord_R[i])\r\n \r\n #CHECKING THE LAYER INSTANCES\r\n if float(coord_R[2]) > aux_instance_layer:\r\n aux_instance_layer = float(coord_R[2])\r\n layer_cont = layer_cont + 1\r\n \r\n LAYER_INSTANCE.append(layer_cont)\r\n R_table.append(coord_R)\r\n \r\n del R_table[-1] #DELETE LAST ELEMENT\r\n del LAYER_INSTANCE[-1] #DELETE LAST ELEMENT\r\n NUMBER_LAYER = max(LAYER_INSTANCE) #NUMBER OF LAYERS\r\n R_table.insert(0,[0.0, 0.0, 0.0]) #FIRST INTANCE R = [0 0 0]\r\n NUMBER_INSTANCE = len(R_table)\r\n NUMBER_ELE = len(table_CONEC) #ELEMENTS PER INSTANCE\r\n file_read.close()\r\n \r\n return R_table, table_nodes, table_CONEC\r\n\r\n#==============================================================================\r\n# READ ODB\r\n#==============================================================================\r\ndef read_odb(i,name_old,num_steps,num_instances,print_pattern,ELE_TYPE,R_table,table_nodes,table_CONEC,BRICK):\r\n \r\n myOdb = odbAccess.openOdb(path=name_old, readOnly = True)\r\n \r\n ALLInstances = myOdb.rootAssembly.instances\r\n \r\n \r\n name_new = name_old.replace('.odb','_%d.txt' %(i+1))\r\n \r\n report_name = 'report-%d.txt' %(i+1)\r\n \r\n file_report = open(report_name,'w')\r\n file_report.close()\r\n #RESTART:\r\n aux_restart = 0 #DOES NOT RESTART\r\n \r\n if os.path.isfile(name_new): #it file exists\r\n file_new = open(name_new,'r')\r\n \r\n cont_line = 0\r\n \r\n for line in file_new:\r\n cont_line = cont_line + 1\r\n \r\n 
file_new.close()\r\n if cont_line < 14:\r\n aux_restart = 1\r\n \r\n if not os.path.isfile(name_new) or aux_restart == 1: #it file does not exists or has not been written entirely\r\n file_new = open(name_new,'w')\r\n \r\n total_time = 0.0\r\n \r\n \r\n if i == num_steps - 1: #last step\r\n step_name = 'Step-FINAL-THERMAL-STEP'\r\n num_instance_now = num_instances\r\n else: \r\n step_name = 'Step-%d' %(i+1)\r\n num_instance_now = i + 1\r\n \r\n step_now = myOdb.steps[step_name]\r\n num_frames = len(step_now.frames)\r\n \r\n file_new.write(\"--------------------------------------------------\\n\")\r\n file_new.write(\"%s\\n\" %step_name)\r\n \r\n for j in range(0,num_frames):\r\n frame_now = step_now.frames[j]\r\n \r\n time_step = frame_now.frameValue\r\n total_time = total_time + time_step\r\n \r\n TEMP_aux = frame_now.fieldOutputs['TEMP'] \r\n \r\n file_new.write(\"FRAME = %d ----------------------------------------\\n\" %j)\r\n file_new.write(\"TIME [s] = %f\\n\" %total_time)\r\n \r\n cont_ele = 0 #global elements\r\n \r\n Vol_total = 0.0 #volume total is zeroed in each frame\r\n \r\n Int_temp_total = 0.0 #integration sum variable is zeroed in each frame\r\n \r\n file_new_frame = name_new.replace('.txt','') + 'F%d.txt' %(j)\r\n \r\n for k in range(1,num_instance_now+1):\r\n \r\n intance_number = print_pattern[k-1]\r\n \r\n file_name_new_I = file_new_frame.replace('.txt','') + '_I%d.txt' %(intance_number)\r\n file_new_I = open(file_name_new_I,'w')\r\n \r\n instanceName = 'I-%d' %intance_number\r\n \r\n myInstance = ALLInstances[instanceName]\r\n \r\n numElements = len(myInstance.elements)\r\n \r\n Vol_total = 0.0 #volume total is zeroed in each frame\r\n \r\n Int_temp_total = 0.0 #integration sum variable is zeroed in each frame\r\n \r\n for el in range(0,numElements):\r\n #Isolate current and previous element's stress field\r\n \r\n #THOSE RESULTS ARE NO AVERAGED\r\n #POSITION = INTEGRATION_POINT/ELEMENT_NODAL/CENTROID\r\n region_aux = myInstance.elements[el]\r\n 
\r\n TEMP_NODAL = TEMP_aux.getSubset(\r\n region=region_aux,position=ELEMENT_NODAL,elementType='DC3D8').values\r\n \r\n cont_ele = cont_ele +1\r\n \r\n local_ele = el+1\r\n \r\n TEMP_NODE_ip = list()\r\n \r\n for ip in range(0,ELE_TYPE): #node loop\r\n \r\n TEMP_NODE_ip.append(TEMP_NODAL[ip].data)\r\n \r\n \r\n (new_coordinates, Vol) = ELEMENT_DATA(intance_number,R_table,table_nodes,table_CONEC,local_ele,ELE_TYPE) \r\n \r\n Vol_total = Vol_total + Vol\r\n \r\n #Post-Processing ----------------------------------------------\r\n (vecX,vecY,vecZ) = rewrite_vec_pos(new_coordinates,ELE_TYPE)\r\n \r\n BRICK.natural_nodes(vecX, vecY, vecZ)\r\n \r\n BRICK.nodal_temperature(TEMP_NODE_ip)\r\n \r\n # print BRICK.nodal_temperature\r\n \r\n #Integrating temperature distribution within the element\r\n Iaux = gauss_quad(BRICK.DIMENSION,BRICK.FUNCTION_INTEGRATED_RST)\r\n Int_temp_total = Int_temp_total + Iaux\r\n \r\n \r\n file_new_I.write(\"Integral Volume\\n\")\r\n file_new_I.write('%f %E\\n'%(Int_temp_total/Vol_total,Vol_total))\r\n file_new_I.close()\r\n\r\n file_new.close()\r\n myOdb.close()\r\n#------------------------------------------------------------------------------ \r\n\r\nif __name__ == '__main__': \r\n \r\n ELE_TYPE = 8\r\n pool = Pool(processes=20)\r\n #Odb file in the work directory:\r\n odb_name = 'job_sim1_mesh_2_2_2.odb' \r\n \r\n myOdb = odbAccess.openOdb(path=odb_name, readOnly = True)\r\n #--------------------------------------------------------------------------\r\n \r\n ALLInstances = myOdb.rootAssembly.instances\r\n num_instances = len(ALLInstances)\r\n \r\n mysteps = myOdb.steps\r\n num_steps = len(mysteps)\r\n# myOdb.close()\r\n \r\n #==========================================================================\r\n # Reading report file\r\n #========================================================================== \r\n report_file_name = 'REPORT_sim1_mesh_2_2_2.txt'\r\n file_report = open(report_file_name,'r')\r\n \r\n print_pattern = list()\r\n \r\n for 
i in range(0,24):\r\n file_report.readline() #jumping lines\r\n for i in range(0,num_instances):\r\n print_pattern.append(int(file_report.readline()))\r\n \r\n file_report.close() \r\n \r\n \r\n #--------------------------------------------------------------------------\r\n \r\n #==============================================================================\r\n #READING INP FILE\r\n #==============================================================================\r\n name_file_inp = 'job_sim1_mesh_2_2_2.inp' #INP FILE OF THE ANALYSIS(MUST CONTAIN THE ASSEMBLY)\r\n R_table, table_nodes, table_CONEC = read_inp(name_file_inp)\r\n \r\n\r\n # -------------------------------------------------------------------------\r\n # POST-PROCESSING\r\n BRICK = SHAPE_FUNCTIONS(3,8)\r\n \r\n #Parallel processing\r\n inputs = range(num_steps)\r\n# inputs = range(1)\r\n aux = pool.map(partial(read_odb, name_old=odb_name, num_steps = num_steps,num_instances=num_instances, \r\n print_pattern=print_pattern,ELE_TYPE=ELE_TYPE,R_table=R_table,table_nodes=table_nodes,table_CONEC=table_CONEC,BRICK=BRICK), inputs)\r\n \r\n myOdb.close()","sub_path":"Parallel_processing_Temperature.py","file_name":"Parallel_processing_Temperature.py","file_ext":"py","file_size_in_byte":20067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"547879389","text":"from flask import Flask, render_template, request, jsonify\nfrom flask.ext.socketio import SocketIO, emit, session\nfrom background_asr import recognize_wav\nfrom online_asr import OnlineASR\n\napp = Flask(__name__)\napp.config.from_object(__name__)\nsocketio = SocketIO(app)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/recognize', methods=['POST'])\ndef recognize():\n response = recognize_wav(request.data)\n\n return jsonify(response)\n\n\n@socketio.on('begin')\ndef begin_recognition(message):\n session['recognizer'] = OnlineASR(emit)\n\n\n@socketio.on('chunk')\ndef recognize_chunk(message):\n session['recognizer'].recognize_chunk(message)\n\n\n@socketio.on('end')\ndef end_recognition(message):\n session['recognizer'].end()\n\n\nif __name__ == '__main__':\n app.secret_key = 12345\n socketio.run(app)\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"653088529","text":"#!/usr/local/bin/python3\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport os\n\nbot = commands.Bot(command_prefix='!')\n\nwith open(f'{os.getcwd()}/token.txt', 'r') as token_file:\n bot.token = token_file.read().strip()\n\nwith open (f'{os.getcwd()}/bad_roles.txt') as bad_role_file:\n bad_roles = bad_role_file.readlines()\n bot.bad_role_list = []\n for line in bad_roles:\n bot.bad_role_list.append(line.strip().lower())\n\nwith open (f'{os.getcwd()}/excluded_roles.txt') as excluded_role_file:\n excluded_roles = excluded_role_file.readlines()\n bot.excluded_role_list = []\n for line in excluded_roles:\n bot.excluded_role_list.append(line.strip().lower())\n\nprint(bot.excluded_role_list)\n\n@bot.event\nasync def on_ready():\n print('------')\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n\n@bot.event\nasync def on_message(message):\n if not message.author.bot:\n if check_if_user_mentioned_bad_role(message):\n if not check_if_user_has_excluded_role(message):\n await message.delete()\n await message.channel.send(f'{message.author.mention}\\n**#11** - __Do not tag **Event-OPs or Above**__\\n- If you need them, please wait in the support room for assistance.', delete_after=5)\n await asyncio.sleep(5)\n #await message.author.kick(reason='Tagging Staff')\n\ndef check_if_user_mentioned_bad_role(message):\n for mentioned_player in message.mentions:\n for roles in mentioned_player.roles:\n if roles.name in bot.bad_role_list:\n return True\n return False\n\ndef check_if_user_has_excluded_role(message):\n for roles in message.author.roles:\n if roles.name in bot.excluded_role_list:\n return True\n return False\n\nbot.run(bot.token)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"86246839","text":"import webapp2\nimport os\nfrom google.appengine.ext.webapp import template\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n \n folder = 'htm'\n file = 'home.htm'\n template_values = {'page': 'home'}\n\n path = os.path.join(folder, file)\n \n self.response.out.write(template.render(path, template_values))\n\nclass OutHandler(webapp2.RequestHandler):\n def get(self):\n \n url = self.request.get('url')\n \n folder = 'htm'\n file = 'out.htm'\n template_values = {'url': url}\n\n path = os.path.join(folder, file)\n \n self.response.out.write(template.render(path, template_values))\n\nclass GalleryHandler(webapp2.RequestHandler):\n def get(self):\n \n folder = 'htm'\n file = 'gallery.htm'\n template_values = {'page': 'gallery'}\n\n path = os.path.join(folder, file)\n \n self.response.out.write(template.render(path, template_values))\n\napp = webapp2.WSGIApplication([\n ('/out/.*', OutHandler),\n ('/gallery/.*', GalleryHandler),\n ('/.*', MainHandler)\n], debug=True)","sub_path":"trueamericanrulescom/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"138041823","text":"\"\"\"\nA module to handle queries on the DB.\n\"\"\"\n\nfrom decimal import Decimal\nfrom requests import get\n\nfrom osu_acc.replay import util\nfrom osu_acc.replay import classes\nfrom osu_acc.replay.models import Replay, ReplayData\nfrom osu_acc.beatmap.models import Beatmap, BreakPeriod, TimingPoint, HitObject\n\n\n# =============================================================================\n# REPLAY MODELS\n# =============================================================================\n\ndef create_replay_data_entry(replay_id, replay_events):\n \"\"\"\n Given a list of classes.ReplayEvents, create and save a models.ReplayData instance.\n\n Args:\n replay_events (List(classes.ReplayEvent)): The replay data.\n \"\"\"\n if ReplayData.objects.filter(replay_id=replay_id).exists():\n return\n\n replay_data_fields = {}\n\n replay_data_fields['replay_id'] = replay_id\n replay_data_fields['x_coords'] = []\n replay_data_fields['y_coords'] = []\n replay_data_fields['hit_object_times'] = []\n\n for replay_event in replay_events:\n replay_data_fields['x_coords'].append(replay_event.x)\n replay_data_fields['y_coords'].append(replay_event.y)\n replay_data_fields['hit_object_times'].append(replay_event.time)\n\n replay_data_entry = ReplayData(**replay_data_fields)\n replay_data_entry.save()\n\n\ndef select_replay_data_field(replay_id, field):\n \"\"\"\n Returns the value of the field of a specific ReplayData entry.\n\n Equivalent to: SELECT field FROM replay_replaydata WHERE replay_id = replay_id;\n\n Args:\n replay_id (str): The hash of the replay, given by osrparse.\n\n Returns:\n field: The field requested.\n Is of type: str, List(Decimal).\n \"\"\"\n\n valid_keys = set([\n 'x_coords',\n 'y_coords',\n 'hit_object_times',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n replay = Replay.objects.get(replay_id=replay_id)\n return getattr(replay, field)\n\n\ndef create_replay_entry(json_resp, 
parsed_replay):\n \"\"\"\n Create and save a Replay instance.\n\n Args:\n json_resp (dict): The result of the osu!api call as a dictionary.\n parsed_replay (osrparse.Replay): The parsed replay.\n \"\"\"\n\n if Replay.objects.filter(replay_id=parsed_replay.replay_hash).exists():\n return\n\n replay_fields = {}\n\n # GETTING ARGUMENTS AND CONVERTING TYPES\n circle_size = Decimal(json_resp['diff_size'])\n overall_diff = Decimal(json_resp['diff_overall'])\n break_periods_model = select_beatmap_field(json_resp['beatmap_id'], 'break_period')\n break_periods = util.convert_beatmap_break_periods_to_class(break_periods_model)\n replay_events = util.convert_osrp_play_data_to_class(parsed_replay.play_data)\n hit_objects_model = select_beatmap_field(json_resp['beatmap_id'], 'hit_object')\n hit_objects = util.convert_hit_object_model_to_class(hit_objects_model)\n\n # POPULATING FIELD DICTIONARY\n replay_fields['replay_id'] = parsed_replay.replay_hash\n replay_fields['beatmap'] = Beatmap.objects.get(beatmap_id=json_resp['beatmap_id'])\n replay_fields['play_date'] = parsed_replay.timestamp\n\n replay_fields['ap'] = 0.00\n replay_fields['pp'] = 0.00\n\n replay_fields['num_raw_300'] = parsed_replay.number_300s\n replay_fields['num_raw_100'] = parsed_replay.number_100s\n replay_fields['num_raw_50'] = parsed_replay.number_50s\n replay_fields['num_raw_miss'] = parsed_replay.misses\n replay_fields['raw_accuracy'] = util.get_accuracy(replay_fields['num_raw_300'],\n replay_fields['num_raw_100'],\n replay_fields['num_raw_50'],\n replay_fields['num_raw_miss'])\n\n\n true_acc_fields = util.get_true_accuracy_fields(circle_size,\n overall_diff,\n break_periods,\n replay_events,\n hit_objects)\n replay_fields = {**replay_fields, **true_acc_fields}\n replay_fields['true_accuracy'] = util.get_accuracy(replay_fields['num_true_300'],\n replay_fields['num_true_100'],\n replay_fields['num_true_50'],\n replay_fields['num_true_miss'])\n\n create_replay_data_entry(parsed_replay.replay_hash, 
replay_events)\n replay_fields['replay_data'] = ReplayData.objects.get(replay_id=parsed_replay.replay_hash)\n\n replay_fields['hit_errors'] = util.get_hit_errors(circle_size,\n overall_diff,\n break_periods,\n replay_events,\n hit_objects)\n\n hit_error_data = util.calc_hit_error_data(replay_fields['hit_errors'])\n replay_fields = {**replay_fields, **hit_error_data}\n\n # Create an instance of a Replay model\n replay_entry = Replay(**replay_fields)\n replay_entry.save()\n\n\ndef select_replay_field(replay_id, field):\n \"\"\"\n Returns the value of the field of a specific Replay entry.\n\n Equivalent to: SELECT field FROM replay_replay WHERE replay_id = replay_id;\n\n Args:\n replay_id (str): The hash of the replay, given by osrparse.\n\n Returns:\n query_set[field]: The field requested.\n Is of type: str, Beatmap, DateTime, Decimal, int, List(Decimal)\n \"\"\"\n\n valid_keys = set([\n 'beatmap',\n 'replay_data',\n 'play_date',\n 'pp',\n 'raw_accuracy',\n 'num_raw_300',\n 'num_raw_100',\n 'num_raw_50',\n 'num_raw_miss',\n 'ap',\n 'true_accuracy',\n 'num_true_300',\n 'num_true_100',\n 'num_true_50',\n 'num_true_miss',\n 'hit_errors',\n 'min_neg_hit_error',\n 'max_neg_hit_error',\n 'avg_neg_hit_error',\n 'min_pos_hit_error',\n 'max_pos_hit_error',\n 'avg_pos_hit_error',\n 'min_abs_hit_error',\n 'max_abs_hit_error',\n 'avg_abs_hit_error',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n replaydata = ReplayData.objects.get(replay_id=replay_id)\n return getattr(replaydata, field)\n\n\n# =============================================================================\n# BEATMAP MODELS\n# =============================================================================\n\n\ndef create_break_period_entry(bm_id, data):\n \"\"\"\n Create and save a BreakPeriod entry.\n\n Equivalent to: INSERT INTO beatmap_breakperiod (fields) VALUES (values);\n\n Args:\n bm_id (str): The id of the beatmap associated.\n data (List(str)): The beatmap data as a list 
of strings.\n\n Returns:\n break_period_entry(BreakPeriod): The created BreakPeriod instance.\n \"\"\"\n\n if BreakPeriod.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Syntax: 2,start,end\n # The start and end fields are both an integral number of milliseconds,\n # from the beginning of the song,\n # defining the start and end point of the break period, respectively.\n\n break_fields = {}\n break_fields['beatmap_id'] = bm_id\n break_fields['starts'] = []\n break_fields['ends'] = []\n\n is_break = False\n\n for line in data:\n if 'Break Periods' in line.strip():\n is_break = True\n continue\n\n if is_break:\n # Next subsection, storyboarding, begins with the line\n # '//Storyboard Layer 0 (Background)\\n'\n if 'Storyboard' in line.strip():\n is_break = False\n else:\n start = int(line.split(',')[1])\n break_fields['starts'].append(start)\n end = int(line.split(',')[2])\n break_fields['ends'].append(end)\n\n break_entry = BreakPeriod(**break_fields)\n break_entry.save()\n\n\ndef select_break_period_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific Break entry.\n\n Equivalent to: SELECT field FROM beatmap_break WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: List(int), List(Decimal).\n \"\"\"\n\n valid_keys = set(['starts', 'ends'])\n\n if field not in valid_keys:\n # TODO: Raise a proper exception.\n return None\n\n break_period = BreakPeriod.objects.get(beatmap_id=beatmap_id)\n return getattr(break_period, field)\n\n\ndef create_timing_point_entry(bm_id, data):\n \"\"\"\n Create and save a TimingPoint entry.\n\n Equivalent to: INSERT INTO beatmap_timingpoint (fields) VALUES (values);\n\n Args:\n bm_id (str): The id of the beatmap associated.\n data (List(str)): The beatmap data as a list of strings.\n\n Returns:\n timing_point_model(TimingPoint): The created TimingPoint instance.\n \"\"\"\n if 
TimingPoint.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Syntax: Offset, Milliseconds per Beat, Meter, \n # Sample Set, Sample Index, Volume, Inherited, Kiai Mode\n # For our purposes, we only need the first three fields.\n # We will also convert all ms/beat values to positive.\n timing_point_fields = {}\n timing_point_fields['beatmap_id'] = bm_id\n timing_point_fields['offsets'] = []\n timing_point_fields['ms_per_beats'] = []\n\n is_timing_point = False\n\n for line in data:\n if line.strip() == '[TimingPoints]':\n is_timing_point = True\n continue\n\n if is_timing_point:\n # There is always an empty line before the start of the next section\n # Use it to identify when the current section ends\n if not line.strip():\n is_timing_point = False\n else:\n offset = int(line.split(',')[0])\n timing_point_fields['offsets'].append(offset)\n ms_per_beat = Decimal(line.split(',')[1])\n timing_point_fields['ms_per_beats'].append(round(ms_per_beat, 2))\n\n timing_point_entry = TimingPoint(**timing_point_fields)\n timing_point_entry.save()\n\n\ndef select_timing_point_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific TimingPoint entry.\n\n Equivalent to: SELECT field FROM beatmap_timingpoint WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: List(int), List(Decimal).\n \"\"\"\n\n valid_keys = set([\n 'offsets',\n 'ms_per_beats',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n timingpoint = TimingPoint.objects.get(beatmap_id=beatmap_id)\n return getattr(timingpoint, field)\n\n\ndef create_hit_object_entry(bm_id, data):\n \"\"\"\n Create and save a HitObject entry.\n\n Equivalent to: INSERT INTO beatmap_hitobject (fields) VALUES (values);\n\n Args:\n bm_id (str): The id of the beatmap associated.\n data (List(str)): The beatmap data as a list of strings.\n\n Returns:\n 
hit_object_model(HitObject): The created HitObject instance.\n \"\"\"\n if HitObject.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Syntax: x,y,time,type,hitSound...,extras\n # For our purposes, we only need the time field\n hit_object_fields = {}\n hit_object_fields['beatmap_id'] = bm_id\n hit_object_fields['x_coords'] = []\n hit_object_fields['y_coords'] = []\n hit_object_fields['hit_object_times'] = []\n hit_object_fields['hit_object_types'] = []\n\n is_hit_object = False\n\n for line in data:\n if line.strip() == '[HitObjects]':\n is_hit_object = True\n continue\n\n if is_hit_object:\n if not line.strip():\n is_hit_object = False\n else:\n x = line.split(',')[0]\n hit_object_fields['x_coords'].append(x)\n y = line.split(',')[1]\n hit_object_fields['y_coords'].append(y)\n obj_time = line.split(',')[2]\n hit_object_fields['hit_object_times'].append(obj_time)\n obj_type = line.split(',')[3]\n hit_object_fields['hit_object_types'].append(obj_type)\n\n hit_object_entry = HitObject(**hit_object_fields)\n hit_object_entry.save()\n\n\ndef select_hit_object_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific HitObject entry.\n\n Equivalent to: SELECT field FROM beatmap_hitobject WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: List(Decimal).\n \"\"\"\n\n valid_keys = set([\n 'x_coords',\n 'y_coords',\n 'hit_object_times',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n hitobject = HitObject.objects.get(beatmap_id=beatmap_id)\n return getattr(hitobject, field)\n\n\ndef create_beatmap_entry(json_resp):\n \"\"\"\n Given a beatmap's API response as JSON,\n populate the database with the appropriate information.\n\n Args:\n json_resp (dict): The response from osu!api\n \"\"\"\n bm_id = json_resp['beatmap_id']\n\n if Beatmap.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Download 
beatmap file\n OSU_BEATMAP_ENDPOINT = 'https://osu.ppy.sh/osu/'\n response = get(OSU_BEATMAP_ENDPOINT + bm_id)\n\n with open(bm_id + '.osu', 'wb') as f:\n f.write(response.content)\n\n with open(bm_id + '.osu', 'r') as f:\n data = f.readlines()\n\n # Parse beatmap file for required data\n beatmap_fields = {}\n\n beatmap_fields['beatmap_id'] = bm_id\n\n beatmap_fields['song_title'] = json_resp['title']\n beatmap_fields['song_artist'] = json_resp['artist']\n beatmap_fields['beatmap_creator'] = json_resp['creator']\n beatmap_fields['beatmap_difficulty'] = json_resp['version']\n beatmap_fields['beatmap_cs'] = Decimal(json_resp['diff_size'])\n beatmap_fields['beatmap_od'] = Decimal(json_resp['diff_overall'])\n\n # Create and get model fields\n create_break_period_entry(bm_id, data)\n beatmap_fields['break_period'] = BreakPeriod.objects.get(beatmap_id=bm_id)\n create_timing_point_entry(bm_id, data)\n beatmap_fields['timing_point'] = TimingPoint.objects.get(beatmap_id=bm_id)\n create_hit_object_entry(bm_id, data)\n beatmap_fields['hit_object'] = HitObject.objects.get(beatmap_id=bm_id)\n\n # Create Beatmap model instance and save to DB\n beatmap_entry = Beatmap(**beatmap_fields)\n beatmap_entry.save()\n\n\ndef select_beatmap_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific Beatmap entry.\n\n Equivalent to: SELECT field FROM beatmap_beatmap WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: TimingPoint, HitObject, str, Decimal.\n \"\"\"\n\n valid_keys = set([\n 'break_period',\n 'timing_point',\n 'hit_object',\n 'beatmap_creator',\n 'beatmap_difficulty',\n 'beatmap_cs',\n 'beatmap_od',\n 'song_title',\n 'song_artist',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n beatmap = Beatmap.objects.get(beatmap_id=beatmap_id)\n return getattr(beatmap, 
field)\n","sub_path":"osu_acc/replay/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":15790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"173734479","text":"from PyQt5 import QtGui, QtCore, QtWidgets\nimport numpy as np\nfrom ctypes import c_float, c_uint, sizeof\nimport time\n\nfrom camera import CameraMovement, Camera\n\nGLfloat = c_float\nGLuint = c_uint\n\nWIDTH = 800\nHEIGHT = 600\n\n# camera\ncamera = Camera(position = QtGui.QVector3D(0., 0., 3.), \n up = QtGui.QVector3D(0., 1., 0.))\n\nfirstMouse = True\nlastX = WIDTH / 2.0\nlastY = HEIGHT / 2.0\n\n# timing\ndateTime = 0. # time between current frame and last frame\nlastFrame = 0.\n\nlightPos = QtGui.QVector3D(12., 20., -2.)\n\nclass Window(QtGui.QOpenGLWindow):\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n self.setTitle('LearnOpenGL')\n \n self.vertices = np.array(\n [[-0.5, -0.5, -0.5, 0.0, 0.0, -1.0],\n [ 0.5, -0.5, -0.5, 0.0, 0.0, -1.0],\n [ 0.5, 0.5, -0.5, 0.0, 0.0, -1.0],\n [ 0.5, 0.5, -0.5, 0.0, 0.0, -1.0],\n [-0.5, 0.5, -0.5, 0.0, 0.0, -1.0],\n [-0.5, -0.5, -0.5, 0.0, 0.0, -1.0],\n\n [-0.5, -0.5, 0.5, 0.0, 0.0, 1.0],\n [ 0.5, -0.5, 0.5, 0.0, 0.0, 1.0],\n [ 0.5, 0.5, 0.5, 0.0, 0.0, 1.0],\n [ 0.5, 0.5, 0.5, 0.0, 0.0, 1.0],\n [-0.5, 0.5, 0.5, 0.0, 0.0, 1.0],\n [-0.5, -0.5, 0.5, 0.0, 0.0, 1.0],\n\n [-0.5, 0.5, 0.5, -1.0, 0.0, 0.0],\n [-0.5, 0.5, -0.5, -1.0, 0.0, 0.0],\n [-0.5, -0.5, -0.5, -1.0, 0.0, 0.0],\n [-0.5, -0.5, -0.5, -1.0, 0.0, 0.0],\n [-0.5, -0.5, 0.5, -1.0, 0.0, 0.0],\n [-0.5, 0.5, 0.5, -1.0, 0.0, 0.0],\n\n [ 0.5, 0.5, 0.5, 1.0, 0.0, 0.0],\n [ 0.5, 0.5, -0.5, 1.0, 0.0, 0.0],\n [ 0.5, -0.5, -0.5, 1.0, 0.0, 0.0],\n [ 0.5, -0.5, -0.5, 1.0, 0.0, 0.0],\n [ 0.5, -0.5, 0.5, 1.0, 0.0, 0.0],\n [ 0.5, 0.5, 0.5, 1.0, 0.0, 0.0],\n\n [-0.5, -0.5, -0.5, 0.0, -1.0, 0.0],\n [ 0.5, -0.5, -0.5, 0.0, -1.0, 0.0],\n [ 0.5, -0.5, 0.5, 0.0, -1.0, 0.0],\n [ 0.5, -0.5, 0.5, 0.0, -1.0, 0.0],\n [-0.5, -0.5, 0.5, 0.0, -1.0, 0.0],\n [-0.5, -0.5, -0.5, 0.0, -1.0, 0.0],\n\n [-0.5, 0.5, -0.5, 0.0, 1.0, 0.0],\n [ 0.5, 0.5, -0.5, 0.0, 1.0, 0.0],\n [ 0.5, 0.5, 0.5, 0.0, 1.0, 0.0],\n [ 0.5, 0.5, 0.5, 0.0, 1.0, 
0.0],\n [-0.5, 0.5, 0.5, 0.0, 1.0, 0.0],\n [-0.5, 0.5, -0.5, 0.0, 1.0, 0.0]], dtype=GLfloat\n )\n \n def initializeGL(self):\n self.gl = self.context().versionFunctions()\n self.gl.glViewport(0, 0, WIDTH, HEIGHT)\n self.gl.glEnable(self.gl.GL_DEPTH_TEST)\n self.gl.glClearColor(0.1, 0.1, 0.1, 1.)\n \n ########################################################\n # Create a shader program\n \n self.lightingShaderProg = QtGui.QOpenGLShaderProgram()\n self.lightingShaderProg.create()\n self.lightingShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Vertex, '2.1.basic_lighting.vert')\n self.lightingShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Fragment, '2.1.basic_lighting.frag')\n self.lightingShaderProg.link()\n ########################################################\n \n ########################################################\n # Create a shader program\n \n self.lampShaderProg = QtGui.QOpenGLShaderProgram()\n self.lampShaderProg.create()\n self.lampShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Vertex, '2.1.lamp.vert')\n self.lampShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Fragment, '2.1.lamp.frag')\n self.lampShaderProg.link()\n ########################################################\n \n \n ########################################################\n # create a Vertex Array Object with vertice information\n \n self.cubeVAO = QtGui.QOpenGLVertexArrayObject()\n self.cubeVAO.create()\n self.cubeVAO.bind()\n \n VBO = QtGui.QOpenGLBuffer(QtGui.QOpenGLBuffer.VertexBuffer)\n VBO.create()\n VBO.setUsagePattern(QtGui.QOpenGLBuffer.StaticDraw)\n data = self.vertices.tostring()\n VBO.bind()\n VBO.allocate(data, len(data))\n self.gl.glVertexAttribPointer(0, 3, self.gl.GL_FLOAT, \n self.gl.GL_FALSE, 6*sizeof(GLfloat), 0)\n self.gl.glEnableVertexAttribArray(0)\n self.gl.glVertexAttribPointer(1, 3, self.gl.GL_FLOAT, \n self.gl.GL_FALSE, 6*sizeof(GLfloat), 0)\n self.gl.glEnableVertexAttribArray(1)\n \n VBO.release()\n 
self.cubeVAO.release()\n ########################################################\n \n ########################################################\n # create a Vertex Array Object with vertice information\n \n self.lightVAO = QtGui.QOpenGLVertexArrayObject()\n self.lightVAO.create()\n self.lightVAO.bind()\n \n VBO.bind()\n self.gl.glVertexAttribPointer(0, 3, self.gl.GL_FLOAT, \n self.gl.GL_FALSE, 6*sizeof(GLfloat), 0)\n self.gl.glEnableVertexAttribArray(0)\n \n VBO.release()\n self.lightVAO.release()\n ########################################################\n \n def paintGL(self):\n currentFrame = time.time()\n global deltaTime, lastFrame\n deltaTime = currentFrame - lastFrame\n lastFrame = currentFrame\n \n self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | \\\n self.gl.GL_DEPTH_BUFFER_BIT)\n \n self.lightingShaderProg.bind()\n self.lightingShaderProg.setUniformValue('objectColor', 1., 0.5, 0.31)\n self.lightingShaderProg.setUniformValue('lightColor', 1., 1., 1.)\n self.lightingShaderProg.setUniformValue('lightPos', lightPos)\n projection = QtGui.QMatrix4x4()\n projection.perspective(camera.zoom, WIDTH/HEIGHT, 0.1, 100.)\n self.lightingShaderProg.setUniformValue('projection', projection)\n self.lightingShaderProg.setUniformValue('view', camera.viewMatrix)\n self.lightingShaderProg.setUniformValue('model', QtGui.QMatrix4x4())\n\n self.cubeVAO.bind()\n self.gl.glDrawArrays(self.gl.GL_TRIANGLES, 0, 36)\n \n self.lampShaderProg.bind()\n self.lampShaderProg.setUniformValue('projection', projection)\n self.lampShaderProg.setUniformValue('view', camera.viewMatrix)\n model = QtGui.QMatrix4x4()\n model.translate(lightPos)\n model.scale(0.2)\n self.lampShaderProg.setUniformValue('model', model)\n\n self.lightVAO.bind()\n self.gl.glDrawArrays(self.gl.GL_TRIANGLES, 0, 36)\n \n self.update()\n \n def keyPressEvent(self, event):\n global deltaTime\n \n if event.key() == QtCore.Qt.Key_Escape:\n sys.exit()\n elif event.key() == QtCore.Qt.Key_W:\n 
camera.processKeyboard(CameraMovement.FORWARD, deltaTime)\n elif event.key() == QtCore.Qt.Key_S:\n camera.processKeyboard(CameraMovement.BACKWARD, deltaTime)\n elif event.key() == QtCore.Qt.Key_A:\n camera.processKeyboard(CameraMovement.LEFT, deltaTime)\n elif event.key() == QtCore.Qt.Key_D:\n camera.processKeyboard(CameraMovement.RIGHT, deltaTime)\n \n event.accept()\n \n def mouseMoveEvent(self, event):\n global firstMouse, lastX, lastY\n \n if firstMouse:\n lastX, lastY = event.globalX(), event.globalY()\n firstMouse = False\n \n xoffset = event.globalX() - lastX\n yoffset = lastY - event.globalY()\n lastX, lastY = event.globalX(), event.globalY()\n \n camera.processMouseMovement(xoffset, yoffset)\n event.accept()\n \n def wheelEvent(self, event):\n camera.processMouseScroll(event.angleDelta().y())\n event.accept()\n \n def closeEvent(self, event):\n sys.exit()\n event.accept()\n\n\nif __name__ == '__main__':\n import sys\n \n # Set format here, otherwise it throws error\n # `QCocoaGLContext: Falling back to unshared context.`\n # on Mac when use QOpenGLWidgets\n # https://doc.qt.io/qt-5/qopenglwidget.html#details last paragraph\n format = QtGui.QSurfaceFormat()\n format.setRenderableType(QtGui.QSurfaceFormat.OpenGL)\n format.setProfile(QtGui.QSurfaceFormat.CoreProfile)\n format.setVersion(4, 1)\n format.setDepthBufferSize(24)\n QtGui.QSurfaceFormat.setDefaultFormat(format)\n \n app = QtWidgets.QApplication(sys.argv)\n \n window = Window()\n window.resize(WIDTH, HEIGHT)\n window.show()\n \n sys.exit(app.exec_())\n","sub_path":"OpenGL/Joey_de_Vries/qopengl/01_lighting/02_basic_lighting/part02_diffuse_lighting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"446394130","text":"class node():\n\tdef __init__(self, data = None, link = None):\n\t\tself.data = data\n\t\tself.link = link\n\nclass SLL():\n\tdef __init__(self, head = None):\n\t\tself.head = head\n\n\tdef append(self, data):\n\t\tnewnode = node(data)\n\t\t\n\t\tif self.head == None:\n\t\t\thead = newnode\n\t\t\treturn\n\t\ttemp = self.head\n\t\twhile temp.link:\n\t\t\ttemp = temp.link\n\n\t\ttemp.link = newnode\n\n\tdef push(self, data):\n\t\tnewnode = node(data)\n\t\tif self.head == None:\n\t\t\tself.head = newnode\n\t\t\treturn\n\t\tnewnode.link = self.head\n\t\tself.head = newnode\n\n\tdef popLast(self):\n\t\tif self.head == None:\n\t\t\traise ValueError('Linked List is NULL')\n\t\ttemp = self.head\n\t\t\n\t\twhile temp.link.link:\n\t\t\ttemp = temp.link\n\n\t\tvalue = temp.link.data\n\t\ttemp.link = None\n\t\treturn value\n\n\tdef popFront(self):\n\t\tif self.head == None:\n\t\t\traise ValueError('Linked List is Null')\n\t\tvalue = self.head.data\n\t\tself.head = self.head.link\n\t\treturn value\n\n\tdef printList(self):\n\t\ttemp = self.head\n\t\twhile temp:\n\t\t\tprint(temp.data)\n\t\t\ttemp = temp.link\n\t\t \n\n","sub_path":"easy_ds/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"580481647","text":"# Given the root of a binary tree, invert the tree, and return its root.\n# Input: root = [4,2,7,1,3,6,9]\n# Output: [4,7,2,9,6,3,1]\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:\n if root:\n if root.left!=None and root.right!=None:\n rootTemp=TreeNode()\n rootTemp=root.left\n root.left=root.right\n root.right=rootTemp\n self.invertTree(root.left)\n self.invertTree(root.right)\n elif root.left!=None and root.right==None:\n root.right=root.left\n root.left=None\n self.invertTree(root.right)\n elif root.left==None and root.right!=None:\n root.left=root.right\n root.right=None\n self.invertTree(root.left)\n return root\n return root \n ","sub_path":"InvertBinaryTree.py","file_name":"InvertBinaryTree.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"199309632","text":"import dearpygui.dearpygui as dpg\n\ndpg.create_context()\n\n\n# def clipper_toggle(sender):\n# dpg.configure_item(\"table_clip\", clipper=dpg.get_value(sender))\n\n\nwith dpg.window(label=\"Tutorial\"):\n # dpg.add_checkbox(label=\"clipper\", callback=clipper_toggle, default_value=True)\n\n with dpg.table(header_row=False, tag=\"table_clip\", clipper=True):\n\n for i in range(5):\n dpg.add_table_column()\n\n for i in range(30000):\n with dpg.table_row():\n for j in range(5):\n dpg.add_text(f\"Row{i} Column{j}\")\n\ndpg.show_metrics()\n\ndpg.create_viewport(title='Custom Title', width=800, height=600)\ndpg.setup_dearpygui()\ndpg.show_viewport()\ndpg.start_dearpygui()\ndpg.destroy_context()\n","sub_path":"CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/documentation/_24_tables/_24_9_clipping/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"383028975","text":"import pygame\nfrom Ship import Ship\nfrom Target import Target\nfrom Rectangle import Rectangle\nfrom colors import *\nimport random\nimport math\nimport time\n\npygame.init()\n\n\nscreen = pygame.display.set_mode([1000, 800])\ndone = False\nclock = pygame.time.Clock()\nrandom.seed()\n\nspeed_simulation = 5\n\niteration = 100\nduration = 5. / speed_simulation\ngenome_size = 4\nsize_pop = 100\nsize_surviving_pop = size_pop // 2\n\nstate = 0\ntarget = Target(screen, 700, 500)\nrect1 = Rectangle(screen, 550, 350, 200, 20, -45)\nrect2 = Rectangle(screen, 350, 350, 200, 20, 45)\nrect3 = Rectangle(screen, 650, 450, 200, 20, -45)\nobstacles = [rect1, rect2]\nships = []\nspeed = 6\n\nmax_score_all_time = 0\n\n\ndef gen_ships():\n for k in range(0, size_pop):\n ship = Ship(screen)\n ship.genome = [(random.randrange(2*speed_simulation, 8*speed_simulation),\n random.randrange(-180, 180))] +\\\n [(random.randrange(2*speed_simulation, 8*speed_simulation),\n random.randrange(-160, 160))\n for i in range(genome_size-1)]\n ship.setSpeed(ship.genome[0][0], ship.genome[0][1])\n ship.setObstacles(obstacles)\n ships.append(ship)\n return ships\n\n\ndef select_ships():\n ships.sort(key=lambda s: s.score, reverse=True)\n return ships[:size_surviving_pop]\n\n\ndef mutations():\n for i, ship in enumerate(ships):\n ships[i].genome = [(g[0] + ((j+1*genome_size)/(2*genome_size))*(random.randrange(0, 100)/100. 
- 0.5)/speed_simulation*(1+ships[i].score**2+0.4*ships[i].score**3),\n g[1] + ((j+1*genome_size)/(2*genome_size))*(random.randrange(-10, 10))/(0.5+ships[i].score**2+ships[i].score**3))\n for j, g in enumerate(ships[i].genome)]\n return ships\n\n\ndef duplicate():\n for k in range(size_pop - size_surviving_pop):\n ship = Ship(screen)\n ship.copy(ships[k % size_surviving_pop])\n ships.append(ship)\n return ships\n\n\ndef reinit():\n for ship in ships:\n ship.init_position()\n return ships\n\n\ndef draw():\n for ship in ships:\n ship.draw()\n\n\ndef setSpeed(state):\n for ship in ships:\n ship.setSpeed(ship.genome[min(state, len(ship.genome)-1)][0], ship.genome[min(state, len(ship.genome)-1)][1])\n\n\ndef move():\n for ship in ships:\n ship.move()\n\n\ndef score(max_score_all_time):\n for ship in ships:\n ship.scoring(target)\n max_score = max(ship.score for ship in ships)\n if max_score > max_score_all_time:\n max_score_all_time = max_score\n for ship in ships:\n ship.score /= max_score\n return max_score_all_time\n\n\ndef all_ships_out():\n all_out = True\n for ship in ships:\n if ship.vx != 0 or ship.vy != 0:\n all_out = False\n return all_out\n\n\nships = gen_ships()\nt = time.clock()\nwhile not done:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n screen.fill(WHITE)\n for obst in obstacles:\n obst.draw()\n target.draw()\n draw()\n move()\n pygame.display.flip()\n if (time.clock() -t > state*duration/(genome_size+1)):\n setSpeed(state)\n state += 1\n\n if (all_ships_out() or time.clock() - t > duration) and iteration > 0:\n iteration -= 1\n state = 0\n max_score_all_time = score(max_score_all_time)\n ships = select_ships()\n ships = duplicate()\n ships = mutations()\n ships = reinit()\n setSpeed(0)\n t = time.clock()\n # ships = 
gen_ships()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"79371473","text":"import os\n\n#Parameters:\nks = ['4', '5', '6', '7', 'all']\nks = ['all']\nCs = ['1', '0.1', '2']\n#Cs = ['1', '10']\nkernels = ['rbf', 'sigmoid']\n#kernels = ['sigmoid']\ngammas = ['0.0', '1', '10']\n\ntrainset = '../../corpora/cwi_paetzold_training'\ntestset = '../../corpora/cwi_paetzold_testing'\nfor C in Cs:\n\tfor kernel in kernels:\n\t\tfor g in gammas:\n\t\t\tfor k in ks:\n\t\t\t\toutput = '../../labels/shardlow/labels_Shardlow_'+C+'_'+kernel+'_'+g+'_'+k\n\t\t\t\tcomm = 'nohup python Run_Shardlow.py '+trainset+' '+k+' '+C+' '+kernel+' 3 '+g+' 0.0 '+testset+' '+output+' &'\n\t\t\t\tos.system(comm)\n","sub_path":"cwi_separated_classes/scripts/identifiers/Run_All_Shardlow.py","file_name":"Run_All_Shardlow.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"245557223","text":"from scipy import signal\nimport scipy\nimport numpy as np\n\nNPERSEG = 1024 * 2\nL = 60\nNFFT = NPERSEG\n\n\ndef getTFFT(length, nperseg=NPERSEG):\n r = 1.0 / 8\n return (length - int(r * nperseg)) // (int(7 * r * nperseg))\n\n\ndef sp3(x, nfft=NFFT, nperseg=NPERSEG):\n Fs = 44100\n _, _, f = signal.spectrogram(x[:, 0], Fs, nfft=nfft, nperseg=nperseg)\n return f\n\n\ndef psp3(f):\n\n y = np.log10(f + 1)\n return y * 5e4\n\n y = 10 * np.log10(f + 1e-30) # 1e-10\n return y / 200.0 * 3 + 1.7 # ????????\n\n\ndef hz2mel(hz):\n \"\"\"Convert a value in Hertz to Mels\n :param hz: a value in Hz. This can also be a numpy array, conversion proceeds element-wise.\n :returns: a value in Mels. If an array was passed in, an identical sized array is returned.\n \"\"\"\n return 2595 * np.log10(1 + hz / 700.0)\n\n\ndef mel2hz(mel):\n \"\"\"Convert a value in Mels to Hertz\n :param mel: a value in Mels. This can also be a numpy array, conversion proceeds element-wise.\n :returns: a value in Hertz. If an array was passed in, an identical sized array is returned.\n \"\"\"\n return 700 * (10 ** (mel / 2595.0) - 1)\n\n\ndef get_filterbanks(nfilt=20, nfft=512, samplerate=16000, lowfreq=0, highfreq=None):\n \"\"\"Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond\n to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)\n :param nfilt: the number of filters in the filterbank, default 20.\n :param nfft: the FFT size. Default is 512.\n :param samplerate: the samplerate of the signal we are working with. Affects mel spacing.\n :param lowfreq: lowest band edge of mel filters, default 0 Hz\n :param highfreq: highest band edge of mel filters, default samplerate/2\n :returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. 
Each row holds 1 filter.\n \"\"\"\n highfreq = highfreq or samplerate / 2\n assert highfreq <= samplerate / 2, \"highfreq is greater than samplerate/2\"\n\n lowmel = hz2mel(lowfreq)\n highmel = hz2mel(highfreq)\n melpoints = np.linspace(lowmel, highmel, nfilt + 2)\n bin = np.floor((nfft + 1) * mel2hz(melpoints) / samplerate)\n\n fbank = np.zeros([nfilt, nfft // 2])\n for j in range(0, nfilt):\n for i in range(int(bin[j]), int(bin[j + 1])):\n fbank[j, i] = (i - bin[j]) / (bin[j + 1] - bin[j])\n for i in range(int(bin[j + 1]), int(bin[j + 2])):\n fbank[j, i] = (bin[j + 2] - i) / (bin[j + 2] - bin[j + 1])\n return fbank\n\n\ndef create_mel_filter(\n fft_size, n_freq_components=64, start_freq=100 - 80, end_freq=1000 * 5, samplerate=44100\n):\n \"\"\"\n Creates a filter to convolve with the spectrogram to get out mels\n\n \"\"\"\n mel_inversion_filter = get_filterbanks(\n nfilt=n_freq_components,\n nfft=fft_size,\n samplerate=samplerate,\n lowfreq=start_freq,\n highfreq=end_freq,\n )\n mel_filter = mel_inversion_filter.T / mel_inversion_filter.sum(axis=1)\n\n return mel_filter, mel_inversion_filter\n\n\ndef make_mel(spectrogram, mel_filter, shorten_factor=1):\n mel_spec = np.transpose(mel_filter).dot(np.transpose(spectrogram))\n return mel_spec\n\n\nmel_filter, _ = create_mel_filter(NFFT, n_freq_components=L)\n\n\ndef shrink(magnitude, S):\n xi = np.linspace(0, magnitude.shape[0] - 1, S)\n magnitude = np.interp(xi, np.arange(magnitude.shape[0]), magnitude)\n return magnitude\n\n\ndef shrinkMel(x, mel_filter=mel_filter):\n mel = make_mel(x[1:].T, mel_filter)\n mel[np.isnan(mel)] = 0\n return mel\n\n\nclass AA(object):\n def __init__(self, nfft=NFFT, nperseg=NPERSEG):\n self.mel_filter = mel_filter\n self.nfft = nfft\n self.nperseg = nperseg\n\n def shrinkMel(self, x):\n if self.mel_filter.shape[0] != self.nperseg:\n mel_filter, _ = create_mel_filter(self.nfft, n_freq_components=L)\n return shrinkMel(x, mel_filter=mel_filter)\n\n def getTFFT(self, length):\n 
print(self.nperseg)\n return getTFFT(length, self.nperseg)\n\n def sp3(self, x):\n return sp3(x, nfft=self.nfft, nperseg=self.nperseg)\n","sub_path":"ailive/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"117273911","text":"#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nimport logging\nimport logging.handlers\n\nimport os\nimport argparse\nimport sys\n\nimport pkg_resources\ntry:\n __version__ = pkg_resources.require(\"VermeerKAT\")[0].version\nexcept pkg_resources.DistributionNotFound:\n __version__ = \"dev\"\n\nPIPELINE_LOG = os.path.join(os.getcwd(), \"VermeerKAT.log\")\n\nclass DelayedFileHandler(logging.handlers.MemoryHandler):\n \"\"\"A DelayedFileHandler is a variation on the MemoryHandler. It will buffer up log\n entries until told to stop delaying, then dumps everything into the target file\n and from then on logs continuously. This allows the log file to be switched at startup.\"\"\"\n def __init__(self, filename, delay=True):\n logging.handlers.MemoryHandler.__init__(self, 100000, target=logging.FileHandler(filename))\n self._delay = delay\n\n def shouldFlush(self, record):\n return not self._delay\n\n def setFilename(self, filename, delay=False):\n self._delay = delay\n self.setTarget(logging.FileHandler(filename))\n if not delay:\n self.flush()\n\ndef create_logger():\n \"\"\" Create a console logger \"\"\"\n log = logging.getLogger(__name__)\n cfmt = logging.Formatter(\n ('%(name)s - %(asctime)s %(levelname)s - %(message)s'))\n log.setLevel(logging.DEBUG)\n\n filehandler = logging.FileHandler(PIPELINE_LOG)\n filehandler.setFormatter(cfmt)\n\n log.addHandler(filehandler)\n log.setLevel(logging.INFO)\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(cfmt)\n\n log.addHandler(console)\n return log, filehandler, console, cfmt\n\n# Create the log object\nlog, log_filehandler, log_console_handler, log_formatter = create_logger()\n\ndef remove_log_handler(hndl):\n log.removeHandler(hndl)\n\n\ndef add_log_handler(hndl):\n log.addHandler(hndl)\n\nNONCURSES = 
False","sub_path":"vermeerkat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"50991425","text":"import numpy as np\nimport pickle as pkl\nimport bisect\n\n\ncwd = '/home/canbulguoglu/app'\n\n\nclass FunkSVD():\n def __init__(self, data):\n # Accepts pandas dataframe where column names are user_id, item_id and Rating\n with open(cwd+'/flask.p', 'rb') as f:\n myDicts = pkl.load(f)\n self.user_features = myDicts[0]\n self.item_features = myDicts[1]\n self.user_data = {}\n\n for each in data:\n if str(each['id']) in self.item_features:\n\n self.user_data[str(each['id'])] = float(each['rating'])/2\n\n def get_recommendation(self, howMany=20):\n\n user_predictions = self.__user_prediction_for_same_movies(\n self.user_data)\n # Find most most similar user_ids\n user_ids = FunkSVD.get_most_similar_users(\n self.user_data, user_predictions, 1)\n\n result_list = []\n # get user features for users who are most similar to given new user\n for user in user_ids:\n for item, item_feature in self.item_features.items():\n # predict ratings for most similar users\n prediction = np.dot(\n self.user_features[user], item_feature)\n bisect.insort(result_list, [prediction, item])\n\n return_list = []\n for pair in result_list:\n if len(return_list) >= 60:\n break\n if pair[1] in return_list:\n continue\n\n return_list.append(pair[1])\n np.random.shuffle(return_list)\n\n return return_list[:howMany]\n\n def __user_prediction_for_same_movies(self, user_ratings):\n result = {}\n for key in user_ratings:\n if key not in self.item_features:\n continue\n\n for user in self.user_features:\n result.setdefault(user, []).append(\n np.dot(self.user_features[user], self.item_features[key]))\n\n return result\n\n @staticmethod\n def mean_squared_difference(a, b):\n summation = 0\n n = len(a)\n for i in range(0, n):\n difference = a[i] - b[i]\n squared_difference = difference**2\n summation = summation + squared_difference\n MSE = summation/n\n\n return 1/MSE\n\n @staticmethod\n def get_most_similar_users(user_ratings, user_predictions, howMany):\n similarities = 
[]\n\n for user, ratings in user_predictions.items():\n\n similarity = FunkSVD.mean_squared_difference(\n list(user_ratings.values()), ratings)\n\n similarities.append([user, similarity])\n\n similarities.sort(reverse=True, key=lambda x: x[1])\n\n return [each[0] for each in similarities[:howMany]]\n","sub_path":"new/app/funkrecommender.py","file_name":"funkrecommender.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"333444568","text":"import sqlite3\nconn = sqlite3.connect('test.db')\ncursor=conn.cursor()\n\n#sqlstr='insert into test values(1,\"helloworld\")'\n#cursor.execute(sqlstr)\nsqlstr='select * from test'\ndata=cursor.execute(sqlstr)\n#row=data.fetchone()\n#print(row)\nrows=data.fetchall()\nprint(rows)\n# for row in rows:\n# print(row)\nconn.commit()\nconn.close()\n","sub_path":"1.sqlite/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"278177041","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport pickle\nimport argparse\n#import tensorflow as tf\nimport numpy as np\nimport mxnet as mx\nimport random\nimport cv2\nimport sklearn\nfrom sklearn.decomposition import PCA\nfrom time import sleep\nfrom easydict import EasyDict as edict\n# sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))\nfrom mtcnn_detector import MtcnnDetector\nimport face_image\nimport face_preprocess\n\n\n\ndef do_flip(data):\n for idx in range(data.shape[0]):\n data[idx,:,:] = np.fliplr(data[idx,:,:])\n\ndef get_model(ctx, image_size, model_str, layer):\n _vec = model_str.split(',')\n assert len(_vec)==2\n prefix = _vec[0]\n epoch = int(_vec[1])\n print('---------*** Model loading ***-----------')\n # sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n model_path=os.path.join(os.path.dirname(__file__), '..', 'model')\n # # transform model to npy\n print(model_path)\n with open(os.path.join(model_path,'sym.pkl'), 'rb') as a_: # open file with write-mode\n # picklestring = pickle.dump(sym, a_)\n sym=pickle.load(a_)\n with open(os.path.join(model_path,'arg_params.pkl'), 'rb') as b_: # open file with write-mode\n # picklestring = pickle.dump(arg_params, b_)\n arg_params=pickle.load(b_)\n with open(os.path.join(model_path,'aux_params.pkl'), 'rb') as c_: # open file with write-mode\n # picklestring = pickle.dump(aux_params, c_)\n aux_params=pickle.load(c_)\n #end\n\n all_layers = sym.get_internals()\n sym = all_layers[layer+'_output']\n # print('sym',type(sym))\n # print('arg_params',type(arg_params))\n # print('aux_params',type(aux_params))\n model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)\n #model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])\n # 
print(image_size[0])\n model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])\n # print(arg_params,aux_params)\n model.set_params(arg_params, aux_params)\n return model\n\nclass FaceModel:\n def __init__(self, args):\n self.args = args\n ctx = mx.gpu(args.gpu)\n _vec = args.image_size.split(',')\n assert len(_vec)==2\n image_size = (int(_vec[0]), int(_vec[1]))\n self.model = None\n self.ga_model = None\n if len(args.model)>0:\n self.model = get_model(ctx, image_size, args.model, 'fc1')\n if len(args.ga_model)>0:\n self.ga_model = get_model(ctx, image_size, args.ga_model, 'fc1')\n\n # self.threshold = args.threshold\n self.det_minsize = 50\n self.det_threshold = [0.6,0.7,0.8]\n #self.det_factor = 0.9\n self.image_size = image_size\n mtcnn_path = os.path.join(os.path.dirname(__file__), '..','mtcnn-model')\n if args.det==0:\n # detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=self.det_threshold)\n with open(os.path.join(mtcnn_path,'mtcnn_0.pkl'), 'rb') as d_: # open file with write-mode\n # picklestring = pickle.dump(detector, d_)\n detector=pickle.load(d_)\n else:\n # detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=[0.0,0.0,0.2])\n with open(os.path.join(mtcnn_path,'mtcnn_1.pkl'), 'rb') as e_: # open file with write-mode\n # picklestring = pickle.dump(detector, e_)\n detector=pickle.load(e_)\n self.detector = detector\n\n\n def get_input(self, face_img):\n ret = self.detector.detect_face(face_img, det_type = self.args.det)\n if ret is None:\n return None\n bbox, points = ret\n if bbox.shape[0]==0:\n return None\n bbox = bbox[0,0:4]\n points = points[0,:].reshape((2,5)).T\n # print('bbox',bbox)\n # print('points',points)\n nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')\n nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)\n aligned = np.transpose(nimg, (2,0,1))\n return aligned\n\n def get_feature(self, 
aligned):\n input_blob = np.expand_dims(aligned, axis=0)\n data = mx.nd.array(input_blob)\n db = mx.io.DataBatch(data=(data,))\n self.model.forward(db, is_train=False)\n embedding = self.model.get_outputs()[0].asnumpy()\n embedding = sklearn.preprocessing.normalize(embedding).flatten()\n return embedding\n\n def get_ga(self, aligned):\n input_blob = np.expand_dims(aligned, axis=0)\n data = mx.nd.array(input_blob)\n db = mx.io.DataBatch(data=(data,))\n self.ga_model.forward(db, is_train=False)\n ret = self.ga_model.get_outputs()[0].asnumpy()\n g = ret[:,0:2].flatten()\n gender = np.argmax(g)\n a = ret[:,2:202].reshape( (100,2) )\n a = np.argmax(a, axis=1)\n age = int(sum(a))\n\n return gender, age\n\n","sub_path":"src/pickle_face_model.py","file_name":"pickle_face_model.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"343883688","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.sitemaps import views\nfrom django.views.generic import TemplateView\n\nfrom questions.views import QuestionFeedView, QuestionDetailView\nfrom questions.sitemaps import StaticViewSitemap, QuestionSitemap\n\n\nsitemaps = {\n 'static': StaticViewSitemap(),\n 'question': QuestionSitemap()\n}\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^comments/', include('django_comments.urls')),\n url('^', include('django.contrib.auth.urls')),\n url(r'^accounts/', include('accounts.urls')),\n url(r'^$', QuestionFeedView.as_view(), name='questions'),\n url(r'^question/(?P\\d+)/$',\n QuestionDetailView.as_view(),\n name='detail_question'),\n url(r'^new/$', TemplateView.as_view(\n template_name=\"questions/new_question.html\"),\n name='new_question'),\n url(r'^chat/$', TemplateView.as_view(\n template_name=\"questions/chat.html\"),\n name='chat'),\n url(r'^telefon/$', TemplateView.as_view(\n template_name=\"questions/telefon.html\"),\n name='telefon'),\n url(r'^forma/$', TemplateView.as_view(\n template_name=\"questions/forma_pr.html\"),\n name='forma'),\n url(r'^sitemap-(?P.+)\\.xml$', views.sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'^sitemap\\.xml$', views.index, {'sitemaps': sitemaps}),\n url(r'^robots.txt$', TemplateView.as_view(\n template_name='questions/robots.txt',\n content_type='text/plain'),)\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n","sub_path":"urist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"182362048","text":"import random\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import datasets, linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\nfrom matplotlib.font_manager import FontProperties\r\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\msyh.ttc\", size=15)\r\nlist=[]\r\nlist2=[]\r\nfor i in range(60):\r\n list.append([i])\r\nfor i in range(60):\r\n list2.append([2*i+random.randint(-10,10)])\r\nx=tuple(list)\r\ny=tuple(list2)\r\ngreat = linear_model.LinearRegression()#引入线性回归模型\r\ngreat.fit(x,y)#将输入进来的数字转换为矩阵模式\r\n#现在我们开始用plot画出线性方程,计算出0处以及我们的最大值点出预测出来的y的大小\r\n\r\nplt.scatter(x,y)\r\nplt.title(\"机器学习之:线性回归\\n制作人:宋谨岑\",fontproperties=font)\r\nplt.plot([0,60],[1.73825137,great.predict(60)],linewidth=3,color=\"black\")\r\nplt.show()\r\nprint(great.predict(0))\r\nprint(great.predict(60))\r\nprint(great.predict(60))#这里输入的是x,这样就会将矩阵模式线性回归出y的值的大小\r\n#predict仅仅对我们的已经应用fit方法转换过的矩阵模式有效.\r\n","sub_path":"高考完后写的Python代码/线性回归.py","file_name":"线性回归.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"495972588","text":"\n########Backend Utility Libs#####\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\n\nimport django.http\nimport json\n################################\n\n########Data Science and Graph libs######\nimport pandas as pd\nimport networkx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n########Normal Graph############\nfrom bokeh.models import Range1d, Circle, ColumnDataSource, MultiLine\nfrom bokeh.plotting import figure\nfrom bokeh.models.graphs import from_networkx\nfrom bokeh.palettes import Category10\nfrom bokeh.transform import linear_cmap\nfrom bokeh.embed import json_item\n\n\n############Chord################\nimport numpy as np\n \nfrom chord import Chord\n########################################\n\n\n\n\n############Filtering###############\ndef filterDataByTime(request, data):\n startDate = request.POST.get(\"start_date\", '0000-00-00')\n endDate = request.POST.get(\"end_date\", '9999-99-99')\n return data[ ((data[\"date\"]>=startDate) & (data[\"date\"] <= endDate)) ]\n\ndef filterDataByJobtitle(request, data):\n if not 'job_titles' in request.POST: return data\n\n fromMask = data[\"fromJobtitle\"] == '___'\n toMask = data[\"toJobtitle\"] == '___'\n\n for i in request.POST.get(\"job_titles\").split(','):\n fromMask |= (data[\"fromJobtitle\"] == i)\n toMask |= (data[\"toJobtitle\"] == i)\n\n return data[(fromMask & toMask)]\n\ndef filterDataBySentiment(request,data):\n mask = data[\"sentiment\"] == 10\n filterSelected = False\n if 'sentiment_negative' in request.POST:\n mask |= (data[\"sentiment\"] <= -0.1)\n filterSelected = True\n if 'sentiment_neutral' in request.POST:\n mask |= ((data[\"sentiment\"] >= -0.1) & (data[\"sentiment\"] <= 0.1))\n filterSelected = True\n if 'sentiment_positive' in request.POST:\n mask |= (data[\"sentiment\"] >= 0.1)\n filterSelected = True\n if (filterSelected):\n print(len(data))\n print(len(data[mask]))\n return 
data[mask]\n return data\n\ndef filterDataByPerson(request,data): #used for other purposes not necessarily in filtering\n personID = request.POST.get(\"personID\")\n return data[ ( (data[\"fromId\"] == personID) | (data[\"toId\"] == personID) ) ]\n\ndef filterDataByEmailAddress(request,data):\n email = request.POST.get(\"email\")\n return data[ ( (data[\"fromEmail\"] == email) | (data[\"toEmail\"] == email) ) ]\n\n\"\"\"\ndef filter(request,data): #full filtering\n data = filterDataByTime(request, data)\n data = filterDataByJobtitle(request, data)\n data = filterDataBySentiment(request, data)\n data = filterDataByEmailAddress(request, data)\n # compound with more filtering options\n \n return data \n\"\"\"\n\ndef filter(request,data): #full filtering\n finalData = filterDataByTime(request, data)\n finalData = filterDataByJobtitle(request, finalData)\n finalData = filterDataBySentiment(request, finalData)\n #return filterDataByJobtitles(request, finalData) \n return finalData\n\n################################################################\n\ndef index(request):\n return render(request, 'index.html')\n\ndef makeGraph(request, df_enron):\n G = networkx.from_pandas_edgelist(df_enron, 'fromId', 'toId', edge_attr=True)\n\n di = {'CEO':1,'Director':2,'Employee':3,'In House Lawyer':4,'Manager':5,'Managing Director':6,'President':7,'Trader':8,'Unknown':9,'Vice President':10}\n df_rejob = df_enron.replace({\"fromJobtitle\": di})\n df_attributes = df_enron[['fromId', 'fromJobtitle', 'fromEmail']].drop_duplicates()\n df_attributes.columns = ['fromId', 'job', 'fromEmail']\n df_attributesx = df_rejob[['fromId', 'fromJobtitle', 'fromEmail']].drop_duplicates()\n job = df_attributes.set_index('fromId').to_dict('i')\n jobx = df_attributesx.set_index('fromId').to_dict('i')\n fromEmail = df_attributes.set_index('fromEmail').to_dict('i')\n networkx.set_node_attributes(G, job)\n networkx.set_node_attributes(G, jobx)\n networkx.set_node_attributes(G, fromEmail)\n #jobs = 
['Employee','Vice President','Unknown','Manager','CEO','Trader','Director','President','Managing Director','In House Lawyer']\n\n degrees = dict(networkx.degree(G))\n networkx.set_node_attributes(G, name='degree', values=degrees)\n adjusted_node_size = dict([(node, (degree + 5) - ((degree + 5)*0.3) ) for node, degree in networkx.degree(G)])\n networkx.set_node_attributes(G, name='adjusted_node_size', values=adjusted_node_size)\n\n size_by_this_attribute = 'adjusted_node_size'\n color_by_this_attribute = 'fromJobtitle'\n\n color_palette = Category10[10]\n\n TOOLTIPS = [\n (\"Person ID\", \"@index\"),\n (\"Email\", \"@fromEmail\"),\n (\"people communicated with\", \"@degree\"),\n (\"Jobtitle\",\"@job\"),\n ]\n\n graph_size = int(request.POST.get('graph_size', '720'))\n plot = figure(tooltips = TOOLTIPS,\n tools=\"pan,zoom_in,wheel_zoom,save,reset,box_select,undo\", active_scroll='wheel_zoom',\n x_range=Range1d(-20,20), y_range=Range1d(-20,20), title='Enron Emails',\n plot_width=graph_size, plot_height=graph_size)\n plot.axis.visible = False\n\n N_graph = from_networkx(G, networkx.spring_layout, scale=100)\n\n N_graph.node_renderer.glyph = Circle(size=size_by_this_attribute,\n fill_color=linear_cmap(color_by_this_attribute, color_palette, 1, 10))\n\n N_graph.edge_renderer.glyph = MultiLine(line_alpha=10, line_width=1)\n\n plot.renderers.append(N_graph)\n\n item_text = json.dumps(json_item(plot))\n\n return item_text\n # import holoviews as hv\n # from holoviews import opts, dim\n # import networkx as nx\n # import dask.dataframe as dd\n # from holoviews.selection import link_selections\n # from holoviews.operation.datashader import (\n # datashade, dynspread, directly_connect_edges, bundle_graph, stack\n # )\n # from holoviews.element.graphs import layout_nodes\n # from datashader.layout import random_layout\n # from colorcet import fire\n # import pandas as pd\n # import networkx\n # import matplotlib.pyplot as plt\n # import numpy as np\n # from bokeh.plotting 
import figure\n # from bokeh.resources import CDN\n # from bokeh.embed import file_html\n\n # hv.extension('bokeh')\n # df_chord = df_enron.sort_values('fromJobtitle')\n # df_chord['index'] = df_chord.index\n # df_links = df_chord.groupby(['fromId', 'toId']).count()\n # df_links = df_links.reset_index()[['fromId','toId', 'date']]\n # df_links.columns = ['source', 'target', 'value']\n # x = df_chord[['fromId', 'fromJobtitle']].drop_duplicates()\n # x.columns = ['source', 'fromJobtitle']\n\n # df_links = pd.merge(df_links, x, on=\"source\")\n # df_nodes = df_chord[['fromId','fromEmail', 'fromJobtitle']].drop_duplicates().reset_index(drop=True)\n # df_nodes.columns = ['index', 'name', 'group']\n # df_nodes.sort_values('name')\n # y = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['fromId']).count().reset_index()\n # y.columns = ['index', 'sizeOut']\n # y['sizeIn'] = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['toId']).count().reset_index()[['fromId']]\n # y['size'] = y['sizeIn'] + y['sizeOut']\n # df_nodes = pd.merge(df_nodes, y, on='index')\n # df_nodes['size2'] = df_nodes['size']/3+8\n # from bokeh.models import Circle\n\n # nodes = hv.Dataset(df_nodes, 'index')\n # edge_df = df_links\n\n # eb_graph = hv.Graph((edge_df, nodes))\n\n # T_graph = layout_nodes(eb_graph, layout=nx.spring_layout)\n # #B_graph_3 = bundle_graph(T_graph)\n # from bokeh.models import HoverTool\n # TOOLTIPS = [\n # (\"Person ID\", \"@index\"),\n # (\"people communicated with\", \"@size\"),\n # (\"Jobtitle\",\"@group\"),\n # ]\n # hover = HoverTool(tooltips=TOOLTIPS)\n # graph_size = int(request.POST.get('graph_size', '720'))\n # #B_graph_3.options(node_color='group', cmap='Category20', node_size='size2', show_legend=True, tools=[hover],frame_width=graph_size, frame_height=graph_size)\n # T_graph.options(node_color='group', cmap='Category20', node_size='size2', show_legend=True, tools=[hover],frame_width=graph_size, frame_height=graph_size)\n\n # # # json_graph = 
json_item(B_graph_3)\n\n # # json_graph = json_item(T_graph)\n # # item_text = json.dumps(json_graph)\n\n # # return item_text\n\n # renderer = hv.renderer('bokeh')\n # plot = renderer.get_plot(T_graph)\n\n # return file_html(plot, CDN, \"Plot\")\n\ndef fullSizeGraph(request):\n \n graph_json = makeGraph(request, filter(request,pd.read_csv(request.FILES['csv_data'])))\n # return django.http.JsonResponse(graph_json, safe=False)\n return JsonResponse({\n 'graph': graph_json\n })\n\ndef initialFullSizeGraph(request):\n \n df_dataset = pd.read_csv(request.FILES['csv_data'])\n \n startDate = df_dataset[\"date\"].min()\n endDate = df_dataset[\"date\"].max()\n\n startYear = int(startDate[:4])\n endYear = int(endDate[:4])\n\n startMonth = int(startDate[5:7])\n endMonth = int(startDate[5:7])\n\n jobTitles = df_dataset.fromJobtitle.unique().tolist()\n\n graph_json = makeGraph(request, df_dataset)\n\n return JsonResponse({\n 'graph': graph_json,\n 'parameters': {\n 'timeSlider': {\n 'startYear': startYear,\n 'startMonth': startMonth,\n 'endYear': endYear,\n 'endMonth': endMonth\n },\n 'jobTitles': jobTitles\n }\n })\n\ndef chordDiagram(person_id, df_enron):\n import holoviews as hv\n from holoviews import opts\n from bokeh.resources import CDN\n from bokeh.embed import file_html\n\n hv.extension('bokeh')\n\n df_chord = df_enron.sort_values('fromJobtitle')\n df_chord['index'] = df_chord.index\n\n df_links = df_chord.groupby(['fromId', 'toId']).agg({'date':'count', 'sentiment':'mean'})\n df_links = df_links.reset_index()[['fromId','toId', 'date', 'sentiment']]\n df_links.columns = ['source', 'target', 'value', 'sentiment']\n\n x = df_chord[['fromId', 'fromJobtitle']].drop_duplicates()\n x.columns = ['source', 'fromJobtitle']\n\n df_links = pd.merge(df_links, x, on=\"source\")\n df_links.drop_duplicates(subset='source')\n\n df_nodes = df_chord[['fromId','fromEmail', 'fromJobtitle']].drop_duplicates().reset_index(drop=True)\n df_nodes.columns = ['index', 'name', 'group']\n 
df_nodes.sort_values('name')\n y = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['fromId']).count().reset_index()\n y.columns = ['index', 'size']\n df_nodes = pd.merge(df_nodes, y, on='index')\n df_nodes['size'] = df_nodes['size']/3+8\n\n nodes = hv.Dataset(df_nodes, 'index')\n edge_df = df_links\n\n import seaborn as sns # also improves the look of plots\n sns.set() # set Seaborn defaults\n\n chord = hv.Chord((df_links, nodes)).select(value=(5, None))\n chord.opts(\n opts.Chord(cmap='Category20', edge_cmap='Category20', edge_color='sentiment', \n labels='name', node_color='group', edge_alpha=0.8, edge_line_width=1.5))\n\n final_chord = chord.select(index=person_id)\n\n plot = hv.render(final_chord, backend='bokeh')\n item_text = json.dumps(json_item(plot))\n return item_text\n\n # renderer = hv.renderer('bokeh')\n # plot = renderer.get_plot(final_chord).state\n # return file_html(plot, CDN, \"Plot\")\n\ndef individualInfo(request):\n\n # import matplotlib.pyplot as plt\n\n # plt.rcParams['figure.figsize'] = [10, 5] # default hor./vert. 
size of plots, in inches\n # plt.rcParams['lines.markeredgewidth'] = 1 # to fix issue with seaborn box plots; needed after import seaborn\n\n # # reveal a hint only while holding the mouse down\n # from IPython.display import HTML\n # HTML(\"\")\n\n # # hide FutureWarnings, which may show for Seaborn calls in most recent Anaconda\n # import warnings\n # warnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n person_id = int(request.POST['person_id'])\n\n df_enron = pd.read_csv(request.FILES['csv_data'])\n Person_ID_1, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received, array_mails_sent, array_mails_received, p_most_received_emails, most_received_emails_nr, p_most_sent_emails, most_sent_emails_nr = getIndividualInfoInner(df_enron, person_id)\n \n df_enron_tf = filter(request,df_enron)\n Person_ID_1_tf, ID_mail_tf, job_title_tf, mails_send_tf, mean_sentiment_send_tf, min_sentiment_send_tf, max_sentiment_send_tf, mails_received_tf, mean_sentiment_received_tf, min_sentiment_received_tf, max_sentiment_received_tf, array_mails_sent_tf, array_mails_received_tf, p_most_received_emails_tf, most_received_emails_nr_tf, p_most_sent_emails_tf, most_sent_emails_nr_tf = getIndividualInfoInner(df_enron_tf, person_id)\n\n chord = chordDiagram(person_id, df_enron)\n\n #Person_ID_1, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received\n return JsonResponse({\n 'meta': {\n 'person_id': str(Person_ID_1),\n 'mail_address': str(ID_mail),\n 'job_title': str(job_title),\n },\n 'all_time': {\n 'mails_sent': str(mails_send),\n 'min_sentiment_sent': str(min_sentiment_send),\n 'mean_sentiment_sent': str(mean_sentiment_send),\n 'max_sentiment_sent': str(max_sentiment_send),\n 'array_mails_sent': array_mails_sent,\n 'mails_received': 
str(mails_received),\n 'min_sentiment_received': str(min_sentiment_received),\n 'mean_sentiment_received': str(mean_sentiment_received),\n 'max_sentiment_received': str(max_sentiment_received),\n 'array_mails_received': array_mails_received,\n 'person_most_emails_received' : str(p_most_received_emails),\n 'number_received' : str(most_received_emails_nr),\n 'person_most_emails_sent' : str(p_most_sent_emails),\n 'number_sent' : str(most_sent_emails_nr),\n\n },\n 'time_filtered': {\n 'mails_sent': str(mails_send_tf),\n 'min_sentiment_sent': str(min_sentiment_send_tf),\n 'mean_sentiment_sent': str(mean_sentiment_send_tf),\n 'max_sentiment_sent': str(max_sentiment_send_tf),\n 'array_mails_sent': array_mails_sent_tf,\n 'mails_received': str(mails_received_tf),\n 'min_sentiment_received': str(min_sentiment_received_tf),\n 'mean_sentiment_received': str(mean_sentiment_received_tf),\n 'max_sentiment_received': str(max_sentiment_received_tf),\n 'array_mails_received': array_mails_received_tf,\n 'person_most_emails_received' : str(p_most_received_emails_tf),\n 'number_received' : str(most_received_emails_nr_tf),\n 'person_most_emails_sent' : str(p_most_sent_emails_tf),\n 'number_sent' : str(most_sent_emails_nr_tf),\n },\n 'chord': chord\n })\n\ndef getIndividualInfoInner(df_enron, person_id):\n person_send = df_enron['fromId'] == person_id\n person_received = df_enron['toId'] == person_id\n df_1 = df_enron[person_send]\n df_2 = df_1[['fromEmail']]\n df_3 = df_2.describe()\n ID_mail = df_3['fromEmail']['top']\n df_describe_person = df_1[['fromJobtitle']].describe()\n job_title = df_describe_person['fromJobtitle']['top']\n mails_send = df_1['sentiment'].count()\n mean_sentiment_send = df_1['sentiment'].mean()\n min_sentiment_send = df_1['sentiment'].min()\n max_sentiment_send = df_1['sentiment'].max()\n df_received = df_enron[person_received]\n mails_received = df_received['sentiment'].count()\n mean_sentiment_received = df_received['sentiment'].mean()\n min_sentiment_received 
= df_received['sentiment'].min()\n max_sentiment_received = df_received['sentiment'].max()\n emails_sent = 'none'\n\n\n \n\n df_person = df_enron[person_send | person_received]\n person = df_person.groupby([\"fromId\"])[[\"fromEmail\"]].count().sort_values(by = \"fromEmail\", ascending = False).iloc[[0]]\n\n person_with_most_received_emails = person.index.values[0]\n nr_received_emails = person.values[0][0]\n\n person = df_person.groupby([\"toId\"])[[\"toEmail\"]].count().sort_values(by = \"toEmail\", ascending = False).iloc[[0]]\n\n person_with_most_sent_emails = person.index.values[0]\n nr_sent_emails = person.values[0][0]\n\n try:\n df_emails_sent_1 = df_1.groupby('toId').describe()\n df_emails_sent_2 = df_emails_sent_1['fromId']\n emails_sent = df_emails_sent_2[['count']].to_json()\n except:\n pass\n emails_received = 'none'\n try:\n emails_received_1 = df_received.groupby('fromId').describe()\n emails_received_2 = emails_received_1['toId']\n emails_received = emails_received_2[['count']].to_json()\n except:\n pass\n return person_id, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received, emails_sent, emails_received, person_with_most_received_emails, nr_received_emails, person_with_most_sent_emails, nr_sent_emails\n #from bokeh.io import output_notebook, show, save\n","sub_path":".history/backend/views_20210626185437.py","file_name":"views_20210626185437.py","file_ext":"py","file_size_in_byte":17445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"488334820","text":"#!/usr/bin/env python\n#\n# saveperspective.py - The SavePerspectiveAction\n#\n# Author: Paul McCarthy \n#\n\"\"\"This module provides the :class:`SavePerspectiveAction` class, an action\nwhich allows the user to save the current perspective.\n\"\"\"\n\n\nimport fsleyes.strings as strings\nimport fsleyes.perspectives as perspectives\nfrom . import base\n\n\nclass SavePerspectiveAction(base.Action):\n \"\"\"The ``SavePerspectiveAction`` allows the user to save the current\n :class:`.FSLeyesFrame` layout as a perspective, so it can be restored\n at a later time. See the :mod:`.perspectives` module.\n \"\"\"\n\n def __init__(self, frame):\n \"\"\"Create a ``SavePerspectiveAction``.\n\n :arg frame: The :class:`.FSLeyesFrame`.\n \"\"\"\n\n self.__frame = frame\n\n base.Action.__init__(self, self.__savePerspective)\n\n\n def __savePerspective(self):\n \"\"\"Save the current :class:`.FSLeyesFrame` layout as a perspective.\n The user is prompted to enter a name, and the current frame layout\n is saved via the :func:`.perspectives.savePerspective` function.\n \"\"\"\n\n import wx\n\n builtIns = list(perspectives.BUILT_IN_PERSPECTIVES.keys())\n saved = perspectives.getAllPerspectives()\n\n while True:\n dlg = wx.TextEntryDialog(\n self.__frame,\n message=strings.messages[self, 'enterName'])\n\n if dlg.ShowModal() != wx.ID_OK:\n return\n\n name = dlg.GetValue()\n\n if name.strip() == '':\n return\n\n # Not allowed to use built-in perspective names\n if name in builtIns:\n dlg = wx.MessageDialog(\n self.__frame,\n message=strings.messages[\n self, 'nameIsBuiltIn'].format(name),\n style=(wx.ICON_EXCLAMATION | wx.OK))\n dlg.ShowModal()\n continue\n\n # Name collision - confirm overwrite\n if name in saved:\n dlg = wx.MessageDialog(\n self.__frame,\n message=strings.messages[\n self, 'confirmOverwrite'].format(name),\n style=(wx.ICON_QUESTION | wx.YES_NO | wx.NO_DEFAULT))\n\n if dlg.ShowModal() == wx.ID_NO:\n continue\n\n break\n\n 
perspectives.savePerspective(self.__frame, name)\n\n self.__frame.refreshPerspectiveMenu()\n","sub_path":"fsleyes/actions/saveperspective.py","file_name":"saveperspective.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"643828540","text":"from abc import ABC, abstractmethod\r\nimport numpy as np\r\nfrom numpy import sin, cos, pi\r\n\r\ng = 9.81\r\n\r\n\r\nclass Bridge:\r\n def __init__(self, length, moment):\r\n self.length = length\r\n self.moment = moment\r\n self.angle = 0\r\n self.vel = 0\r\n self.acc = 0\r\n self.fulcrum = np.array([0, 0])\r\n self.bots = []\r\n self.tipped = False\r\n\r\n def add_robot(self, robot):\r\n self.bots.append(robot)\r\n\r\n def next_frame(self, delta_time=0.05):\r\n last_acc = self.acc\r\n last_vel = self.vel\r\n last_angle = self.angle\r\n\r\n total_torque = 0\r\n total_inertia = self.moment\r\n for i in self.bots:\r\n i.update(self.angle, self.vel, delta_time=delta_time)\r\n if abs(i.pos) < abs(self.length / 2):\r\n torque = g*i.mass*i.pos*cos(self.angle)\r\n total_torque += torque\r\n inertia = i.mass*i.pos**2\r\n total_inertia += inertia\r\n\r\n self.acc = total_torque / total_inertia\r\n self.vel = self.acc*delta_time+last_vel\r\n self.angle = .5*(self.vel+last_vel)*delta_time+last_angle\r\n\r\n if abs(self.angle) > pi/2:\r\n self.tipped = True\r\n\r\n def sim_time(self, time, delta_time=0.05):\r\n t = []\r\n ang = []\r\n vel = []\r\n acc = []\r\n pos = []\r\n pos2 = []\r\n rvel = []\r\n for i in np.arange(0, time, delta_time):\r\n if not self.tipped:\r\n self.next_frame(delta_time)\r\n t.append(i)\r\n ang.append(self.angle)\r\n vel.append(self.vel)\r\n acc.append(self.acc)\r\n pos.append(self.bots[0].pos)\r\n pos2.append(self.bots[1].pos)\r\n rvel.append(self.bots[0].vel)\r\n # print(i, self.angle, self.vel, self.acc, self.bots[0].pos, sep='\\t')\r\n\r\n return t, ang, vel, acc, pos, pos2\r\n\r\n def get_bots(self):\r\n pos = []\r\n vel = []\r\n\r\n for i in self.bots:\r\n pos.append(i.pos)\r\n vel.append(i.vel)\r\n\r\n return pos, vel\r\n\r\n\r\nclass Robot(ABC):\r\n def __init__(self, mass, pos, vel, acc):\r\n \"\"\"\r\n Dynamic Load for the bridge\r\n :param mass: Mass (kg)\r\n :param pos: Origin is at fulcrum, right is 
positive (m)\r\n :param vel: Field oriented velocity (m/s)\r\n :param acc: Field oriented acceleration (m/s^2)\r\n :param friction: Coefficient of kinetic friction\r\n \"\"\"\r\n self.mass = mass\r\n self.pos = pos\r\n self.vel = vel\r\n self.acc = acc\r\n\r\n def step_vel(self, new_vel, delta_time=0.05):\r\n last_vel = self.vel\r\n self.vel = new_vel\r\n self.pos += .5*(self.vel+last_vel)*delta_time\r\n\r\n def step_acc(self, new_acc, delta_time=0.05):\r\n self.acc = new_acc\r\n new_vel = self.vel + self.acc*delta_time\r\n self.step_vel(new_vel, delta_time=delta_time)\r\n\r\n @abstractmethod\r\n def update(self, angle, vel, delta_time=.05):\r\n pass\r\n","sub_path":"src/antares/labyrinth/simulation/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"119846785","text":"import socket\nimport threading\nimport threaded\n\nHOST = socket.gethostbyname(socket.gethostname())\nPORT = 4000\n\n# \"with\" look like \"try-finally\"\n# socket(ADDRESS_FAMILY, SOCKET_TYPE)\n# ADRESS_FAMILY -> AF_NET (default): IPV4\n# -> AF_INET6: IPV6\n# SOCKET_TYPE -> SOCK_STREAM (default): TCP \n# -> SOCK_DGRAM: UDP\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as tcp:\n # used for defining the communication end point (socket), associating the socket with a address\n # bind(ADDRESS_FAMILY(AF_NET), Port)\n # socket.gethostname() returns the hostname's machine\n\n print(f'Try to connect: {HOST}:{PORT}')\n\n tcp.bind((HOST, PORT))\n\n # define the number of pending connections the queue will hold\n # listen(BACKLOG)\n # socket.SOMAXCONN is the maximum backlog value that the \"socket.listen\" can allow by system\n tcp.listen(socket.SOMAXCONN)\n while True:\n # if input(\"Write 'exit' or 'quit' for close server...\\n\") == 'exit' or 'quit':\n # print('\\033[1;30;41m Good bye! \\033[m')\n # break\n\n # return a new socket representing the connection, and the address of the client\n (conn, addr) = tcp.accept()\n\n threading.Thread(target=threaded.Thread, args=(conn,)).start()\ntcp.close()\n","sub_path":"server_side.py","file_name":"server_side.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"289665632","text":"# Lint as: python3\n# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A library of transformations that can be applied to a computation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport six\nfrom six.moves import range\n\nfrom tensorflow_federated.python.common_libs import anonymous_tuple\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.impl import compiled_computation_transforms\nfrom tensorflow_federated.python.core.impl import computation_building_blocks\nfrom tensorflow_federated.python.core.impl import computation_constructing_utils\nfrom tensorflow_federated.python.core.impl import context_stack_base\nfrom tensorflow_federated.python.core.impl import federated_computation_utils\nfrom tensorflow_federated.python.core.impl import intrinsic_defs\nfrom tensorflow_federated.python.core.impl import transformation_utils\n\n\ndef extract_intrinsics(comp):\n r\"\"\"Extracts intrinsics to the scope which binds any variable it depends on.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n replaces the following computation containing a called intrinsic:\n\n ...\n \\\n Call\n / \\\n Intrinsic ...\n\n with the following computation 
containing a block with the extracted called\n intrinsic:\n\n Block\n / \\\n [x=Call] ...\n / \\ \\\n Intrinsic ... Ref(x)\n\n The called intrinsics are extracted to the scope which binds any variable the\n called intrinsic depends. If the called intrinsic is not bound by any\n computation in `comp` it will be extracted to the root. Both the\n `parameter_name` of a `computation_building_blocks.Lambda` and the name of any\n variable defined by a `computation_building_blocks.Block` can affect the scope\n in which a reference in called intrinsic is bound.\n\n NOTE: This function will also extract blocks to the scope in which they are\n bound because block variables can restrict the scope in which intrinsics are\n bound.\n\n Args:\n comp: The computation building block in which to perform the extractions.\n The names of lambda parameters and block variables in `comp` must be\n unique.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n ValueError: If `comp` contains variables with non-unique names.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n _check_has_unique_names(comp)\n name_generator = computation_constructing_utils.unique_name_generator(comp)\n unbound_references = _get_unbound_references(comp)\n\n def _contains_unbound_reference(comp, names):\n \"\"\"Returns `True` if `comp` contains unbound references to `names`.\n\n This function will update the non-local `unbound_references` captured from\n the parent context if `comp` is not contained in that collection. 
This can\n happen when new computations are created and added to the AST.\n\n Args:\n comp: The computation building block to test.\n names: A Python string or a list, tuple, or set of Python strings.\n \"\"\"\n if isinstance(names, six.string_types):\n names = (names,)\n if comp not in unbound_references:\n references = _get_unbound_references(comp)\n unbound_references.update(references)\n return any(n in unbound_references[comp] for n in names)\n\n def _is_called_intrinsic_or_block(comp):\n \"\"\"Returns `True` if `comp` is a called intrinsic or a block.\"\"\"\n return (_is_called_intrinsic(comp) or\n isinstance(comp, computation_building_blocks.Block))\n\n def _should_transform(comp):\n \"\"\"Returns `True` if `comp` should be transformed.\n\n The following `_extract_intrinsic_*` methods all depend on being invoked\n after `_should_transform` evaluates to `True` for a given `comp`. Because of\n this certain assumptions are made:\n\n * transformation functions will transform a given `comp`\n * block variables are guaranteed to not be empty\n\n Args:\n comp: The computation building block in which to test.\n \"\"\"\n if isinstance(comp, computation_building_blocks.Block):\n return (_is_called_intrinsic_or_block(comp.result) or any(\n isinstance(e, computation_building_blocks.Block)\n for _, e in comp.locals))\n elif isinstance(comp, computation_building_blocks.Call):\n return _is_called_intrinsic_or_block(comp.argument)\n elif isinstance(comp, computation_building_blocks.Lambda):\n if _is_called_intrinsic(comp.result):\n return True\n if isinstance(comp.result, computation_building_blocks.Block):\n for index, (_, variable) in enumerate(comp.result.locals):\n names = [n for n, _ in comp.result.locals[:index]]\n if (not _contains_unbound_reference(variable, comp.parameter_name) and\n not _contains_unbound_reference(variable, names)):\n return True\n elif isinstance(comp, computation_building_blocks.Selection):\n return _is_called_intrinsic_or_block(comp.source)\n 
elif isinstance(comp, computation_building_blocks.Tuple):\n return any(_is_called_intrinsic_or_block(e) for e in comp)\n return False\n\n def _extract_from_block(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.result):\n called_intrinsic = comp.result\n name = six.next(name_generator)\n variables = comp.locals\n variables.append((name, called_intrinsic))\n result = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n return computation_building_blocks.Block(variables, result)\n elif isinstance(comp.result, computation_building_blocks.Block):\n return computation_building_blocks.Block(comp.locals + comp.result.locals,\n comp.result.result)\n else:\n variables = []\n for name, variable in comp.locals:\n if isinstance(variable, computation_building_blocks.Block):\n variables.extend(variable.locals)\n variables.append((name, variable.result))\n else:\n variables.append((name, variable))\n return computation_building_blocks.Block(variables, comp.result)\n\n def _extract_from_call(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.argument):\n called_intrinsic = comp.argument\n name = six.next(name_generator)\n variables = ((name, called_intrinsic),)\n result = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n else:\n block = comp.argument\n variables = block.locals\n result = block.result\n call = computation_building_blocks.Call(comp.function, result)\n block = computation_building_blocks.Block(variables, call)\n return _extract_from_block(block)\n\n def _extract_from_lambda(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.result):\n called_intrinsic = comp.result\n name = six.next(name_generator)\n variables = ((name, called_intrinsic),)\n ref = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n if not 
_contains_unbound_reference(comp.result, comp.parameter_name):\n fn = computation_building_blocks.Lambda(comp.parameter_name,\n comp.parameter_type, ref)\n return computation_building_blocks.Block(variables, fn)\n else:\n block = computation_building_blocks.Block(variables, ref)\n return computation_building_blocks.Lambda(comp.parameter_name,\n comp.parameter_type, block)\n else:\n block = comp.result\n extracted_variables = []\n retained_variables = []\n for name, variable in block.locals:\n names = [n for n, _ in retained_variables]\n if (not _contains_unbound_reference(variable, comp.parameter_name) and\n not _contains_unbound_reference(variable, names)):\n extracted_variables.append((name, variable))\n else:\n retained_variables.append((name, variable))\n if retained_variables:\n result = computation_building_blocks.Block(retained_variables,\n block.result)\n else:\n result = block.result\n fn = computation_building_blocks.Lambda(comp.parameter_name,\n comp.parameter_type, result)\n block = computation_building_blocks.Block(extracted_variables, fn)\n return _extract_from_block(block)\n\n def _extract_from_selection(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.source):\n called_intrinsic = comp.source\n name = six.next(name_generator)\n variables = ((name, called_intrinsic),)\n result = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n else:\n block = comp.source\n variables = block.locals\n result = block.result\n selection = computation_building_blocks.Selection(\n result, name=comp.name, index=comp.index)\n block = computation_building_blocks.Block(variables, selection)\n return _extract_from_block(block)\n\n def _extract_from_tuple(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n variables = []\n elements = []\n for name, element in anonymous_tuple.to_elements(comp):\n if _is_called_intrinsic_or_block(element):\n variable_name = 
six.next(name_generator)\n variables.append((variable_name, element))\n ref = computation_building_blocks.Reference(variable_name,\n element.type_signature)\n elements.append((name, ref))\n else:\n elements.append((name, element))\n tup = computation_building_blocks.Tuple(elements)\n block = computation_building_blocks.Block(variables, tup)\n return _extract_from_block(block)\n\n def _transform(comp):\n \"\"\"Returns a new transformed computation or `comp`.\"\"\"\n if not _should_transform(comp):\n return comp, False\n if isinstance(comp, computation_building_blocks.Block):\n comp = _extract_from_block(comp)\n elif isinstance(comp, computation_building_blocks.Call):\n comp = _extract_from_call(comp)\n elif isinstance(comp, computation_building_blocks.Lambda):\n comp = _extract_from_lambda(comp)\n elif isinstance(comp, computation_building_blocks.Selection):\n comp = _extract_from_selection(comp)\n elif isinstance(comp, computation_building_blocks.Tuple):\n comp = _extract_from_tuple(comp)\n return comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef inline_block_locals(comp, variable_names=None):\n \"\"\"Inlines the block variables in `comp` whitelisted by `variable_names`.\n\n Args:\n comp: The computation building block in which to perform the extractions.\n The names of lambda parameters and block variables in `comp` must be\n unique.\n variable_names: A Python list, tuple, or set representing the whitelist of\n variable names to inline; or None if all variables should be inlined.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n ValueError: If `comp` contains variables with non-unique names.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n _check_has_unique_names(comp)\n if variable_names is not None:\n py_typecheck.check_type(variable_names, (list, tuple, set))\n\n def _should_inline_variable(name):\n return variable_names 
is None or name in variable_names\n\n def _should_transform(comp):\n return ((isinstance(comp, computation_building_blocks.Reference) and\n _should_inline_variable(comp.name)) or\n (isinstance(comp, computation_building_blocks.Block) and\n any(_should_inline_variable(name) for name, _ in comp.locals)))\n\n def _transform(comp, symbol_tree):\n \"\"\"Returns a new transformed computation or `comp`.\"\"\"\n if not _should_transform(comp):\n return comp, False\n if isinstance(comp, computation_building_blocks.Reference):\n value = symbol_tree.get_payload_with_name(comp.name).value\n # This identifies a variable bound by a Block as opposed to a Lambda.\n if value is not None:\n return value, True\n else:\n return comp, False\n elif isinstance(comp, computation_building_blocks.Block):\n variables = [(name, value)\n for name, value in comp.locals\n if not _should_inline_variable(name)]\n if not variables:\n comp = comp.result\n else:\n comp = computation_building_blocks.Block(variables, comp.result)\n return comp, True\n return comp, False\n\n symbol_tree = transformation_utils.SymbolTree(\n transformation_utils.ReferenceCounter)\n return transformation_utils.transform_postorder_with_symbol_bindings(\n comp, _transform, symbol_tree)\n\n\ndef merge_chained_blocks(comp):\n r\"\"\"Merges all the chained blocks in `comp` into one block.\n\n Looks for occurrences of the following pattern:\n\n Block\n / \\\n [...] Block\n / \\\n [...] Comp(x)\n\n And merges them to\n\n Block\n / \\\n [...] 
Comp(x)\n\n Preserving the relative ordering of any locals declarations in a postorder\n walk, which therefore preserves scoping rules.\n\n Notice that because TFF Block constructs bind their variables in sequence, it\n is completely safe to add the locals lists together in this implementation,\n\n Args:\n comp: The computation building block in which to perform the merges.\n\n Returns:\n Transformed version of `comp` with its neighboring blocks merged.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Block) and\n isinstance(comp.result, computation_building_blocks.Block))\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = computation_building_blocks.Block(\n comp.locals + comp.result.locals, comp.result.result)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef merge_chained_federated_maps_or_applys(comp):\n r\"\"\"Merges all the chained federated maps or federated apply in `comp`.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n replaces the following computation containing two federated map intrinsics:\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp(x), Call]\n / \\\n Intrinsic Tuple\n |\n [Comp(y), Comp(z)]\n\n intrinsic()>)\n\n with the following computation containing one federated map or apply\n intrinsic:\n\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Block, Comp(z)]\n / \\\n [fn=Tuple] Lambda(arg)\n | \\\n [Comp(y), Comp(x)] Call\n / \\\n Sel(1) Call\n / / \\\n Ref(fn) Sel(0) Ref(arg)\n /\n Ref(fn)\n\n intrinsic(<(let fn= in (arg -> fn[1](fn[0](arg)))), z>)\n\n The functional computations `x` and `y`, and the argument `z` are retained;\n the other computations are replaced.\n\n Args:\n comp: The computation building block in which to perform the merges.\n\n Returns:\n A new computation 
with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n name_generator = computation_constructing_utils.unique_name_generator(comp)\n\n def _should_transform(comp):\n \"\"\"Returns `True` if `comp` is a chained federated map.\"\"\"\n if _is_called_intrinsic(comp, (\n intrinsic_defs.FEDERATED_APPLY.uri,\n intrinsic_defs.FEDERATED_MAP.uri,\n )):\n outer_arg = comp.argument[1]\n if _is_called_intrinsic(outer_arg, comp.function.uri):\n return True\n return False\n\n def _transform(comp):\n \"\"\"Returns a new transformed computation or `comp`.\"\"\"\n if not _should_transform(comp):\n return comp, False\n\n def _create_block_to_chained_calls(comps):\n r\"\"\"Constructs a transformed block computation from `comps`.\n\n Block\n / \\\n [fn=Tuple] Lambda(arg)\n | \\\n [Comp(y), Comp(x)] Call\n / \\\n Sel(1) Call\n / / \\\n Ref(fn) Sel(0) Ref(arg)\n /\n Ref(fn)\n\n (let fn= in (arg -> fn[1](fn[0](arg)))\n\n Args:\n comps: A Python list of computations.\n\n Returns:\n A `computation_building_blocks.Block`.\n \"\"\"\n functions = computation_building_blocks.Tuple(comps)\n functions_name = six.next(name_generator)\n functions_ref = computation_building_blocks.Reference(\n functions_name, functions.type_signature)\n arg_name = six.next(name_generator)\n arg_type = comps[0].type_signature.parameter\n arg_ref = computation_building_blocks.Reference(arg_name, arg_type)\n arg = arg_ref\n for index, _ in enumerate(comps):\n fn_sel = computation_building_blocks.Selection(\n functions_ref, index=index)\n call = computation_building_blocks.Call(fn_sel, arg)\n arg = call\n fn = computation_building_blocks.Lambda(arg_ref.name,\n arg_ref.type_signature, call)\n return computation_building_blocks.Block(\n ((functions_ref.name, functions),), fn)\n\n block = _create_block_to_chained_calls((\n comp.argument[1].argument[0],\n comp.argument[0],\n 
))\n arg = computation_building_blocks.Tuple([\n block,\n comp.argument[1].argument[1],\n ])\n intrinsic_type = computation_types.FunctionType(\n arg.type_signature, comp.function.type_signature.result)\n intrinsic = computation_building_blocks.Intrinsic(comp.function.uri,\n intrinsic_type)\n transformed_comp = computation_building_blocks.Call(intrinsic, arg)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef merge_tuple_intrinsics(comp, uri):\n r\"\"\"Merges all the tuples of intrinsics in `comp` into one intrinsic.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n replaces the following computation containing a tuple of called intrinsics all\n represeting the same operation:\n\n Tuple\n |\n [Call, Call, ...]\n / \\ / \\\n Intrinsic Tuple Intrinsic Tuple\n | |\n [Comp(f1), Comp(v1), ...] [Comp(f2), Comp(v2), ...]\n\n ), Intrinsic()>\n\n with the following computation containing one called intrinsic:\n\n federated_unzip(Call)\n / \\\n Intrinsic Tuple\n |\n [Block, federated_zip(Tuple), ...]\n / \\ |\n fn=Tuple Lambda(arg) [Comp(v1), Comp(v2), ...]\n | \\\n [Comp(f1), Comp(f2), ...] Tuple\n |\n [Call, Call, ...]\n / \\ / \\\n Sel(0) Sel(0) Sel(1) Sel(1)\n / / / /\n Ref(fn) Ref(arg) Ref(fn) Ref(arg)\n\n Intrinsic(<\n (let fn= in (arg -> )),\n ,\n >)\n\n The functional computations `f1`, `f2`, etc..., and the computations `v1`,\n `v2`, etc... are retained; the other computations are replaced.\n\n NOTE: This is just an example of what this transformation would look like when\n applied to a tuple of federated maps. 
The components `f1`, `f2`, `v1`, and\n `v2` and the number of those components are not important.\n\n This transformation is implemented to match the following intrinsics:\n\n * intrinsic_defs.FEDERATED_AGGREGATE.uri\n * intrinsic_defs.FEDERATED_BROADCAST.uri\n * intrinsic_defs.FEDERATED_MAP.uri\n\n Args:\n comp: The computation building block in which to perform the merges.\n uri: The URI of the intrinsic to merge.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(uri, six.string_types)\n expected_uri = (\n intrinsic_defs.FEDERATED_AGGREGATE.uri,\n intrinsic_defs.FEDERATED_BROADCAST.uri,\n intrinsic_defs.FEDERATED_MAP.uri,\n )\n if uri not in expected_uri:\n raise ValueError(\n 'The value of `uri` is expected to be on of {}, found {}'.format(\n expected_uri, uri))\n name_generator = computation_constructing_utils.unique_name_generator(comp)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Tuple) and\n _is_called_intrinsic(comp[0], uri) and all(\n _is_called_intrinsic(element, comp[0].function.uri)\n for element in comp))\n\n def _transform_functional_args(comps):\n r\"\"\"Transforms the functional computations `comps`.\n\n Given a computation containing `n` called intrinsics with `m` arguments,\n this function constructs the following computation from the functional\n arguments of the called intrinsic:\n\n Block\n / \\\n [fn=Tuple] Lambda(arg)\n | \\\n [Comp(f1), Comp(f2), ...] Tuple\n |\n [Call, Call, ...]\n / \\ / \\\n Sel(0) Sel(0) Sel(1) Sel(1)\n / / / /\n Ref(fn) Ref(arg) Ref(fn) Ref(arg)\n\n with one `computation_building_blocks.Call` for each `n`. 
This computation\n represents one of `m` arguments that should be passed to the call of the\n transformed computation.\n\n Args:\n comps: a Python list of computations.\n\n Returns:\n A `computation_building_blocks.Block`.\n \"\"\"\n functions = computation_building_blocks.Tuple(comps)\n functions_name = six.next(name_generator)\n functions_ref = computation_building_blocks.Reference(\n functions_name, functions.type_signature)\n arg_name = six.next(name_generator)\n arg_type = [element.type_signature.parameter for element in comps]\n arg_ref = computation_building_blocks.Reference(arg_name, arg_type)\n elements = []\n for index in range(len(comps)):\n sel_fn = computation_building_blocks.Selection(functions_ref, index=index)\n sel_arg = computation_building_blocks.Selection(arg_ref, index=index)\n call = computation_building_blocks.Call(sel_fn, sel_arg)\n elements.append(call)\n calls = computation_building_blocks.Tuple(elements)\n fn = computation_building_blocks.Lambda(arg_ref.name,\n arg_ref.type_signature, calls)\n return computation_building_blocks.Block(((functions_ref.name, functions),),\n fn)\n\n def _transform_non_functional_args(comps):\n r\"\"\"Transforms the non-functional computations `comps`.\n\n Given a computation containing `n` called intrinsics with `m` arguments,\n this function constructs the following computation from the non-functional\n arguments of the called intrinsic:\n\n federated_zip(Tuple)\n |\n [Comp, Comp, ...]\n\n or\n\n Tuple\n |\n [Comp, Comp, ...]\n\n with one `computation_building_blocks.ComputationBuildignBlock` for each\n `n`. 
This computation represents one of `m` arguments that should be passed\n to the call of the transformed computation.\n\n Args:\n comps: A Python list of computations.\n\n Returns:\n A `computation_building_blocks.Block`.\n \"\"\"\n values = computation_building_blocks.Tuple(comps)\n first_comp = comps[0]\n if isinstance(first_comp.type_signature, computation_types.FederatedType):\n return computation_constructing_utils.create_federated_zip(values)\n else:\n return values\n\n def _transform_args(comp):\n \"\"\"Transforms the arguments from `comp`.\n\n Given a computation containing a tuple of intrinsics that can be merged,\n this function constructs the follwing computation from the arguments of the\n called intrinsic:\n\n Tuple\n |\n [Block, federated_zip(Tuple), ...]\n\n with one `computation_building_blocks.Block` for each functional computation\n in `m` and one called federated zip (or Tuple) for each non-functional\n computation in `m`. This list of computations represent the `m` arguments\n that should be passed to the call of the transformed computation.\n\n Args:\n comp: The computation building block in which to perform the merges.\n\n Returns:\n A `computation_building_blocks.ComputationBuildingBlock` representing the\n transformed arguments from `comp`.\n \"\"\"\n first_comp = comp[0]\n if isinstance(first_comp.argument, computation_building_blocks.Tuple):\n comps = [[] for _ in range(len(first_comp.argument))]\n for _, call in anonymous_tuple.to_elements(comp):\n for index, arg in enumerate(call.argument):\n comps[index].append(arg)\n else:\n comps = [[]]\n for _, call in anonymous_tuple.to_elements(comp):\n comps[0].append(call.argument)\n elements = []\n for args in comps:\n first_args = args[0]\n if isinstance(first_args.type_signature, computation_types.FunctionType):\n transformed_args = _transform_functional_args(args)\n else:\n transformed_args = _transform_non_functional_args(args)\n elements.append(transformed_args)\n if 
isinstance(first_comp.argument, computation_building_blocks.Tuple):\n return computation_building_blocks.Tuple(elements)\n else:\n return elements[0]\n\n def _transform(comp):\n \"\"\"Returns a new transformed computation or `comp`.\"\"\"\n if not _should_transform(comp):\n return comp, False\n arg = _transform_args(comp)\n first_comp = comp[0]\n named_comps = anonymous_tuple.to_elements(comp)\n parameter_type = computation_types.to_type(arg.type_signature)\n type_signature = [call.type_signature.member for _, call in named_comps]\n result_type = computation_types.FederatedType(\n type_signature, first_comp.type_signature.placement,\n first_comp.type_signature.all_equal)\n intrinsic_type = computation_types.FunctionType(parameter_type, result_type)\n intrinsic = computation_building_blocks.Intrinsic(first_comp.function.uri,\n intrinsic_type)\n call = computation_building_blocks.Call(intrinsic, arg)\n tup = computation_constructing_utils.create_federated_unzip(call)\n names = [name for name, _ in named_comps]\n transformed_comp = computation_constructing_utils.create_named_tuple(\n tup, names)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef remove_mapped_or_applied_identity(comp):\n r\"\"\"Removes all the mapped or applied identity functions in `comp`.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n removes all the mapped or applied identity fucntions by replacing the\n following computation:\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Lambda(x), Comp(y)]\n \\\n Ref(x)\n\n Intrinsic(<(x -> x), y>)\n\n with its argument:\n\n Comp(y)\n\n y\n\n Args:\n comp: The computation building block in which to perform the removals.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def 
_should_transform(comp):\n \"\"\"Returns `True` if `comp` is a mapped or applied identity function.\"\"\"\n if (isinstance(comp, computation_building_blocks.Call) and\n isinstance(comp.function, computation_building_blocks.Intrinsic) and\n comp.function.uri in (\n intrinsic_defs.FEDERATED_MAP.uri,\n intrinsic_defs.FEDERATED_APPLY.uri,\n intrinsic_defs.SEQUENCE_MAP.uri,\n )):\n called_function = comp.argument[0]\n if _is_identity_function(called_function):\n return True\n return False\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = comp.argument[1]\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef replace_called_lambda_with_block(comp):\n r\"\"\"Replaces all the called lambdas in `comp` with a block.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n replaces the following computation containing a called lambda:\n\n Call\n / \\\n Lambda(x) Comp(y)\n \\\n Comp(z)\n\n (x -> z)(y)\n\n with the following computation containing a block:\n\n Block\n / \\\n [x=Comp(y)] Comp(z)\n\n let x=y in z\n\n The functional computation `b` and the argument `c` are retained; the other\n computations are replaced. 
This transformation is used to facilitate the\n merging of TFF orchestration logic, in particular to remove unnecessary lambda\n expressions and as a stepping stone for merging Blocks together.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Call) and\n isinstance(comp.function, computation_building_blocks.Lambda))\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = computation_building_blocks.Block(\n [(comp.function.parameter_name, comp.argument)], comp.function.result)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef replace_intrinsic_with_callable(comp, uri, body, context_stack):\n \"\"\"Replaces all the intrinsics with the given `uri` with a callable.\n\n This transform traverses `comp` postorder and replaces all the intrinsics with\n the given `uri` with a polymorphic callable that represents the body of the\n implementation of the intrinsic; i.e., one that given the parameter of the\n intrinsic constructs the intended result. 
This will typically be a Python\n function decorated with `@federated_computation` to make it into a polymorphic\n callable.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n uri: The URI of the intrinsic to replace.\n body: A polymorphic callable.\n context_stack: The context stack to use.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(uri, six.string_types)\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n if not callable(body):\n raise TypeError('The body of the intrinsic must be a callable.')\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Intrinsic) and\n comp.uri == uri and\n isinstance(comp.type_signature, computation_types.FunctionType))\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n # We need 'wrapped_body' to accept exactly one argument.\n wrapped_body = lambda x: body(x) # pylint: disable=unnecessary-lambda\n transformed_comp = federated_computation_utils.zero_or_one_arg_fn_to_building_block(\n wrapped_body, 'arg', comp.type_signature.parameter, context_stack, uri)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef replace_selection_from_tuple_with_element(comp):\n r\"\"\"Replaces any selection from a tuple with the underlying tuple element.\n\n Replaces any occurences of:\n\n Selection\n \\\n Tuple\n |\n [Comp, Comp, ...]\n\n with the appropriate Comp, as determined by the `index` or `name` of the\n `Selection`.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n A possibly modified version of comp, without any occurrences of selections\n from tuples.\n\n Raises:\n TypeError: If `comp` is not an 
instance of\n `computation_building_blocks.ComputationBuildingBlock`.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Selection) and\n isinstance(comp.source, computation_building_blocks.Tuple))\n\n def _get_index_from_name(selection_name, tuple_type_signature):\n named_type_signatures = anonymous_tuple.to_elements(tuple_type_signature)\n return [x[0] for x in named_type_signatures].index(selection_name)\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n if comp.name is not None:\n index = _get_index_from_name(comp.name, comp.source.type_signature)\n else:\n index = comp.index\n return comp.source[index], True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef uniquify_compiled_computation_names(comp):\n \"\"\"Replaces all the compiled computations names in `comp` with unique names.\n\n This transform traverses `comp` postorder and replaces the name of all the\n comiled computations found in `comp` with a unique name.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n name_generator = computation_constructing_utils.unique_name_generator(\n None, prefix='')\n\n def _should_transform(comp):\n return isinstance(comp, computation_building_blocks.CompiledComputation)\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = computation_building_blocks.CompiledComputation(\n comp.proto, six.next(name_generator))\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef uniquify_reference_names(comp):\n 
\"\"\"Replaces all the reference names in `comp` with unique names.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n Returns a transformed version of comp inside of which all variable names\n are guaranteed to be unique.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n name_generator = computation_constructing_utils.unique_name_generator(None)\n\n class _RenameNode(transformation_utils.BoundVariableTracker):\n \"\"\"transformation_utils.SymbolTree node for renaming References in ASTs.\"\"\"\n\n def __init__(self, name, value):\n super(_RenameNode, self).__init__(name, value)\n py_typecheck.check_type(name, str)\n self.new_name = six.next(name_generator)\n\n def __str__(self):\n return 'Value: {}, name: {}, new_name: {}'.format(self.value, self.name,\n self.new_name)\n\n def _transform(comp, context_tree):\n \"\"\"Renames References in `comp` to unique names.\"\"\"\n if isinstance(comp, computation_building_blocks.Reference):\n new_name = context_tree.get_payload_with_name(comp.name).new_name\n return computation_building_blocks.Reference(new_name,\n comp.type_signature,\n comp.context), True\n elif isinstance(comp, computation_building_blocks.Block):\n new_locals = []\n for name, val in comp.locals:\n context_tree.walk_down_one_variable_binding()\n new_name = context_tree.get_payload_with_name(name).new_name\n new_locals.append((new_name, val))\n return computation_building_blocks.Block(new_locals, comp.result), True\n elif isinstance(comp, computation_building_blocks.Lambda):\n context_tree.walk_down_one_variable_binding()\n new_name = context_tree.get_payload_with_name(\n comp.parameter_name).new_name\n return computation_building_blocks.Lambda(new_name, comp.parameter_type,\n comp.result), True\n return comp, False\n\n symbol_tree = transformation_utils.SymbolTree(_RenameNode)\n return transformation_utils.transform_postorder_with_symbol_bindings(\n comp, 
_transform, symbol_tree)\n\n\nclass TFParser(object):\n \"\"\"Callable taking subset of TFF AST constructs to CompiledComputations.\n\n When this function is applied via `transformation_utils.transform_postorder`\n to a TFF AST node satisfying its assumptions, the tree under this node will\n be reduced to a single instance of\n `computation_building_blocks.CompiledComputation` representing the same\n logic.\n\n Notice that this function is designed to be applied to what is essentially\n a subtree of a larger TFF AST; once the processing on a single device has\n been aligned at the AST level, and placement separated from the logic of\n this processing, we should be left with a function wrapped via\n `federated_map` or `federated_apply` to a federated argument. It is this\n function which we need to reduce to TensorFlow, and it is to the root\n node of this function which we are looking to apply `TFParser`. Because of\n this, we assume that there is a lambda expression at the top of the AST\n we are looking to parse, as well as the rest of the assumptions below.\n\n We have no proof that these assumptions are sufficient for this\n library to parse *all* TFF into TF, so we expect some constructs will fail\n to be reduced. The assumptions can currently be enumerated as follows:\n\n 1. All called lambdas have been converted to blocks.\n 2. All blocks have been inlined; that is, there are no block/LET constructs\n remaining.\n 3. All compiled computations are called.\n 4. No compiled computations have been partially called; we believe this\n should be handled correctly today but we haven't reasoned explicitly about\n this possibility.\n 5. The only leaf nodes present under `comp` are compiled computations and\n references to the argument of the top-level lambda which we are hoping to\n replace with a compiled computation. Further, every leaf node which is a\n reference has as its parent a `computation_building_blocks.Call`, whose\n associated function is a TF graph. 
This prevents us from needing to\n deal with arbitrary nesting of references and TF graphs, and significantly\n clarifies the reasoning. This can be accomplished by \"decorating\" the\n appropriate leaves with called identity TF graphs, the construction of\n which is provided by a utility module.\n 6. There is only a single lambda binding any references present in the AST,\n and it is placed at the root of the AST to which we apply `TFParser`.\n 7. There are no intrinsics present in the AST.\n \"\"\"\n\n # TODO(b/133328350): Allow for this to take in multiple selections from a\n # single argument.\n\n def __init__(self):\n \"\"\"Populates the parser library with mutually exclusive options.\"\"\"\n self._parse_library = [\n compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(),\n compiled_computation_transforms.LambdaWrappingGraph(),\n compiled_computation_transforms.LambdaCallSelectionFromArg(),\n compiled_computation_transforms.LambdaToCalledTupleOfSelectionsFromArg(\n ),\n compiled_computation_transforms.TupleCalledGraphs(),\n compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(),\n compiled_computation_transforms.LambdaToCalledGraphOnReplicatedArg(),\n ]\n\n def __call__(self, comp):\n \"\"\"Transforms `comp` by checking all elements of the parser library.\n\n This function is roughly performing intermediate-code generation, taking\n TFF and generating TF. 
Calling this function is essentially checking the\n stack and selecting a semantic action based on its contents, and *only one*\n of these actions should be selected for a given computation.\n\n Notice that since the parser library contains mutually exclusive options,\n it is safe to return early.\n\n Args:\n comp: The `computation_building_blocks.ComputationBuildingBlock` to check\n for possibility of reduction according to the parsing library.\n\n Returns:\n A tuple whose first element is a possibly transformed version of `comp`,\n and whose second is a Boolean indicating whether or not `comp` was\n transformed. This is in conforming to the conventions of\n `transformation_utils.transform_postorder`.\n \"\"\"\n py_typecheck.check_type(\n comp, computation_building_blocks.ComputationBuildingBlock)\n for option in self._parse_library:\n if option.should_transform(comp):\n transformed, ind = option.transform(comp)\n return transformed, ind\n return comp, False\n\n\ndef _is_called_intrinsic(comp, uri=None):\n \"\"\"Returns `True` if `comp` is a called intrinsic with the `uri` or `uri`s.\n\n Call\n /\n Intrinsic\n\n Args:\n comp: The computation building block to test.\n uri: A uri or a list, tuple, or set of uri.\n \"\"\"\n if isinstance(uri, six.string_types):\n uri = (uri,)\n if uri is not None:\n py_typecheck.check_type(uri, (list, tuple, set))\n return (isinstance(comp, computation_building_blocks.Call) and\n isinstance(comp.function, computation_building_blocks.Intrinsic) and\n (uri is None or comp.function.uri in uri))\n\n\ndef _is_identity_function(comp):\n \"\"\"Returns `True` if `comp` is an identity function.\"\"\"\n return (isinstance(comp, computation_building_blocks.Lambda) and\n isinstance(comp.result, computation_building_blocks.Reference) and\n comp.parameter_name == comp.result.name)\n\n\ndef _check_has_unique_names(comp):\n if not transformation_utils.has_unique_names(comp):\n raise ValueError(\n 'This transform should only be called after we have 
uniquified all '\n '`computation_building_blocks.Reference` names, since we may be moving '\n 'computations with unbound references under constructs which bind '\n 'those references.')\n\n\ndef _get_unbound_references(comp):\n \"\"\"Gets a Python `dict` of the unbound references in `comp`.\n\n Compuations that are equal will have the same collections of unbounded\n references, so it is safe to use `comp` as the key for this `dict` even though\n a given compuation may appear in many positions in the AST.\n\n Args:\n comp: The computation building block to parse.\n\n Returns:\n A Python `dict` of elements where keys are the compuations in `comp` and\n values are a Python `set` of the names of the unbound references in the\n subtree of that compuation.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n references = {}\n\n def _update(comp):\n \"\"\"Updates the Python dict of references.\"\"\"\n if isinstance(comp, computation_building_blocks.Reference):\n references[comp] = set((comp.name,))\n elif isinstance(comp, computation_building_blocks.Block):\n references[comp] = set()\n names = []\n for name, variable in comp.locals:\n elements = references[variable]\n references[comp].update([e for e in elements if e not in names])\n names.append(name)\n elements = references[comp.result]\n references[comp].update([e for e in elements if e not in names])\n elif isinstance(comp, computation_building_blocks.Call):\n elements = references[comp.function]\n if comp.argument is not None:\n elements.update(references[comp.argument])\n references[comp] = elements\n elif isinstance(comp, computation_building_blocks.Lambda):\n elements = references[comp.result]\n references[comp] = set([e for e in elements if e != comp.parameter_name])\n elif isinstance(comp, computation_building_blocks.Selection):\n references[comp] = references[comp.source]\n elif isinstance(comp, computation_building_blocks.Tuple):\n elements = [references[e] for e in 
comp]\n references[comp] = set(itertools.chain.from_iterable(elements))\n else:\n references[comp] = set()\n return comp, False\n\n transformation_utils.transform_postorder(comp, _update)\n return references\n","sub_path":"tensorflow_federated/python/core/impl/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":47320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"282374725","text":"import numpy as np\nfrom model.coop_irl_mdp import CoopIRLMDP\nfrom collections import defaultdict\nimport json\n\n\nclass ColorTrails(CoopIRLMDP):\n action = {0: np.array([-1, 0]), # up\n 1: np.array([0, -1]), # left\n 2: np.array([0, 1]), # right\n 3: np.array([1, 0])} # down\n\n def __init__(self, ct_data):\n self.ct_data = ct_data\n self.t_map = defaultdict(lambda: defaultdict(dict))\n self.s_map = {}\n\n self.make_s_map_for_r(0, set(), self.s_map, self.t_map, ct_data.h_start, ct_data.r_start,\n ct_data.h_chip, ct_data.r_chip, 0)\n super().__init__(len(self.s_map) + 1, 5, 5, 2, 2)\n\n def valid_pos(self, pos):\n c = (pos >= np.array([0, 0])) * (pos < np.array(self.ct_data.shape))\n return np.prod(c)\n\n def make_s_map_for_r(self, s, medals, s_map, t_map, h_pos, r_pos, h_chip, r_chip, d):\n medals = medals.copy()\n if tuple(h_pos) in self.ct_data.medals:\n medals.add(self.ct_data.medals[tuple(h_pos)])\n if tuple(r_pos) in self.ct_data.medals:\n medals.add(self.ct_data.medals[tuple(r_pos)])\n\n bomb = -1\n if tuple(h_pos) in self.ct_data.bomb:\n bomb = self.ct_data.bomb[tuple(h_pos)]\n\n s_map[s] = (h_pos, r_pos, h_chip, r_chip, medals, self._finish_recipe(medals), bomb, d)\n s_offset = 0\n r_halt = True\n if tuple(r_pos) not in self.ct_data.medals:\n for a_r in range(4):\n n_r_pos = r_pos + ColorTrails.action[a_r]\n if not self.valid_pos(n_r_pos):\n continue\n chip = self.ct_data.color[n_r_pos[0], n_r_pos[1]]\n if r_chip[chip] <= 0:\n continue\n r_halt = False\n n_r_chip = r_chip.copy()\n n_r_chip[chip] -= 1\n s_offset += self.make_s_map_for_h(s, medals, a_r, s_offset, s_map, t_map,\n h_pos, n_r_pos, h_chip, n_r_chip, d)\n if r_halt:\n s_offset += self.make_s_map_for_h(s, medals, 4, s_offset, s_map, t_map,\n h_pos, r_pos, h_chip, r_chip, d)\n\n return s_offset\n\n def make_s_map_for_h(self, s, medals, a_r, s_offset, s_map, t_map,\n h_pos, n_r_pos, h_chip, n_r_chip, d):\n h_halt = True\n s_h_offset = 0\n if tuple(h_pos) not in 
self.ct_data.medals:\n for a_h in range(4):\n n_h_pos = h_pos + ColorTrails.action[a_h]\n if not self.valid_pos(n_h_pos):\n continue\n chip = self.ct_data.color[n_h_pos[0], n_h_pos[1]]\n if h_chip[chip] <= 0:\n continue\n h_halt = False\n n_h_chip = h_chip.copy()\n n_h_chip[chip] -= 1\n s_h_offset += 1\n\n # t_map[s].append((a_h, a_r, s + s_offset + s_h_offset))\n t_map[s][a_h][a_r] = s + s_offset + s_h_offset\n s_h_offset += self.make_s_map_for_r(s + s_offset + s_h_offset, medals, s_map, t_map,\n n_h_pos, n_r_pos, n_h_chip, n_r_chip, d + 1)\n if h_halt and a_r != 4:\n s_h_offset += 1\n t_map[s][4][a_r] = s + s_offset + s_h_offset\n s_h_offset += self.make_s_map_for_r(s + s_offset + s_h_offset, medals, s_map, t_map,\n h_pos, n_r_pos, h_chip, n_r_chip, d + 1)\n return s_h_offset\n\n def _finish_recipe(self, medals):\n recipe = set()\n for i, rs in enumerate(self.ct_data.recipe):\n for r in rs:\n if r.issubset(medals):\n recipe.add(i)\n break\n return recipe\n\n def _set_tro(self):\n for s, v in self.t_map.items():\n for a_h in range(5):\n if a_h not in v:\n self.t[:, a_h, s, -1] = 1\n self.r[:, a_h, s, :, :] = -1000\n else:\n for a_r in range(5):\n if a_r not in v[a_h]:\n self.t[a_r, a_h, s, -1] = 1\n self.r[a_r, a_h, s, :, :] = -1000\n else:\n ns = v[a_h][a_r]\n self.t[a_r, a_h, s, ns] = 1\n self.r[a_r, a_h, s, :, :] -= self.calc_cost(a_h, a_r)\n s_data = self.s_map[s]\n ns_data = self.s_map[ns]\n for recipe in ns_data[5]:\n if recipe not in s_data[5]:\n self.r[a_r, a_h, s, :, recipe] += 300\n if ns_data[6] != -1:\n self.r[a_r, a_h, s, ns_data[6], :] -= 100\n for s in self.s_map.keys():\n if s not in self.t_map:\n self.t[:, :, s, -1] = 1\n self.r[:-1, :, s, :] = -1000\n self.r[:, :-1, s, :] = -1000\n self.t[:, :, -1, -1] = 1\n\n # for k, v in self.s_map.items():\n # print(k, v)\n # # exit()\n #\n # for k, v in self.t_map.items():\n # print(k, v)\n # exit()\n\n def calc_cost(self, a_h, a_r):\n return (int(a_h != 4) + int(a_r != 4)) * 5\n\n def make_data(self):\n # 
medals = {v: k for k, v in self.ct_data.medals.items()}\n medals = np.zeros_like(self.ct_data.color)\n for k, v in self.ct_data.medals.items():\n medals[k] = v + 1\n bomb = np.zeros_like(self.ct_data.color)\n for k, v in self.ct_data.bomb.items():\n bomb[k] = v + 1\n recipe = [[list(r) for r in rs] for rs in self.ct_data.recipe]\n data = {\n \"color\": self.ct_data.color.tolist(),\n \"bomb\": bomb.tolist(),\n \"medals\": medals.tolist(),\n \"h_chip\": self.ct_data.h_chip.tolist(),\n \"r_chip\": self.ct_data.r_chip.tolist(),\n \"recipe\": recipe,\n \"h_start\": self.ct_data.h_start.tolist(),\n \"r_start\": self.ct_data.r_start.tolist(),\n }\n json.dump(data, open(\"ct_data/data_\" + str(self.ct_data.index) + \".json\", \"w\"), indent=4)\n\n def make_scinario(self, th_r, index, algo, target):\n conv_action = {0: 2, 1: 1, 2: 4, 3: 3, 4: 0}\n s_candi = set([0])\n b_map = {0: np.array([0.5, 0.5])}\n actions = {}\n nexts = {}\n while len(s_candi) > 0:\n s = s_candi.pop()\n b = b_map[s]\n a_r = self.a_vector_a[s][th_r]\n # print(a_r)\n # return np.max(np.dot(self.a_vector_a[s][th_r][a_r], b))\n # print(s, [np.dot(b, v.T)[0][0] for _k, v in sorted(a_r.items())])\n # print(s, [v for _k, v in sorted(a_r.items())])\n # print(s, [np.max(np.dot(b, v.T)) for _k, v in sorted(a_r.items())])\n # exit()\n # print(s, [np.dot(b, v.T) for _k, v in sorted(a_r.items())])\n # print(s, np.max(np.dot(b, v.T)[0])[0] for _k, v in sorted(a_r.items())])\n a_r = np.argmax([np.max(np.dot(b, v.T)) for _k, v in sorted(a_r.items())])\n\n # print(s, a_r)\n next = {}\n for a_h, v in self.t_map[s].items():\n n_s = v[a_r]\n b = self.h_pi[th_r][s][a_r][a_h] * b_map[s]\n b /= np.sum(b)\n b_map[n_s] = np.array(b)\n next[conv_action[a_h]] = n_s\n s_candi.add(n_s)\n if len(next) > 0:\n nexts[s] = next\n actions[s] = int(conv_action[a_r])\n # print(actions)\n json.dump({\"actions\": actions, \"nexts\": nexts, \"target\": target},\n open(\"ct_data/scinario_\" + str(index) + \"_\" + str(algo) + \".json\", 
\"w\"), indent=4)\n # json.dump(actions, open(\"ct_data/scinario_\" + str(index) + \"_\" + str(algo) + \".json\", \"w\"), indent=4)\n\n # print(b)\n # print(self.h_pi[th_r][s][a_r])\n\n def _take_one_turn(self):\n exit()\n\n\n\n","sub_path":"problem/ct/ct_data_mdp.py","file_name":"ct_data_mdp.py","file_ext":"py","file_size_in_byte":8156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"239040699","text":"from selenium.common.exceptions import NoSuchElementException\nimport pytest\nimport time\n\n\n@pytest.fixture\ndef selenium(selenium):\n selenium.implicitly_wait(10)\n selenium.maximize_window()\n return selenium\n\n\ndef test_banner_shows_and_hides(selenium, live_server):\n selenium.get(live_server.url)\n cookielaw_banner = selenium.find_element_by_id('CookielawBanner')\n\n # on click of the button, cookie set and banner hidden\n cookielaw_banner.find_element_by_class_name('btn').click()\n assert not cookielaw_banner.is_displayed()\n assert '1' == selenium.get_cookie('cookielaw_accepted')['value']\n\n # on come back, assert banner gone\n selenium.get(live_server.url)\n\n with pytest.raises(NoSuchElementException):\n selenium.find_element_by_id('CookielawBanner')\n\n\ndef test_banner_shows_and_hides_with_jquery(selenium, live_server):\n # now, with jQuery\n selenium.get('{}/?jquery=1'.format(live_server.url))\n cookielaw_banner = selenium.find_element_by_id('CookielawBanner')\n\n # on click of the button, cookie set and banner hidden\n cookielaw_banner.find_element_by_class_name('btn').click()\n time.sleep(1)\n assert not cookielaw_banner.is_displayed()\n assert '1' == selenium.get_cookie('cookielaw_accepted')['value']\n","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"344088795","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/cecdaemon/__main__.py\n# Compiled at: 2018-09-22 03:14:19\n# Size of source mod 2**32: 121 bytes\n__doc__ = ' Main entry point\\n'\nfrom . import cecdaemon\n\ndef main():\n cecdaemon.run()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/cechmate-0.0.8-py3-none-any/__main__.cpython-37.py","file_name":"__main__.cpython-37.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"519703938","text":"# Copyright 2017 Huawei Technologies Co., Ltd.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom neutron.plugins.ml2 import driver_api\nfrom neutron.plugins.ml2.drivers import type_vxlan\nfrom neutron_lib import exceptions as n_exc\n\nfrom tricircle.common import constants\n\nLOG = log.getLogger(__name__)\n\n\nclass VxLANTypeDriver(type_vxlan.VxlanTypeDriver):\n def __init__(self):\n super(VxLANTypeDriver, self).__init__()\n\n def get_type(self):\n return constants.NT_VxLAN\n\n def initialize(self):\n try:\n self._initialize(cfg.CONF.tricircle.vni_ranges)\n except n_exc.NetworkTunnelRangeError:\n LOG.exception(\"Failed to parse vni_ranges. \"\n \"Service terminated!\")\n raise SystemExit()\n\n def reserve_provider_segment(self, context, segment):\n res = super(VxLANTypeDriver,\n self).reserve_provider_segment(context, segment)\n res[driver_api.NETWORK_TYPE] = constants.NT_VxLAN\n return res\n\n def allocate_tenant_segment(self, context):\n res = super(VxLANTypeDriver,\n self).allocate_tenant_segment(context)\n res[driver_api.NETWORK_TYPE] = constants.NT_VxLAN\n return res\n\n def get_mtu(self, physical_network=None):\n pass\n","sub_path":"tricircle/network/drivers/type_vxlan.py","file_name":"type_vxlan.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"177671300","text":"import numpy as np\nimport json\nimport csv\nfrom itertools import izip\n\nfilename = open('data/processed.json')\ndata = json.load(filename)\n\ntitle = []\nkeywords = []\nwinner = []\n\nstr1 = \"\"\nfor i in data:\n if i['winner'] == True:\n str1 += 'True'+';'\n winner.append(str1)\n else:\n str1 += 'False'+';'\n winner.append(str1)\n str1=\"\"\n\nstr1 = \"\"\nfor i in data:\n if \"keywords\" in i:\n for x in i[\"keywords\"]:\n str1 += x[\"text\"]+\" \"\n keywords.append(str1)\n str1 = \"\"\n\nstr1 = \"\"\nfor i in data:\n str1 += i[\"tagline\"]+';'\n title.append(str1)\n str1=\"\"\n\nfo = open('cluster.csv','w')\nwith open('cluster.csv','w') as outcsv:\n writer= csv.writer(outcsv, delimiter =';')\n writer.writerows(izip(winner,title,keywords))\nfo.close()\n\n","sub_path":"cluster_parsing.py","file_name":"cluster_parsing.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"617604842","text":"# encoding: utf-8\n\nfrom selenium import webdriver\nimport time\nfrom PIL import ImageGrab\nfrom chaojiying import Chaojiying_Client\nimport random\nfrom config_data import deviceall\nimport requests\nimport re\nimport urllib.request\nimport base64\nfrom urllib import request,parse\nimport json\n\n\nclass filter():\n sign_up = 'https://890cp2.com/'\n def device_ua(self):\n ua_iphone = []\n ua_ipad = []\n with open(\"ua_iphone.txt\", 'r') as f:\n for line in f:\n # uaph = f.readline()\n line = line.strip().split('\\n')\n # print(line)\n ua_iphone.append(line[0])\n # print(ua_iphone)\n\n with open(\"ua_ipad.txt\", 'r') as a:\n for line in a:\n line = line.strip().split('\\n')\n ua_ipad.append(line[0])\n # print(ua_all)\n\n ua_ph = 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X; tr-TR) AppleWebKit/537.36 (KHTML, like Gecko) Version/10.1.1 Mobile/14B100 Safari/537.36 Puffin/5.2.0IP'\n ua_pa = random.choice(ua_ipad)\n\n xypad = {'x': 720, 'y': 475} # ipad(720,475,205,40)\n xypadp = {'x': 915, 'y': 475} # ipadpro(915,475,205,40)\n xy678 = {'x': 315, 'y': 430} # iphone6_7_8(320,475,205,40)\n xyx = {'x': 320, 'y': 430} # iphoneX(320,475,205,40)\n xy5 = {'x': 252, 'y': 430} # iphone5_se(235,510,205,40)\n xy678p = {'x': 375, 'y': 430} # iphone6_7_8plus(375,470,205,40)\n\n iphone5_se = {'width': 320, 'height': 568, 'ua': ua_ph, 'xy': xy5}\n iphone6_7_8 = {'width': 375, 'height': 667, 'ua': ua_ph, 'xy': xy678}\n iphone6_7_8plus = {'width': 414, 'height': 736, 'ua': ua_ph, 'xy': xy678p}\n iphoneX = {'width': 375, 'height': 812, 'ua': ua_ph, 'xy': xyx}\n ipad = {'width': 768, 'height': 1024, 'ua': ua_pa, 'xy': xypad}\n ipadPro = {'width': 1024, 'height': 1366, 'ua': ua_pa, 'xy': xypadp}\n\n deviceall = [iphone5_se, iphone6_7_8, iphone6_7_8plus, iphoneX, ipad,ipadPro] #\n\n return deviceall\n\n def setUp(self):\n device_all = self.device_ua()\n # self.device = random.choice(device_all)\n self.device = device_all[2]\n print(self.device)\n 
pixel_ratio = 3.0\n mobileEmulation = {\"deviceMetrics\": {\"width\": self.device['width'], \"height\": self.device['height'], \"pixelRatio\": pixel_ratio},\"userAgent\": self.device['ua']}\n options = webdriver.ChromeOptions()\n options.binary_location = \"C:/Users/moxi/Desktop/mychrome/Chrome/chrome.exe\"\n chrome_driver_binary = \"chromedriver.exe\"\n options.add_experimental_option('mobileEmulation', mobileEmulation)\n # self.driver = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=options)\n self.driver = webdriver.Chrome(chrome_driver_binary, options=options)\n self.driver.maximize_window()\n time.sleep(1)\n self.driver.get(self.sign_up)\n self.driver.implicitly_wait(30)\n\n def quit(self):\n self.driver.quit()\n\n def verify(self):\n for i in range(1, 3):\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"am-modal-button\")[0].click()\n # self.driver.find_element_by_xpath('//div[@class=\"am-modal-button-group-h.am-modal-button-group-normal\"]/a').click()\n # self.driver.refresh()\n time.sleep(5)\n self.driver.find_elements_by_class_name(\"barTextButton___G3WVC\")[1].click()\n # self.driver.switch_to.frame(self.driver.find_element_by_xpath(\"//iframe[contains(@src,'/login')]\"))\n\n username =\"ti626\"\n pwd = \"zspQjn0d5Z\"\n print(\"User:\"+username)\n print(\"Pwd:\"+pwd)\n\n # user = self.driver.find_element_by_xpath('//input[@placeholder=\"请输入用户名\"]')\n user = self.driver.find_elements_by_xpath('//input[@type=\"text\"]')[0]\n time.sleep(2)\n user.clear()\n time.sleep(2)\n user.send_keys(username)\n passw = self.driver.find_element_by_xpath('//input[@type=\"password\"]')\n passw.send_keys(pwd)\n code = self.driver.find_elements_by_xpath('//input[@type=\"text\"]')[1]\n code.send_keys(\"\")\n\n time.sleep(1)\n\n x = self.device['xy']['x'] # 1872\n y = self.device['xy']['y'] # 688,438\n w = x + 205 # 275,230\n h = y + 40 # 50,38\n size = (x, y, w, h)\n img = ImageGrab.grab(size)\n img.save(\"C:/Users/moxi/Downloads/1.png\") # 
C:/Users/moxi/Downloads/1.png\n # img.show()\n\n time.sleep(5)\n\n # chaojiying = Chaojiying_Client('iwtay77', 'Iwt.ay77','ac212bb67ed8fce6a530514d9f478093') # 用户中心>>软件ID 生成一个替换 96001\n # im = open('C:/Users/moxi/Downloads/1.png', 'rb').read() # 本地图片文件路径 来替换 a.jpg 有时WIN系统须要//\n # yzm = chaojiying.PostPic(im, 1902)\n # print(yzm)\n # time.sleep(5)\n\n appkey = \"62a8949082d27515eeafbd101b64912a\"\n with open(\"C:/Users/moxi/Downloads/1.png\", 'rb') as f:\n base64_data = base64.b64encode(f.read())\n s = base64_data.decode()\n # print(s)\n\n textmob = {\n \"key\": appkey,\n \"codeType\": 4006,\n \"base64Str\": s\n }\n textmob = parse.urlencode(textmob).encode(encoding='utf-8')\n # print(textmob)\n\n req = urllib.request.Request(url=\"http://op.juhe.cn/vercode/index\", data=textmob)\n webpage = urllib.request.urlopen(req)\n html = webpage.read()\n res = json.loads(html)\n yzm = str(res[\"result\"])\n print(yzm)\n code.send_keys(yzm)\n time.sleep(2)\n self.driver.find_elements_by_xpath('//button[@class=\"color1___3wpTZ\"]')[0].click()\n print(i)\n if i == 1 :\n time.sleep(2)\n quit = self.driver.find_elements_by_xpath('//div[@class=\"am-tab-bar-tab\"]')[4]\n time.sleep(2)\n quit.click()\n time.sleep(1)\n self.driver.find_element_by_class_name(\"iconService___BeN5z\").click()\n time.sleep(1)\n self.driver.find_element_by_class_name(\"color1___3wpTZ\").click()\n time.sleep(1)\n self.driver.refresh()\n else:\n break\n\n time.sleep(2)\n url = \"http://200019.ip138.com/\"\n req = urllib.request.urlopen(url).read()\n # print(req)\n theIP = re.findall(r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}.\\d{1,3}\", str(req))\n ip = theIP[0]\n print(\"your IP Address is: \", ip)\n time.sleep(2)\n response = requests.post(\n f\"http://47.75.184.28/api/imessage-server/imessage-restapi/external/markEmail?email={username}&ip={ip}\")\n print(response.status_code)\n time.sleep(2)\n # deviceToken = self.driver.execute_script(\"return localStorage.getItem('appDeviceToken')\")\n # print(deviceToken)\n\n 
sideNav_list = self.driver.find_elements_by_xpath('//div[@class=\"listItem___12frK\"]')\n sideNav =random.choice(sideNav_list)\n time.sleep(2)\n sideNav.click()\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"theme1___341L1.undefined.button___3xxsI\")[0].click()\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"buttonItem___2sWKk\")[2].click()\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"iconRemove___3CKmq\")[0].click()\n time.sleep(7)\n # self.driver.find_element_by_xpath('//div[@class=\"theme1___341L1.undefined.button___3xxsI\"]').click()\n # time.sleep(2)\n self.driver.find_element_by_class_name(\"theme1___341L1.undefined.button___3xxsI\").click()\n time.sleep(2)\n self.driver.find_element_by_class_name(\"color1___3wpTZ\").click()\n time.sleep(2)\n # self.driver.find_element_by_xpath('//button[@data-position=\"bottom\"]').click()\n\n\nif __name__ == \"__main__\":\n F = filter()\n F.setUp()\n F.verify()\n F.quit()\n\n\n","sub_path":"978Pplay.py","file_name":"978Pplay.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"254021024","text":"import cv2\nimport imutils\nfrom datetime import datetime,timedelta\nimport statistics\n\nprint(cv2.__version__)\n\ncascPath = './haarcascade_frontalface_default.xml'\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\n#video_capture = cv2.VideoCapture(0)\nvideo_capture = cv2.VideoCapture(\"./pessoas_caminhando.mp4\")\n\ntotal = 0\nfont = cv2.FONT_HERSHEY_SIMPLEX\ninitBB = None\n\ntotal = []\nnext_update = datetime.now() + timedelta(seconds=30)\naverage = 0\nmaximo = 0\n\ndef get_average():\n global next_update\n global total\n global average\n now = datetime.now()\n if now > next_update:\n next_update = now + timedelta(seconds=30)\n average = int(statistics.median(total))\n total = []\n \n return average\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n if frame is None:\n break\n\n frame = imutils.resize(frame, width=700)\n (width, height, c) = frame.shape\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n #desenha os retangulos\n atual = 0\n for (x, y, w, h) in faces:\n atual = atual + 1\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n if atual > maximo:\n maximo = atual\n\n total.append(atual)\n\n cv2.putText(frame, \"Maximo: %s\" % maximo, (10, 20), font, 0.7, (255,165,0), 2, cv2.LINE_AA)\n\n average = get_average()\n\n cv2.putText(frame, \"Media (Mediana): %s\" % average, (10, 45), font, 0.7, (255,165,0), 2, cv2.LINE_AA)\n\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"172884944","text":"from microbit import *\nimport random\n\nbricksPos = []\n\nstep = 1\n\nclass Player:\n \"\"\"This is the play in the bottom\"\"\"\n def __init__(self, pos, HP):\n self.HP = HP\n self.pos = pos\n\n def shoot(self):\n \"\"\"Here a shot is being fired\"\"\"\n for n in [4,3,2,1,0]:\n \"\"\"If enemy is encountered\"\"\"\n shotPos = [self.pos[0],n-1]\n print(\"Shotpos\" + \" \" + str(shotPos))\n\n if shotPos in bricksPos:\n\n # Remove brick\n display.set_pixel(shotPos[0], shotPos[1], 0 )\n bricksPos.remove(shotPos)\n print(\"Brick is removed\" + str(bricksPos))\n break\n else:\n display.set_pixel(shotPos[0],n,9)\n sleep(100)\n display.set_pixel(shotPos[0], n, 0)\n\n\n def explode(self):\n \"\"\" Here a local explosion removes all nearby bricks\"\"\"\n\n combs = [\n [1, 0],\n [1, -1],\n [0, -1],\n [-1, 0],\n [-1, -1]\n ]\n # Lyser bombepixels\n for comb in combs:\n\n y = self.pos[1] + comb[1]\n x = self.pos[0] + comb[0]\n if -1 < x and x < 5 and -1 < y and y < 5:\n # bricks are removed\n display.set_pixel(x % 5, y % 5, 9)\n\n sleep(500)\n # Slukker igen\n for comb in combs:\n\n y = self.pos[1] + comb[1]\n x = self.pos[0] + comb[0]\n\n if -1 < x and x < 5 and -1 < y and y < 5:\n # bricks are removed\n display.set_pixel(x % 5, y % 5, 0)\n if [x % 5, y % 5] in bricksPos:\n bricksPos.remove([x % 5, y % 5])\n else:\n pass\n\n def move(self):\n if accelerometer.get_x() > 100:\n self.pos[0] = (self.pos[0] + 1) % 5\n elif accelerometer.get_x() < -100:\n self.pos[0] = (self.pos[0] - 1) % 5\n else:\n pass\n\nlukas = Player([2,4],9)\n# Game loop\nt = running_time()\nt_add = running_time()\nt_move = running_time()\n\nwhile True:\n if lukas.HP < 0:\n display.scroll(str(temperature()))\n display.scroll(\"GAME OVER\")\n\n\n # Setting character:\n display.set_pixel(lukas.pos[0],lukas.pos[1],lukas.HP)\n cols =[0, 1, 2, 3, 4]\n\n\n\n t_break = 3000*(0.95)**step\n while t_add + t_break < running_time():\n x = random.choice(cols)\n newBrickPos = [x,0]\n step += 
1\n\n for brick in bricksPos:\n if brick == newBrickPos:\n # setting new brick\n if brick[1]+1 == 5:\n display.clear()\n lukas.HP -= 1\n bricksPos = []\n brick = [0,0]\n display.set_pixel(brick[0],brick[1]+1,5)\n\n newBrickPos[1] +=1\n\n else:\n # do nothing\n pass\n display.set_pixel(x,0,5)\n bricksPos.append(newBrickPos)\n\n t_add = running_time()\n\n while t_move + 300 < running_time():\n display.set_pixel(lukas.pos[0],lukas.pos[1],0)\n lukas.move()\n display.set_pixel(lukas.pos[0], lukas.pos[1], lukas.HP)\n t_move = running_time()\n\n if button_a.was_pressed():\n lukas.shoot()\n if button_b.was_pressed():\n lukas.explode()\n","sub_path":"tetris_like.py","file_name":"tetris_like.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"96158764","text":"import mne\nimport numpy as np\nimport warnings\nimport scipy.io as sio\n\n\ndef events_select_condition(trigger, condition):\n \"\"\"Function to handle events and event ids\n\n Parameters\n ----------\n trigger : np.array (dims = [n,1])\n The trigger values\n condition : str\n The set of events corresponding to a specific analysis.\n\n Returns\n -------\n selection : np.array (dims = [m, 1])\n The selected trigger.\n \"\"\"\n if condition == 'stim_motor':\n selection = np.where(trigger > 0)[0]\n elif condition == 'stim':\n selection = np.where((trigger > 0) & (trigger < 4096))[0]\n elif condition == 'motor': # remove response not linked to stim\n # selection = np.where((trigger > 64) & (trigger != 128))[0]\n selection = np.where(trigger >= 4096)[0]\n return selection\n\n\ndef get_events(bhv_fname, ep_name='both'):\n \"\"\"\"Get events from matlab file\n\n Parameters\n ----------\n bhv_fname : str\n mat file path with behavioral results\n ep_name : str (default: 'both')\n string indicating if output corresponds to 'stim' events, 'motor' events\n or 'both'\n\n Returns\n -------\n events_df : pd.DataFrame\n \"\"\"\n import pandas as pd\n\n trials = sio.loadmat(bhv_fname, squeeze_me=True,\n struct_as_record=True)[\"trials\"]\n\n # Redefine key to be more explicit\n keys = [('side', 'stim_side', int),\n ('amb', 'stim_category', float),\n ('amb_word', 'stim_category', float),\n ('target_code', 'stim_code', int),\n ('key', 'motor_side', int),\n ('correct', 'motor_correct', float),\n ('RT_MEG', 'motor_RT', float),\n ('choice', 'motor_category', int)]\n\n # Create indexable dictionary\n events = list()\n for ii, trial in enumerate(trials):\n event = dict()\n # add already present fields\n for key in keys:\n event[key[1]] = trial[key[0]]\n\n # Add manual fields\n event['stim_active'] = trial['type'] == 1\n event['trigger_value'] = int(trial['ttl']['value'])\n event['motor_missed'] = not(event['motor_RT'] > 0.)\n event['trial_number'] = ii\n\n # 
---- stimulus categorical ambiguity\n # NB: There seems to be an error in the matlab postproc code regarding\n # trials.amb_word. We thus need to redefine the conditions properly.\n if trial['target_code'] in [1, 2]: # [['540', 'SHO'], ['560', 'SEO']]\n event['stim_category'] = (trial['amb'] - 1.0) / 7.0\n elif trial['target_code'] in [3, 5]: # [[540, 590], [560, 580]]\n event['stim_category'] = 0.0\n elif trial['target_code'] in [4, 6]: # [[SHO, SAO], [SEO, SCO]]\n event['stim_category'] = 1.0\n else:\n raise('problem target_code!')\n\n # ---- type of passive stimulus\n if not(event['stim_active']):\n # [[540, 590], [560, 580], [SHO, SAO], [SEO, SCO]]\n if trial['amb'] == 1:\n event['stim_new'] = 0\n elif trial['amb'] == 8:\n event['stim_new'] = 1\n else:\n raise('problem target code')\n else:\n event['stim_new'] = 0\n\n # ---- stimulus contrast\n if trial['target_code'] in [1, 3, 4, 5]:\n event['stim_contrast'] = event['stim_category']\n elif trial['target_code'] in [2, 6]:\n event['stim_contrast'] = 1.0 - event['stim_category']\n else:\n raise('problem target_code!')\n\n # previous trial\n if len(events) > 1:\n if ep_name == 'both':\n previous_event = events[-2]\n else:\n previous_event = events[-1]\n for key in ['stim_category', 'stim_side',\n 'motor_category', 'motor_side']:\n event['previous_' + key] = previous_event[key]\n\n # Concatenate stim event\n if (ep_name == 'stim_lock' or ep_name == 'both'):\n event['event_type'] = 'stim'\n events.append(event)\n\n # Add motor event subject responded so as to get a single events\n # structure for both stim and resp lock\n if (ep_name == 'motor_lock' or ep_name == 'both'):\n if event['stim_active']:\n event_ = event.copy()\n event_['event_type'] = 'motor'\n events.append(event_)\n\n # store and panda DataFrame for easier manipulation\n events_df = pd.DataFrame(events)\n # RT bin\n speed_labels = ['none', 'fast', 'slow']\n speeds = np.percentile(events_df['motor_RT'], [50])\n events_df['motor_speed'] = None\n for ii 
in range(len(events)):\n rt = events_df['motor_RT'][ii]\n motor_speed = speed_labels[len(np.where(speeds < rt)[0]) + (rt > 0)]\n events_df['motor_speed'][ii] = motor_speed\n return events_df\n\n\ndef extract_events(fname, min_duration=0.003, first_sample=0,\n offset_to_zero_M=True, offset_to_zero_S=False):\n \"\"\"Function to 1) recompute STI101 from other channels\n 2) clean trigger channel\n 3) Add stimulus information to response channel\n\n Parameters\n ----------\n fname : str\n The filename of the event dataset.\n min_duration : float\n The minimum duration (in s) of an event\n\n Returns\n -------\n events : np.array (dims = [n_events, 3])\n Events array to pass to MNE.\n \"\"\"\n # Load data\n if fname is str:\n raw = mne.io.Raw(fname, preload=True)\n else:\n raw = fname.copy()\n\n # Dissociate STI101 into distinct channels\n raw.pick_channels(['STI101'])\n n_bits = 16\n raw._data = np.round(raw._data)\n data = np.zeros((n_bits, raw.n_times))\n for bit in range(0, n_bits)[::-1]:\n data[bit, :] = (raw._data >= 2 ** bit).astype(float)\n raw._data -= data[bit, :] * (2 ** bit)\n\n # Min duration in sample\n min_sample = min_duration * raw.info['sfreq']\n\n # Binarize trigger values to 0 and 1\n S_ch = range(0, 11)\n # Get all motor events, independently of task relevance\n cmb_M_, sample_M_ = _combine_events(data[len(S_ch):, :], min_sample,\n first_sample=first_sample,\n overlapping=False,\n offset_to_zero=offset_to_zero_M)\n # Only consider stim triggers after first button response (to avoid trigger\n # test trhat shouldn't have been recorded)\n cmb_S, sample_S = _combine_events(data[0:len(S_ch), :], min_sample,\n first_sample=sample_M_[0],\n overlapping=True,\n offset_to_zero=offset_to_zero_S)\n\n # Correct order of magnitude of M response to avoid S/M conflict\n cmb_M_ *= (2 ** len(S_ch))\n\n # Get trigger values for stim and unassociated motor\n trigger_S, trigger_M_ = cmb_S[sample_S], cmb_M_[sample_M_]\n\n # Select M responses relevant to task: first M 
response following trigger\n # max_delay = raw.info['sfreq'] * (2.000 + .430)\n sample_M, trigger_M = list(), list()\n for s, S in enumerate(sample_S):\n # Find first M response\n M = np.where(np.array(sample_M_) > S)[0]\n # Check its link to S\n # if (any(M) and (sample_M_[M[0]] - S) <= max_delay):\n if any(M):\n if trigger_S[s] <= 4.0: # no trigger if stim is passive\n # Add motor response to motor\n sample_M.append(sample_M_[M[0]])\n # Associate S value to M to link the two\n trigger_M.append(trigger_M_[M[0]] + trigger_S[s])\n\n # Combine S and M events\n events_S = [sample_S, np.zeros(len(sample_S)), trigger_S]\n events_M = [sample_M, np.zeros(len(sample_M)), trigger_M]\n events = np.hstack((events_S, events_M)).transpose()\n\n # Sort events chronologically\n events = events[np.argsort(events[:, 0]), :]\n\n # Add starting sample\n events[:, 0] += raw.first_samp\n\n return events\n\n\ndef _combine_events(data, min_sample, first_sample=0, overlapping=True,\n offset_to_zero=True):\n \"\"\" Function to combine multiple trigger channel into binary code \"\"\"\n n_chan, n_sample = data.shape\n cmb = np.zeros([n_chan, n_sample])\n for bit in range(0, n_chan):\n cmb[bit, :] = 2 ** bit * data[bit, :]\n cmb = np.sum(cmb, axis=0)\n\n if not overlapping:\n over_t = np.where(np.sum(data, axis=0) > 1.0)[0]\n cmb[over_t] = 0.0\n\n # Find trigger onsets and offsets\n diff = cmb[1:] - cmb[0:-1]\n diff[:first_sample] = 0 # don't consider triggers before this\n onset = np.where(diff > 0)[0] + 1\n offset = np.where(diff < 0)[0]\n\n # minimum changing time\n onset_t = np.where((onset[1:] - onset[:-1]) >= min_sample)[0]\n onset = onset[np.append(onset_t, len(onset_t))]\n offset_t = np.where((offset[1:] - offset[:-1]) >= min_sample)[0] + 1\n offset = offset[np.append(0, offset_t)]\n\n # first offsets should be after first onset\n if offset[0] < onset[0]:\n offset = offset[1:]\n # offsets must go back to 0\n if offset_to_zero:\n offset = offset[np.where(cmb[offset+1] == 0.)[0]]\n # 
XXX should do the same for onset?:\n # onset = onset[np.where(cmb[onset-1] == 0.)[0]]\n if len(onset) > len(offset):\n # onset = onset[:-1]\n offset = np.hstack((offset, onset[-1] + min_sample))\n warnings.warn(\"Added extra offset!\")\n\n # Remove too short samples\n duration = offset - onset\n sample = onset[duration > min_sample].tolist()\n\n return cmb, sample\n","sub_path":"ambiguity/conditions.py","file_name":"conditions.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"650072582","text":"import requests\r\nfrom ..base.test import BaseTestCase, AuthorizedTestCase\r\nimport uuid\r\nimport common\r\n\r\n\r\nclass T(AuthorizedTestCase):\r\n\r\n @property\r\n def path(self):\r\n return '/service/secu/underway'\r\n\r\n def setUp(self):\r\n super().setUp()\r\n self.data = {\r\n \"itemid\": \"a6523c40a08f481a92c25c6945dc7b41\",\r\n \"userid\": self.user['id']\r\n }\r\n\r\n query = '''\r\n insert into todo_underway(itemid, userid) values (:itemid, :userid);\r\n '''\r\n self.db.execute(query, self.data)\r\n self.db.commit()\r\n\r\n def test_by_correct_info(self):\r\n response = requests.delete(self.url, json={'itemid': self.data['itemid']})\r\n self.assertNotEqual(response.text, '', '返回值为空!')\r\n resp = response.json()\r\n self.assertEqual('000', resp['code'])\r\n\r\n query = '''\r\n select count(1) from todo_underway where userid=:userid and itemid=:itemid;\r\n '''\r\n\r\n result = self.db.execute(query, self.data).scalar()\r\n\r\n self.assertEqual(0, result)\r\n","sub_path":"secu/tests/underway_delete_test.py","file_name":"underway_delete_test.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"353375198","text":"#!/usr/bin/env python3\n\"\"\"My solution to day 5 of Advent of Code\nhttp://adventofcode.com/day/5\n\"\"\"\n\nimport sys\n\nVOWELS = 'aeiou'\n\ndef is_nice(s):\n num_vowels = 0\n for c in s:\n if c in VOWELS:\n num_vowels += 1\n if num_vowels == 3:\n break\n if num_vowels < 3:\n return False\n\n if 'ab' in s or 'cd' in s or 'pq' in s or 'xy' in s:\n return False\n\n letter = ''\n for c in s:\n if c == letter:\n return True\n else: letter = c\n return False\n\ndef count_nice(strings):\n amount = 0\n for line in strings.splitlines():\n if is_nice(line):\n amount += 1\n return amount\n\ndef is_nicer(s):\n pairs = []\n for i in range(0,len(s)-1):\n pairs.append(s[i] + s[i+1])\n pairs = set(pairs)\n for i in pairs:\n if s.count(i) > 1:\n break\n else:\n return False\n\n for i in range(0, len(s)-2):\n if s[i] == s[i+2]:\n return True\n\ndef count_nicer(strings):\n amount = 0\n for line in strings.splitlines():\n if is_nicer(line):\n amount += 1\n return amount\n\nif __name__ == '__main__' and len(sys.argv) > 1 and len(sys.argv[1]) > 0:\n print('Number of nice strings:', count_nice(sys.argv[1]))\n print('Number of nicer strings:', count_nicer(sys.argv[1]))\n","sub_path":"day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"64267635","text":"\"\"\"Treadmill app configurator daemon, subscribes to eventmgr events.\n\"\"\"\n\nimport click\n\nfrom .. import appcfgmgr\n\n\ndef init():\n \"\"\"Top level command handler.\"\"\"\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n def top(approot):\n \"\"\"Starts appcfgmgr process.\"\"\"\n mgr = appcfgmgr.AppCfgMgr(root=approot)\n mgr.run()\n\n return top\n","sub_path":"treadmill/sproc/appcfgmgr.py","file_name":"appcfgmgr.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"635726911","text":"from datetime import datetime\nfrom django.contrib import admin\nfrom .models import *\n\ndef iniciar_tarea(modeladmin, request, queryset):\n for tarea in queryset:\n bitacora = Bitacora()\n bitacora.tarea = tarea\n bitacora.save()\n\ndef finalizar_tarea(modeladmin, request, queryset):\n queryset.update(terminada=True)\n\ndef finalizar_bitacora(modeladmin, request, queryset):\n queryset.update(fin=datetime.now())\n\nclass TareaInLine(admin.TabularInline):\n model = Tarea\n extra = 1\n\n\nclass BitacoraInLine(admin.StackedInline):\n model = Bitacora\n extra = 1\n\n\n@admin.register(Proyecto)\nclass ProyectoAdmin(admin.ModelAdmin):\n inlines = [TareaInLine]\n\n\n@admin.register(Tarea)\nclass TareaAdmin(admin.ModelAdmin):\n list_display = [\n 'nombre',\n 'descripcion',\n 'proyecto',\n 'terminada',\n 'fecha_de_entrega',\n 'usuario',\n 'responsable'\n ]\n actions = [iniciar_tarea, finalizar_tarea]\n list_filter = ['responsable', 'terminada']\n inlines = [BitacoraInLine]\n\n def get_queryset(self, request):\n qs = super(TareaAdmin, self).get_queryset(request)\n if not request.user.is_superuser:\n qs = qs.filter(responsable=request.user)\n return qs\n\n def save_model(self, request, obj, form, change):\n if not request.user.is_superuser:\n obj.responsable = request.user\n obj.save()\n\n\n@admin.register(Bitacora)\nclass BitacoraAdmin(admin.ModelAdmin):\n list_display = [\n 'tarea',\n 'inicio',\n 'fin'\n ]\n actions = [finalizar_bitacora]\n\n def get_queryset(self, request):\n qs = super(BitacoraAdmin, self).get_queryset(request)\n if not request.user.is_superuser:\n qs = qs.filter(tarea__responsable__id=request.user.id)\n return qs\n","sub_path":"pepepecas/apps/bitacora/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"279686778","text":"#-*-python-*-\nfrom BaseAI import BaseAI\nfrom GameObject import *\nimport math\nimport random\n\nclass AI(BaseAI):\n \"\"\"The class implementing gameplay logic.\"\"\"\n @staticmethod\n def username():\n return \"Shell AI\"\n\n @staticmethod\n def password():\n return \"password\"\n \n MOTHER = 0\n SPAWNER = 1\n CHOKER = 2\n SOAKER = 3\n BUMBLEWEED = 4\n ARALIA = 5\n TITAN = 6\n POOL = 7\n\n ##This function is called once, before your first turn\n def init(self):\n #set up me field\n self.me = self.players[self.playerID]\n #set up mother field\n self.mother = self.getMyPlants()[0]\n \n #set up directionOfEnemy field\n #if our mother is on the left side of the map, the enemy must be on the right side\n #and vice versa of course\n if self.mother.x < self.mapWidth/2:\n self.directionOfEnemy = 1\n else:\n self.directionOfEnemy = -1\n pass\n\n ##This function is called once, after your last turn\n def end(self):\n pass\n\n ##This function is called each time it is your turn\n ##Return true to end your turn, return false to ask the server for updated information\n def run(self):\n myPlants = self.getMyPlants()\n #for every plant we own, move them forward and attack if it finds an enemy\n for plant in myPlants:\n #only try radiating if it's possible\n if plant.radiatesLeft > 0:\n #only heal or buff allies and attack enemies\n targetOwner = 1 - self.playerID\n if plant.mutation == self.BUMBLEWEED or plant.mutation == self.SOAKER:\n targetOwner = self.playerID\n\n for foe in self.plants:\n #if it's dead skip it\n if foe.rads >= foe.maxRads:\n continue\n\n #don't mess with pools\n if foe.mutation == self.POOL:\n continue\n\n #if it's not the right target\n if foe.owner != targetOwner:\n continue\n\n #if a healer or soaker can't effect the mother weed\n if targetOwner == self.playerID and foe.mutation == self.MOTHER:\n continue\n\n #if a soaker can't effect other soakers\n if plant.mutation == self.SOAKER and foe.mutation == self.SOAKER:\n 
continue\n\n #if we're within range...\n if self.distance(plant.x, plant.y, foe.x, foe.y) < plant.range:\n #get 'im!\n plant.radiate(foe.x, foe.y)\n break\n\n #move them straight to the other side. no regrets.\n #move as far as possible, as long as it's not off the map\n wantedX = plant.x\n if plant.mutation == self.BUMBLEWEED:\n wantedX += self.directionOfEnemy * self.bumbleweedSpeed\n else:\n wantedX += self.directionOfEnemy * self.uprootRange\n if plant.uprootsLeft > 0 and self.getPlantAt(wantedX, plant.y) is None and 0 <= wantedX < self.mapWidth:\n plant.uproot(wantedX, plant.y)\n\n #make a new plant every turn, because why not?\n #first, check if we can actually do that\n if len(myPlants) >= self.maxPlants:\n #end turn\n return True\n\n spawnX = -1\n spawnY = -1\n angle = 0\n loc = 0\n for plant in myPlants:\n #remove all plants in our list except for mothers and spawners\n if not (plant.mutation == self.MOTHER or plant.mutation == self.SPAWNER):\n myPlants.remove(plant)\n\n #get a random spawner or mother plant\n spawnerPlant = myPlants[random.randint(0, len(myPlants) - 1)]\n\n #get a new position centered around that spawner within its range\n #also, keep generating new coordinates until they become valid ones\n #Remember from trig:\n #(random x inside a circle) = centerX + rand(0,1)*radius*cos(angle)\n spawnCheckLimit = 0\n while not self.withinBounds(spawnX, spawnY) or self.getPlantAt(spawnX, spawnY) is not None:\n angle = random.random() * 2 * math.pi\n while spawnX < 0 or spawnX >= self.mapWidth:\n spawnX = spawnerPlant.x + int(random.random() * spawnerPlant.range * math.cos(angle))\n while spawnY < 0 or spawnY >= self.mapHeight:\n spawnY = spawnerPlant.y + int(random.random() * spawnerPlant.range * math.sin(angle))\n spawnCheckLimit += 1\n #if we try to spawn too many times, just give up and end the turn\n if spawnCheckLimit > 10:\n return True\n #spawn a random type of plant that isn't a mother or a pool at the coordinates we made\n #of course, make 
sure we have enough spores to do the job!\n mutationType = random.randint(1, 6)\n if self.me.spores >= self.mutations[mutationType].spores and self.withinSpawnerRange(spawnX, spawnY):\n self.me.germinate(spawnX, spawnY, mutationType)\n return 1\n\n #Helper function to get all of the plants owned\n def getMyPlants(self):\n myPlants = []\n for plant in self.plants:\n if plant.owner == self.playerID:\n myPlants.append(plant)\n return myPlants\n\n #Helper function to get distance as a whole number\n def distance(self, x1, y1, x2, y2):\n return int((math.sqrt(math.pow(x1-x2,2)+math.pow(y1-y2,2))))\n \n #Helper function to get a Plant at a point\n #Returns None if no plant found\n def getPlantAt(self, x, y):\n #if it's out of bounds, we don't need to check anything\n if not self.withinBounds(x, y):\n return None\n \n #for every plant, if a plant is at the position we want, return it\n for plant in self.plants:\n if plant.x == x and plant.y == y:\n return plant\n\n return None\n\n #Helper function for bounds checking\n def withinBounds(self, x, y):\n if x < 0 or x >= self.mapWidth or y < 0 or y >= self.mapHeight:\n return False\n return True\n\n #Helper function to check if we're within range of a Spawner or Mother\n def withinSpawnerRange(self, x, y):\n #No need to check if we're not within the bounds of the map\n if not self.withinBounds(x, y):\n return False\n\n #for every plant\n for plant in self.plants:\n #check for ownership and correct mutation\n if plant.owner == self.me.id and (plant.mutation == self.SPAWNER or plant.mutation == self.MOTHER):\n #if we're within range, we're good\n if self.distance(x, y, plant.x, plant.y) < plant.range:\n return True\n\n #if we found none, nope\n return False\n\n def __init__(self, conn):\n BaseAI.__init__(self, conn)\n","sub_path":"ShellAI/python/AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"395359689","text":"import sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport torch\nimport time\nimport os.path as osp\nfrom torch.nn.parallel import DataParallel\nfrom UW.utils import Config\nfrom UW.core.Models import build_network\nfrom UW.core.Datasets import build_dataset, build_dataloader\nfrom UW.core.Optimizer import build_optimizer, build_scheduler\nfrom UW.utils import (mkdir_or_exist, get_root_logger,\n save_epoch, save_latest, save_item, normimage_test,\n resume, load, normPRED)\n\nfrom UW.utils.save_image import (save_image, normimage,\n save_ensemble_image, save_ensemble_image_8)\n\n\nfrom tensorboardX import SummaryWriter\n# TORCH_VERSION = torch.__version__\n# if TORCH_VERSION < '1.1' or TORCH_VERSION == 'parrots':\n# try:\n# from tensorboardX import SummaryWriter\n# except ImportError:\n# raise ImportError('Please install tensorboardX to use '\n# 'TensorboardLoggerHook.')\n# else:\n# try:\n# from torch.utils.tensorboard import SummaryWriter\n# except ImportError:\n# raise ImportError(\n# 'Please run \"pip install future tensorboard\" to install '\n# 'the dependencies to use torch.utils.tensorboard '\n# '(applicable to PyTorch 1.1 or higher)')\n\nfrom getpass import getuser\nfrom socket import gethostname\ndef get_host_info():\n return f'{getuser()}@{gethostname()}'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('--config',type=str,\n default='/home/dong/GitHub_Frame/UW/config/UWCNN.py',\n help='train config file path')\n parser.add_argument('--load_from',\n default='/home/dong/GitHub_Frame/UW/checkpoints/UWCNN/UWCNN_type3.pth',\n help='the dir to save logs and models,')\n parser.add_argument('--savepath', help='the dir to save logs and models,')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument(\n '--gpus',\n default=1,\n type=int,\n 
help='number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='ids of gpus to use '\n '(only applicable to non-distributed training)')\n args = parser.parse_args()\n return args\n\n\n\nif __name__ == '__main__':\n args = parse_args()\n cfg = Config.fromfile(args.config)\n if args.load_from is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.load_from = args.load_from\n if args.savepath is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.savepath = args.savepath\n elif cfg.get('work_dir', None) is None:\n # use config filename as default work_dir if cfg.work_dir is None\n cfg.savepath = osp.join('./results',\n osp.splitext(osp.basename(args.config))[0])\n if args.gpu_ids is not None:\n cfg.gpu_ids = args.gpu_ids\n else:\n cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)\n\n mata = dict()\n\n # make dirs\n mkdir_or_exist(osp.abspath(cfg.savepath))\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n cfg.log_file = osp.join(cfg.savepath, f'{timestamp}.log')\n\n # create text log\n # build model\n model = build_network(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)\n load(cfg.load_from, model, None)\n # build dataset\n datasets = build_dataset(cfg.data.test)\n # put model on gpu\n if torch.cuda.is_available():\n # model = DataParallel(model.cuda(), device_ids=cfg.gpu_ids)\n model = model.cuda()\n # create data_loader\n data_loader = build_dataloader(\n datasets,\n cfg.data.val_samples_per_gpu,\n cfg.data.val_workers_per_gpu,\n len(cfg.gpu_ids))\n\n save_cfg = False\n for i in range(len(cfg.test_pipeling)):\n if 'Normalize' == cfg.test_pipeling[i].type:\n save_cfg = True\n\n save_path = osp.join(cfg.savepath, cfg.load_from.split('/')[-1].split('.')[0])\n mkdir_or_exist(save_path)\n # before run\n model.eval()\n t = time.time()\n for i, data in 
enumerate(data_loader):\n # before iter\n\n inputs = data['image']\n with torch.no_grad():\n out_rgb = model(inputs)\n print('writing' + data['image_id'][0] + '.png')\n # input_numpy = normimage_test(inputs, save_cfg=save_cfg)\n rgb_numpy = normimage_test(out_rgb, save_cfg=save_cfg, usebytescale=cfg.usebytescale)\n\n outsavepath = osp.join(save_path, data['image_id'][0] + '.png')\n inputsavepath = osp.join(save_path, data['image_id'][0] + '_input.png')\n\n # save_image(input_numpy, inputsavepath)\n save_image(rgb_numpy, outsavepath, usebytescale=cfg.usebytescale)\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"506796352","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Proxy users recommender model\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.decomposition import TruncatedSVD, PCA\nfrom scipy.spatial.distance import cdist\nfrom sklearn.preprocessing import StandardScaler\nfrom numpy.random import shuffle\nimport bz2\nimport pickle\n\n# Recommender model 1: Proxy User Search\nclass RecommenderProxyUsers():\n \"\"\"recommender engine as an estimator\"\"\"\n\n # ******************************************************************\n def __init__(self, n_proxy_users=10):\n \"\"\"\n Called when initializing the model\n \"\"\"\n # model parameters\n self.n_proxy_users = n_proxy_users\n \n self.user_data = None\n self.item_data = None\n self.n_top_items = 10\n self.user_factors = None \n self.item_factors = None \n self.user_top_rated = None \n self.item_id_to_index_dict = None\n self.user_info = None\n\n # ******************************************************************\n def set_params(self, **params):\n self.__dict__.update(params)\n \n # ******************************************************************\n def read_model_data(self, filepath):\n\n with bz2.BZ2File(filepath, 'rb') as pickle_in:\n [self.user_data, self.item_data] = pickle.load(pickle_in)\n \n # set any games with no categories or mechanics to 'none'\n self.item_data.loc[self.item_data['categories'].isnull(), 'categories'] = 'none'\n self.item_data.loc[self.item_data['mechanics'].isnull(), 'mechanics'] = 'none'\n\n # column labels for data subsets\n user_top_cols = [col for col in self.user_data.columns if 'top_' in col]\n user_factor_cols = [col for col in self.user_data.columns if 'factor_' in col]\n item_factor_cols = [col for col in self.item_data.columns if 'factor_' in col]\n item_info_cols = [col for col in self.item_data.columns if 'factor_' not in col]\n\n # split up data for faster processing\n 
self.user_factors = self.user_data[user_factor_cols].values\n self.user_top_rated = self.user_data[user_top_cols].values \n \n # number of top items (games) in this dataset\n self.n_top_items = self.user_top_rated.shape[1]\n \n self.item_factors = self.item_data[item_factor_cols].values \n self.item_info = self.item_data[item_info_cols]\n \n self.item_id_to_index_dict = {key: value for (key, value) in \n zip(self.item_data['id'], \n range(len(self.item_data['id'])))}\n \n self.item_title_to_id_dict = {key: value for (key, value) in \n zip(self.item_data['name'].str.lower(), \n self.item_data['id'].astype(int))}\n \n # ******************************************************************\n def get_tags_from_csv_list(self, taglist):\n \"\"\"Create df with all unique tags contained in \n list of csv strings containing multiple tags each.\n Returns tags and counts sorted by most frequent to least.\"\"\"\n all_tags = []\n for tagset in taglist:\n all_tags += tagset.split(',')\n unique_tags, counts = np.unique(all_tags, return_counts=True)\n return pd.DataFrame( {'tag':unique_tags, 'count':counts} ).sort_values(\n by='count', ascending=False)\n\n # ******************************************************************\n def get_categories_and_mechanics(self):\n \"\"\"return lists of all category and mechanic labels\"\"\"\n \n # get all categories, sorted by counts\n categories = self.get_tags_from_csv_list(self.item_data['categories'].values)\n \n # remove expansion tag from list\n categories = categories[categories['tag'] != 'Expansion for Base-game']\n\n # get list of all mechanics, sorted by counts\n mechanics = self.get_tags_from_csv_list(self.item_data['mechanics'].values)\n \n return categories, mechanics\n \n # ******************************************************************\n def get_item_title_id(self, titles):\n \"\"\"return list of integer item IDs given title names (case insensitive)\"\"\"\n return [self.item_title_to_id_dict[title.lower()] for title in titles]\n 
\n # ******************************************************************\n def get_item_id_index(self, ids):\n \"\"\"return list of array indices given item IDs\"\"\"\n return [self.item_id_to_index_dict[itemid] for itemid in ids]\n\n # ******************************************************************\n def get_filtered_item_index(self, items, \n weightrange=[1,5],\n minrating=1,\n categories_include=[],\n categories_exclude=[],\n mechanics_include=[],\n mechanics_exclude=[]):\n\n # start with all data\n filt_items = items\n\n# print('filter_data, all data:',filt_items.shape)\n\n # filter by game weight\n # only filter if not defaults: [1,5]\n if weightrange[0] > 1 or weightrange[1] < 5:\n filt_items = filt_items[ (filt_items['weight'] >= weightrange[0]) &\n (filt_items['weight'] <= weightrange[1])]\n# print('weightrange, filt_items:',filt_items.shape)\n\n # filter by lowest average game rating\n # only filter if not default: 1\n if minrating > 1:\n filt_items = filt_items[ filt_items['mean_rating'] >= minrating ]\n# print('minrating, filt_items:',filt_items.shape)\n\n def tags_in_col(col, taglist):\n return col.apply(lambda x: any(tag in x for tag in taglist))\n\n # filter by categories to include\n # only filter if not default: [], or ['Any category',...]\n if (len(categories_include) and \n 'Any category' not in categories_include):\n filt_items = filt_items[ tags_in_col(filt_items['categories'], categories_include)]\n# print('categories_include, filt_items:',filt_items.shape)\n\n # filter by categories to exclude\n # only filter if not default: []\n if len(categories_exclude):\n filt_items = filt_items[ ~(tags_in_col(filt_items['categories'], categories_exclude))]\n# print('categories_exclude, filt_items:',filt_items.shape)\n\n # filter by mechanics to include\n # only filter if not default: [], or ['Any category',...]\n if (len(mechanics_include) and \n 'Any mechanism' not in mechanics_include):\n filt_items = filt_items[ tags_in_col(filt_items['mechanics'], 
mechanics_include)]\n# print('mechanics_include, filt_items:',filt_items.shape)\n\n# print(' filt_items:',filt_items.shape)\n\n return self.get_item_id_index(filt_items['id'])\n\n # ******************************************************************\n def get_sorted_proxy_index(self, user_liked):\n liked_idx_set = set(self.get_item_id_index(user_liked))\n scores = [-len(liked_idx_set.intersection(row)) for row in self.user_top_rated]\n return np.argsort(scores)\n\n # ******************************************************************\n def ratings_from_factors(self, row_index):\n return (np.dot(self.user_factors[row_index,:], self.item_factors.T))\n \n # ******************************************************************\n def recommend_items_by_pref_list(self, liked_item_ids, num2rec=10, **filtargs): \n \n \"\"\"Recommend games using multiple liked games in a list of titles.\n This method creates a set of recommended games for each title in prefs and\n then selects the most commonly recommended\"\"\"\n \n # get indices to proxy users\n proxy_idx = self.get_sorted_proxy_index(liked_item_ids)\n\n # average ratings for all items among proxy users\n ratings = np.mean(self.ratings_from_factors(proxy_idx[:self.n_proxy_users]), axis=0)\n \n # Create some randomness here by adding a +/- random \n # value to the ratings\n randrange = .2\n randvals = np.random.random(len(ratings))*randrange\n fuzzed_ratings = np.multiply(ratings, randvals)\n \n # get indices of filter allowed items\n filt_item_idx = self.get_filtered_item_index(self.item_info, **filtargs)\n \n def filter_items(item_idx, filter_idx, liked_item_ids):\n \"\"\"return ordered list of item indices that intersect with filter_idx.\n Also, exclude games in the liked item list\"\"\"\n filt_ids = [i for i in item_idx if i in set(filter_idx)]\n return [i for i in filt_ids if not i in set(liked_item_ids)] \n \n # filtered descending sort of item ratings\n item_idx = filter_items(np.argsort(-fuzzed_ratings), \n 
filt_item_idx, \n self.get_item_id_index(liked_item_ids))\n \n # select num2rec top rated game IDs \n return self.item_data['name'].values[item_idx[:num2rec]]\n \n \n\n","sub_path":"capstone_2/deploy_bokeh_top_ALS_rating_proxy/bokeh_app/recommender_proxy_users.py","file_name":"recommender_proxy_users.py","file_ext":"py","file_size_in_byte":9424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"45887794","text":"import pornhub\r\n\r\n\r\nallowableWords = set()\r\nwith open(\"the-king-james-bible.txt\", \"r\") as f:\r\n allTheLines = f.readlines()\r\n for line in allTheLines:\r\n words = line.split(\" \")\r\n for word in words:\r\n allowableWords.add(word)\r\n\r\ndef getKeyWords():\r\n print(\"type all your keywords seperated by a space\")\r\n keywords = input().split(\" \")\r\n\r\n hasBadWords = False\r\n badWords = []\r\n for keyword in keywords:\r\n if keyword not in allowableWords:\r\n hasBadWords = True\r\n badWords.append(keyword)\r\n\r\n if hasBadWords:\r\n print(\"{}! That word is not approved by Jesus! Try again \\n\\n\\n\\n\\n\".format(\", \".join(badWords).title())) \r\n keywords = getKeyWords()\r\n \r\n return keywords\r\n#client = pornhub.PornHub(\"5.135.164.72\", 3128, search_keywords)\r\n#With proxy, given a Proxy IP and Port. For the countries with restricted access like Turkey, etc.\r\nsearch_keywords = getKeyWords()\r\nprint(search_keywords)\r\nclient = pornhub.PornHub(search_keywords)\r\n \r\nfor video in client.getVideos(quantity=10):\r\n print(video[\"name\"])\r\n print(video[\"url\"])\r\n print()","sub_path":"kingJamesTest.py","file_name":"kingJamesTest.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"85768240","text":"import numpy as np\nanlist = [0,1,3,1,1,4,1]\nlandlist = [2,3,4,1,1,1,1]\nanlist = np.array(anlist)\nlandlist = np.array(landlist)\nventetid = 0 \n#venteliste = []\n\n# cumsum af lister\ncum_landlist = np.cumsum(landlist)\ncum_anlist = np.cumsum(anlist)\n\nfor i in range(len(landlist)):\n if i > 0:\n if cum_anlist[i] < cum_landlist[i-1]:\n ventetid = ventetid+ cum_landlist[i-1]-cum_anlist[i]\n# venteliste.append(ventetid)\n# ventetid = 0\n else:\n ventetid = ventetid\nprint(ventetid)\n\n\n\"\"\"\nDette skulle gerne fungere!!!\n\nNår vi er HELT færdige med koden kunne det være en ide at omskrive det til\nsamme type kode som vores anden for løkke, der laver de randomtider.\nAltså sætte de to arrays sammen og bruge enumerate funktionen.\n\n\"\"\"","sub_path":"Python/MODSIM - Miniprojekt/Trash.py","file_name":"Trash.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"148602590","text":"# 整型变量\na = 100\n# 浮点型变量\nb = 100.0\n# 字符串\nc = 'wxl'\nprint(a, b, c)\n#没有类型定义关键字,根据赋初始值来确定变量类型\n\nd=1.23\n\n#变量测试\n\n#多个变量多重赋值\ne = f = g = 100\nprint(e, f, g)\n\n#多元赋值\nh, i, j = 100, 100.0, 'wxl'\nprint(h, i, j)\n\n#查询变量类型函数:type()\n#内置的 type() 函数可以用来查询变量所指的对象类型。\na = 1000\nprint(type(a))\n\n#判断对象类型函数:isinstance()\n\na = 1000\nprint(isinstance(a, int))\n\n#两者区别\n#type() 不会认为子类是一种父类类型;\n#isinstance() 会认为子类是一种父类类型。","sub_path":"Test_2.py","file_name":"Test_2.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"211768634","text":"import torch\nfrom torch.utils.data import Dataset\n\nfrom .stereo_dataset import StereoDataset\nfrom .dexter_object import DexterObjectDataset\n\n\nclass MixedDataset(Dataset):\n def __init__(self, config):\n \n self.Stereo = StereoDataset({'path': config['path_stereo'], \n 'augment': config['augment'],\n 'scope': config['scope']})\n \n self.DexterObject = DexterObjectDataset({'path': config['path_dexter'], \n 'augment': config['augment'],\n 'scope': config['scope']})\n \n self.len_Stereo = self.Stereo.__len__()\n self.len_DexterObject = self.DexterObject.__len__()\n \n\n def __getitem__(self, idx):\n\n if idx < self.len_Stereo:\n return self.Stereo.__getitem__(idx)\n else:\n return self.DexterObject.__getitem__(idx-self.len_Stereo)\n\n \n def __len__(self):\n return self.len_Stereo + self.len_DexterObject\n\n \n \n","sub_path":"dataset/mixed_dataset_real.py","file_name":"mixed_dataset_real.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"334699712","text":"# -*- coding: utf-8 -*-\n\"\"\"OpenCTI Valhalla Knowledge importer module.\"\"\"\n\nimport re\nfrom datetime import datetime\nfrom typing import Any, Dict, List, Mapping, Optional\nfrom urllib.parse import urlparse\n\nfrom .models import ApiResponse\n\nfrom pycti.connector.opencti_connector_helper import OpenCTIConnectorHelper\nfrom stix2 import TLP_WHITE, TLP_GREEN, TLP_AMBER, TLP_RED\n\n\nclass KnowledgeImporter:\n \"\"\"Valhalla Knowledge importer.\"\"\"\n\n _GUESS_NOT_A_MALWARE = \"GUESS_NOT_A_MALWARE\"\n _GUESS_NOT_A_ACTOR = \"GUESS_NOT_A_ACTOR\"\n _KNOWLEDGE_IMPORTER_STATE = \"knowledge_importer_state\"\n _TLP_MAPPING = {\n \"tlp_white\": \"TLP_WHITE\",\n \"tlp_green\": \"TLP_GREEN\",\n \"tlp_amber\": \"TLP_AMBER\",\n \"tlp_red\": \"TLP_RED\",\n }\n\n def __init__(\n self,\n helper: OpenCTIConnectorHelper,\n confidence_level: int,\n update_data: bool,\n default_marking,\n valhalla_client: str,\n ) -> None:\n \"\"\"Initialize Valhalla indicator importer.\"\"\"\n self.helper = helper\n self.guess_malware = True\n self.guess_actor = True\n self.confidence_level = confidence_level\n self.update_data = update_data\n self.default_marking = default_marking\n self.valhalla_client = valhalla_client\n self.malware_guess_cache: Dict[str, str] = {}\n self.actor_guess_cache: Dict[str, str] = {}\n self.date_utc = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S+00:00\")\n self.organization = helper.api.identity.create(\n name=\"Nextron Systems GmbH\",\n type=\"Organization\",\n description=\"THOR APT scanner and Valhalla Yara Rule API Provider\",\n )\n\n def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"Run importer.\"\"\"\n self.helper.log_info(\"running Knowledge importer with state: \" + str(state))\n\n self._load_opencti_tlp()\n self._process_rules()\n\n state_timestamp = datetime.utcnow().timestamp()\n self.helper.log_info(\"knowledge importer completed\")\n return {self._KNOWLEDGE_IMPORTER_STATE: state_timestamp}\n\n 
def _process_rules(self) -> None:\n try:\n rules_json = self.valhalla_client.get_rules_json()\n response = ApiResponse.parse_obj(rules_json)\n except Exception as err:\n self.helper.log_error(f\"error downloading rules: {err}\")\n return None\n\n for yr in response.rules:\n try:\n indicator = self.helper.api.indicator.create(\n name=yr.name,\n description=yr.cti_description,\n pattern_type=\"yara\",\n indicator_pattern=yr.content,\n markingDefinitions=[self.default_marking[\"id\"]],\n main_observable_type=\"File-SHA256\",\n createdByRef=self.organization[\"id\"],\n valid_from=yr.cti_date,\n score=yr.score,\n update=self.update_data,\n detection=True,\n )\n except Exception as err:\n self.helper.log_error(f\"error creating indicator: {err}\")\n\n self._add_refs_for_id([yr.reference], indicator[\"id\"])\n self._add_tags_for_indicator(yr.tags, indicator[\"id\"])\n\n def _add_tags_for_indicator(self, tags: list, indicator_id: str) -> None:\n for tag in tags:\n # We skip on tags with MITRE ids for now\n if re.search(r\"^\\D\\d{4}$\", tag):\n continue\n # Create Hygiene Tag\n tag_valhalla = self.helper.api.tag.create(\n tag_type=\"Valhalla\", value=tag, color=\"#46beda\",\n )\n self.helper.api.stix_entity.add_tag(\n id=indicator_id, tag_id=tag_valhalla[\"id\"]\n )\n\n def _add_refs_for_id(self, refs: list, obj_id: str) -> None:\n if refs == {} or obj_id == \"\":\n return None\n\n for ref in refs:\n if ref == \"-\":\n continue\n try:\n san_url = urlparse(ref)\n except Exception:\n self.helper.log_error(f\"error parsing ref url: {ref}\")\n continue\n\n reference = self.helper.api.external_reference.create(\n source_name=\"Nextron Systems Valhalla API\",\n url=san_url.geturl(),\n description=\"Rule Reference: \" + san_url.geturl(),\n )\n self.helper.api.stix_entity.add_external_reference(\n id=obj_id, external_reference_id=reference[\"id\"],\n )\n\n def _guess_malwares_from_tags(self, tags: List[str]) -> Mapping[str, str]:\n if not self.guess_malware:\n return {}\n\n 
malwares = {}\n\n for tag in tags:\n if not tag:\n continue\n guess = self.malware_guess_cache.get(tag)\n if guess is None:\n guess = self._GUESS_NOT_A_MALWARE\n\n id = self._fetch_malware_id_by_name(tag)\n if id is not None:\n guess = id\n\n self.malware_guess_cache[tag] = guess\n\n if guess == self._GUESS_NOT_A_MALWARE:\n self.helper.log_info(f\"Tag '{tag}'' does not reference malware\")\n else:\n self.helper.log_info(f\"Tag '{tag}' references malware '{guess}'\")\n malwares[tag] = guess\n return malwares\n\n def _guess_actor_from_tags(self, tags: List[str]) -> Mapping[str, str]:\n if not self.guess_actor:\n return {}\n\n actors = {}\n\n for tag in tags:\n if not tag:\n continue\n guess = self.actor_guess_cache.get(tag)\n if guess is None:\n guess = self._GUESS_NOT_A_ACTOR\n\n id = self._fetch_actor_id_by_name(tag)\n if id is not None:\n guess = id\n\n self.actor_guess_cache[tag] = guess\n\n if guess == self._GUESS_NOT_A_ACTOR:\n self.helper.log_info(f\"Tag '{tag}' does not reference actor\")\n else:\n self.helper.log_info(f\"Tag '{tag}' references actor '{guess}'\")\n actors[tag] = guess\n return actors\n\n def _fetch_malware_id_by_name(self, name: str) -> Optional[str]:\n if name == \"\":\n return None\n filters = [\n self._create_filter(\"name\", name),\n self._create_filter(\"alias\", name),\n ]\n for fil in filters:\n malwares = self.helper.api.malware.list(filters=fil)\n if malwares:\n if len(malwares) > 1:\n self.helper.log_info(f\"More then one malware for '{name}'\")\n malware = malwares[0]\n return malware[\"id\"]\n return None\n\n def _fetch_actor_id_by_name(self, name: str) -> Optional[str]:\n if name == \"\":\n return None\n filters = [\n self._create_filter(\"name\", name),\n self._create_filter(\"alias\", name),\n ]\n for fil in filters:\n actors = self.helper.api.threat_actor.list(filter=fil)\n if actors:\n if len(actors) > 1:\n self.helper.log_info(f\"More then one actor for '{name}'\")\n actor = actors[0]\n return actor[\"id\"]\n return None\n\n 
@staticmethod\n def _create_filter(key: str, value: str) -> List[Mapping[str, Any]]:\n return [{\"key\": key, \"values\": [value]}]\n\n def _load_opencti_tlp(self):\n self._TLP_MAPPING[\"tlp_white\"] = self.helper.api.marking_definition.read(\n id=TLP_WHITE[\"id\"]\n )\n self._TLP_MAPPING[\"tlp_green\"] = self.helper.api.marking_definition.read(\n id=TLP_GREEN[\"id\"]\n )\n self._TLP_MAPPING[\"tlp_amber\"] = self.helper.api.marking_definition.read(\n id=TLP_AMBER[\"id\"]\n )\n self._TLP_MAPPING[\"tlp_red\"] = self.helper.api.marking_definition.read(\n id=TLP_RED[\"id\"]\n )\n","sub_path":"valhalla/src/valhalla/knowledge.py","file_name":"knowledge.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"326574980","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Karanjot\n#\n# Created: 26/04/2017\n# Copyright: (c) Karanjot 2017\n# Licence: \n#-------------------------------------------------------------------------------\nfrom graphics import *\ndef main():\n print(\"This program creates 2 circles using clones\")\n win = GraphWin(\"clones\")\n leftEye = Circle(Point(80,50), 5)\n leftEye.setFill(\"yellow\")\n leftEye.setOutline(\"red\")\n rightEye = leftEye.clone() # rightEye is an exact copy of the left. can't do rightEye = leftEye\n rightEye.move(20,0)\n leftEye.draw(win)\n rightEye.draw(win)\nmain()","sub_path":"program creates 2 circles with clone.py","file_name":"program creates 2 circles with clone.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"320774284","text":"import logging\nimport threading\n\nimport paho.mqtt.client as paho_mqtt_client\n\nfrom settings import Settings\nfrom mqtt_message import MqttMessage\n\n\nclass MqttReceiver(threading.Thread):\n \"\"\" Class that offers the possibility to receive mqtt messages. \"\"\"\n\n def __init__(self, topic):\n \"\"\"\n Constructor that initializes necessary\n variables for mqtt connection.\n\n :param topic: topic that gets handled by this receiver thread\n \"\"\"\n super(MqttReceiver, self).__init__(name=\"MqttReceiverThread-\"+str(topic))\n\n self.topic = topic\n\n self._stop = threading.Event()\n self.message_list = []\n self.mqtt_client = paho_mqtt_client.Client()\n self.mqtt_client.on_connect = self.on_connect\n self.mqtt_client.on_disconnect = self.on_disconnect\n self.mqtt_client.on_message = self.on_message\n\n def stop(self):\n \"\"\"\n Sets the internal stop event to\n request the thread stopping.\n \"\"\"\n logging.debug(\"Calling stop on: \"+self.name)\n self._stop.set()\n\n def stopped(self):\n \"\"\"\n Returns whether the internal\n stop event is set.\n \"\"\"\n return self._stop.is_set()\n\n def run(self):\n \"\"\"\n Thread starting method that connects the\n receiver and starts the necessary loops.\n Stops and disconnects if the internal\n stop event is set.\n \"\"\"\n logging.info('MQTT receiver for ' + str(self.topic) + ' started.')\n self.mqtt_client.connect(Settings.mqtt_broker_host, Settings.mqtt_port)\n self.mqtt_client.loop_start()\n\n while not self.stopped():\n pass\n\n self.mqtt_client.loop_stop(force=True)\n self.mqtt_client.disconnect()\n logging.info('MQTT receiver for ' + str(self.topic) + ' stopped.')\n\n def on_connect(self, mqttc, userdata, flags, rc):\n \"\"\" Subsribes to the topic and prints a log message on connecting. 
\"\"\"\n self.mqtt_client.subscribe(self.topic, Settings.mqtt_qos)\n logging.info('MQTT receiver for ' + str(self.topic) + ' connected with result code ' + str(rc))\n\n def on_disconnect(self, client, userdata, rc):\n \"\"\" Prints a log message on disconnecting. \"\"\"\n logging.info('MQTT receiver for ' + str(self.topic) + ' disconnected with result code ' + str(rc))\n\n def on_message(self, client, userdata, message):\n \"\"\"\n Prints a log message if a message is received\n and adds the message as a mqtt_message object\n to the message_list.\n \"\"\"\n logging.debug('MQTT receiver for ' + str(self.topic) + ' got message: ' + str(message.payload))\n self.message_list.append(MqttMessage(topic=self.topic, payload=message.payload, qos=message.qos))\n\n def pop(self):\n \"\"\"\n Pops the first (oldest) element from\n the message list and returns it.\n \"\"\"\n if self.message_list:\n return self.message_list.pop(0)\n","sub_path":"mqtt_receiver.py","file_name":"mqtt_receiver.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"534182945","text":"#!/usr/bin/env python3\n\nimport math\nimport random\n\nclass Dungeon_Generator():\n\n\tROOM_SIZE_MEAN = 7\n\tROOM_SIZE_VAR = 2\n\n\tdef __init__(self, seed, size):\n\t\trandom.seed(seed)\n\t\tself.seed = seed\n\t\tself.size = size\n\t\tself.dungeon = size * [None]\n\t\tfor i in range(0, size):\n\t\t\tself.dungeon[i] = size * [False]\n\t\tself.rooms = []\n\n\tdef generate_dungeon(self):\n\t\twhile len(self.rooms) < 10:\n\t\t\tself._generate_room(1)\n\n\tdef _generate_room(self, size):\n\t\twidth = 0\n\t\twhile width < 2:\n\t\t\twidth = math.floor(random.gauss(Dungeon_Generator.ROOM_SIZE_MEAN, Dungeon_Generator.ROOM_SIZE_VAR))\n\t\theight = 0\n\t\twhile height < 2:\n\t\t\theight = math.floor(random.gauss(Dungeon_Generator.ROOM_SIZE_MEAN, Dungeon_Generator.ROOM_SIZE_VAR))\n\t\tx = random.randint(0, self.size - width)\n\t\ty = random.randint(0, self.size - height)\n\t\tif self._area_is_clear(x-1, y-1, width+2, height+2):\n\t\t\tself._mark_area(x, y, width, height, True)\n\t\t\tself.rooms.append((x, y, width, height))\n\t\t\treturn True\n\t\treturn False\n\n\tdef _mark_area(self, x, y, width, height, mark):\n\t\tfor i in range(y, y + height):\n\t\t\tfor j in range(x, x + width):\n\t\t\t\tself.dungeon[i][j] = mark\n\n\tdef _area_is_clear(self, x, y, width, height):\n\t\treturn (x >= 0 and\n\t\t\t\ty >= 0 and\n\t\t\t\tx + width < len(self.dungeon) and\n\t\t\t\ty + height < len(self.dungeon) and\n\t\t\t\tnot any(any(row) for row in self._extract(x, y, width, height)))\n\n\tdef _extract(self, x, y, width, height):\n\t\treturn [(row[x:x+width]) for row in self.dungeon[y:y+height]]\n\n\tdef print_dungeon(self, dungeon=None):\n\t\tif not dungeon:\n\t\t\tdungeon = self.dungeon\n\t\tprint(\"\\n\".join(\"\".join((\" \" if cell else \"**\") for cell in row) for row in dungeon))\n\nif __name__ == \"__main__\":\n\ta = [[False, False, True],\n\t [False, True, False],\n\t [False, False, False]]\n\tdg = Dungeon_Generator(random.random(), 30)\n\t# dg = 
Dungeon_Generator(, 50)\n\tdg.generate_dungeon()\n\tdg.print_dungeon()\n\t# print(dg.seed)\n","sub_path":"dungen.py","file_name":"dungen.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"362779727","text":"import select\nimport socket\nimport sys\nimport queue\n\nport = int(sys.argv[1])\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #the accept socket\ns.setblocking(0)\n\ns.bind((socket.gethostname(), port))\ns.listen(5)\n\nread_list = [s]\nwrite_list = []\nmessage_queues = {}\n\nwhile True:\n readable, writable, exceptional = select.select(read_list, write_list, read_list)\n #Note: This code, as well as several other parts of this script, were taken from https://pymotw.com/3/select/\n\n for readable_socket in readable:\n if readable_socket is s:\n clientSock, clientAddr = s.accept()\n clientSock.setblocking(0)\n read_list.append(clientSock)\n message_queues[clientSock] = queue.Queue()\n else:\n fullRequest = readable_socket.recv(4096).decode()\n if fullRequest: #got data\n if readable_socket not in write_list:\n write_list.append(readable_socket)\n\n header = {}\n firstLine = fullRequest.split('\\n')[0].split(' ')\n header['HTTP-Command'] = firstLine[0]\n header['Path'] = firstLine[1]\n header['HTTP-Type'] = firstLine[2]\n for line in fullRequest.split('\\n\\r\\n')[0].split('\\n'):\n if ':' in line:\n x, y = line.split(':', 1)\n header[x] = y.strip()\n\n if len(fullRequest) == 4096:\n try:\n while len(fullRequest) < int(header['Content-Length']):\n request = clientSock.recv(4096) # recieve the request with max of 4096 bits(?) 
at once\n fullRequest += request.decode()\n except Exception as e:\n while True:\n request = clientSock.recv(4096)\n fullRequest += request.decode()\n if len(request.decode()) < 4096:\n break\n\n\n # Controlling all the request stuff here, pretty self explanatory\n if header['HTTP-Command'] != 'GET':\n fullResponse = 'HTTP/1.1 400 Bad Request\\r\\nContent-Length: 0\\r\\nContent-Type: text/html\\r\\n\\r\\n'\n elif not (header['Path'][-4:] == '.htm' or header['Path'][-5:] == '.html'):\n fullResponse = 'HTTP/1.1 403 Forbidden\\r\\nContent-Length: 0\\r\\nContent-Type: text/html\\r\\n\\r\\n'\n else:\n try:\n file = open(header['Path'][1:], 'r')\n response = file.read()\n file.close()\n\n responselength = len(response)\n responsetype = 'text/html'\n exit_code = '200 OK'\n except Exception as e:\n response = ''\n responselength = 0\n responsetype = 'text/html'\n exit_code = '404 Not Found'\n\n fullResponse = 'HTTP/1.1 ' + exit_code + '\\r\\nContent-Length: ' + str(responselength) + '\\r\\nContent-Type: ' + responsetype +'\\r\\n\\r\\n' + response\n message_queues[readable_socket].put(fullResponse)\n else: #didn't get data\n if readable_socket in write_list:\n write_list.remove(readable_socket)\n read_list.remove(readable_socket)\n readable_socket.close()\n del message_queues[readable_socket]\n\n for writable_socket in writable:\n try:\n next_message = message_queues[writable_socket].get_nowait()\n writable_socket.send(next_message.encode())\n except queue.Empty:\n write_list.remove(writable_socket)\n\n for exceptional_socket in exceptional:\n if exceptional_socket in write_list:\n write_list.remove(exceptional_socket)\n read_list.remove(exceptional_socket)\n exceptional_socket.close()\n del message_queues[exceptional_socket]\n","sub_path":"http_server2_incomplete.py","file_name":"http_server2_incomplete.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"462955453","text":"# coding utf-8\nimport gzip\nimport os\n\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef batch_generator(X, y, batch_size=32, shuffle=True):\n n_data = len(X)\n index = np.arange(len(y))\n if shuffle:\n np.random.shuffle(index)\n for ii in range(0, n_data, batch_size):\n if n_data - ii < batch_size:\n features = X[index[ii:]]\n targets = y[index[ii:]]\n else:\n features = X[index[ii : ii + batch_size]]\n targets = y[index[ii : ii + batch_size]]\n yield features, targets\n\n\nclass mnist_dataset:\n def __init__(self, dir_path):\n\n assert os.path.exists(dir_path), \"Arguments error: dir_path does not exist\"\n\n # store path of the data directory\n self.dir_path = dir_path + os.sep\n # define file name\n self.ftrain_feature = self.dir_path + os.path.sep + \"train-images-idx3-ubyte.gz\"\n self.ftrain_labels = self.dir_path + os.path.sep + \"train-labels-idx1-ubyte.gz\"\n self.ftest_features = self.dir_path + os.path.sep + \"t10k-images-idx3-ubyte.gz\"\n self.ftest_labels = self.dir_path + os.path.sep + \"t10k-labels-idx1-ubyte.gz\"\n\n for path in [\n self.ftrain_feature,\n self.ftrain_labels,\n self.ftest_features,\n self.ftest_labels,\n ]:\n print(path)\n assert os.path.exists(path), \"File error: \" + path + \" does not exist\"\n\n def load(self):\n X_train = self.load_features(self.ftrain_feature)\n X_test = self.load_features(self.ftest_features)\n y_train = self.load_labels(self.ftrain_labels)\n y_test = self.load_labels(self.ftest_labels)\n\n # one-hot encoding\n y_train = self.one_hot_encoding(y_train)\n y_test = self.one_hot_encoding(y_test)\n\n # normalize\n X_train = X_train / 255.0\n X_test = X_test / 255.0\n\n return X_train, X_test, y_train, y_test\n\n def load_features(self, file_path):\n \"\"\"Load images as 1D array\"\"\"\n with gzip.open(file_path, \"rb\") as f:\n features = np.frombuffer(f.read(), dtype=np.uint8, offset=16)\n return features.reshape(-1, 28 * 28)\n\n def load_labels(self, 
file_path):\n \"\"\"Load labels as 1D array\"\"\"\n with gzip.open(file_path, \"rb\") as f:\n labels = np.frombuffer(f.read(), dtype=np.uint8, offset=8)\n return labels\n\n def one_hot_encoding(self, y):\n \"\"\"Convert binary labels into one-hot encoding\"\"\"\n y = y.reshape(1, -1)\n y = y.transpose()\n encoder = OneHotEncoder()\n return encoder.fit_transform(y).toarray()\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"406018824","text":"from django.conf.urls import patterns, url\nfrom django.contrib.auth.views import logout as logout_view\nfrom battleshipapp import views\n\nurlpatterns = patterns('',\n\n\turl(r'^$', views.index, name='index' ),\n\turl(r'^/$', views.index, name='index' ),\n url(r'^login/$', views.user_login, name='login'),\n url(r'^logout/$', logout_view, {'next_page': '/' }),\n \n\n url(r'games/$', views.BattleshipGameList.as_view() ),\n url(r'games/(?P[0-9]+)$', views.BattleshipGame.as_view() ),\n\n url(r'player$', views.PlayerView.as_view() ),\n)","sub_path":"battleshipapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"369843988","text":"import random\r\n\r\ndef validate(hand):\r\n if hand < 0 or hand > 2:\r\n return False\r\n else:\r\n return True\r\n\r\ndef handshape(hand, name):\r\n hands = ['Rock', 'Paper', 'Scissors']\r\n print(name + ' picked: ' + hands[hand])\r\n\r\ndef battle(player, computer):\r\n if player == computer:\r\n return 'Draw'\r\n elif player == 0 and computer == 1:\r\n return 'You Lose...'\r\n elif player == 1 and computer == 2:\r\n return 'You Lose...'\r\n elif player == 2 and computer == 0:\r\n return 'You Lose...'\r\n else:\r\n return 'You Win!!'\r\n\r\ndef main():\r\n print(\r\n \"\"\"\r\n \\n \\U0000270A \\U0001F590 \\U0000270C ====== WELCOME TO JANKENPON GAME ====== \\U0000270C \\U0001F590 \\U0000270A\r\n \"\"\")\r\n player_name = input('Please enter your name: ')\r\n\r\n print('\\nPick a hand: (0: Rock, 1: Paper, 2: Scissors)')\r\n\r\n player_handshape = None\r\n while True:\r\n try:\r\n player_handshape = int(input('Please enter a number (0-2): '))\r\n except ValueError:\r\n print('Please input valid number')\r\n continue\r\n else:\r\n break\r\n\r\n if validate(player_handshape):\r\n computer_handshape = random.randint(0, 2)\r\n print('\\n')\r\n handshape(player_handshape, player_name)\r\n handshape(computer_handshape, 'Computer')\r\n result = battle(player_handshape, computer_handshape)\r\n print('\\nResult: ' + result)\r\n\r\n else:\r\n while True:\r\n print(\"Please input valid number\")\r\n while True:\r\n try:\r\n player_handshape = int(input('Please enter a number (0-2): '))\r\n except ValueError:\r\n print('Please input valid number')\r\n continue\r\n else:\r\n break\r\n if validate(player_handshape):\r\n computer_handshape = random.randint(0, 2)\r\n print('\\n')\r\n handshape(player_handshape, player_name)\r\n handshape(computer_handshape, 'Computer')\r\n result = battle(player_handshape, computer_handshape)\r\n print('\\nResult: ' + result)\r\n break\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\nwhile True:\r\n while 
True:\r\n again = str(input('Play again? (y/n): '))\r\n if again in ('y', 'n'):\r\n break\r\n print('Invalid input')\r\n if again == 'y':\r\n main()\r\n else:\r\n print('\\nThank you for playing JANKENPON game!')\r\n print('See you later!')\r\n break\r\n","sub_path":"jankenpon.py","file_name":"jankenpon.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"77485357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 13:31:58 2019\n\n\"\"\"\n\n# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom numpy.random import RandomState\nfrom scipy.stats import chi\n#import sys\n#from quat_ops import *\n#import torch_autograd_solver as S\n#import quat_ops\n#from torch_batch_svd import batch_svd\n\n# PyTorch-backed implementations\n\ndef qmul(q, r):\n \"\"\"\n Multiply quaternion(s) q with quaternion(s) r.\n Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.\n Returns q*r as a tensor of shape (*, 4).\n \"\"\"\n assert q.shape[-1] == 4\n assert r.shape[-1] == 4\n\n original_shape = q.shape\n\n # Compute outer product\n terms = torch.bmm(r.contiguous().view(-1, 4, 1), q.contiguous().view(-1, 1, 4))\n\n w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]\n x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]\n y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]\n z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]\n return torch.stack((w, x, y, z), dim=1).view(original_shape)\n\ndef qrotv(q, v):\n \"\"\"\n Rotate vector(s) v about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = list(v.shape)\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return (v 
+ 2 * (q[:, :1] * uv + uuv)).view(original_shape)\n\n\ndef qrotv3(q, v):\n \"\"\"\n Rotate vector(s) v about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = list(v.shape)\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n # Compute outer product\n terms = torch.bmm(q.view(-1, 4, 1), q.view(-1, 1, 4))\n b2=terms[:,1,1]\n c2=terms[:,2,2]\n d2=terms[:,3,3]\n ab=terms[:,0,1]\n ac=terms[:,0,2]\n ad=terms[:,0,3]\n bc=terms[:,1,2]\n bd=terms[:,1,3]\n cd=terms[:,2,3]\n\n\n qvec_x=[1-2*c2-2*d2, 2*bc-2*ad, 2*ac+2*bd]\n qvec_y=[2*bc+2*ad, 1-2*b2-2*d2, 2*cd-2*ab]\n qvec_z=[2*bd-2*ac, 2*ab+2*cd, 1-2*b2-2*c2]\n qvec=torch.stack((torch.stack(qvec_x, dim=1), torch.stack(qvec_y, dim=1), torch.stack(qvec_z, dim=1)), dim=1)\n\n return torch.bmm(qvec,v.unsqueeze(-1)).view(original_shape)\n\n\n\ndef qrotq(q, p):\n \"\"\"\n Rotate quaternion(s) p about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 4) for p,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 4).\n \"\"\"\n# assert q.shape[-1] == 4\n# assert p.shape[-1] == 4\n# assert q.shape[:-1] == p.shape[:-1]\n\n original_shape = list(p.shape)\n q = q.view(-1, 4)\n p = p.view(-1, 4)\n pw=p[:,0]\n pv=p[:,1:4]\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, pv, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n\n pv=(pv + 2 * (q[:, :1] * uv + uuv))\n\n# return (pv + 2 * (q[:, :1] * uv + uuv)).view(original_shape)\n return torch.cat((pw.unsqueeze(-1), pv), dim=1).view(original_shape)\n\ndef qrotq3(q, p):\n \"\"\"\n Rotate quaternion(s) p about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 4) for p,\n where * denotes any number of 
dimensions.\n Returns a tensor of shape (*, 4).\n \"\"\"\n assert q.shape[-1] == 4\n assert p.shape[-1] == 4\n assert q.shape[:-1] == p.shape[:-1]\n\n original_shape = list(p.shape)\n q = q.view(-1, 4)\n p = p.view(-1, 4)\n pw=p[:,0]\n pv=p[:,1:4]\n\n # Compute outer product\n terms = torch.bmm(q.view(-1, 4, 1), q.view(-1, 1, 4))\n b2=terms[:,1,1]\n c2=terms[:,2,2]\n d2=terms[:,3,3]\n ab=terms[:,0,1]\n ac=terms[:,0,2]\n ad=terms[:,0,3]\n bc=terms[:,1,2]\n bd=terms[:,1,3]\n cd=terms[:,2,3]\n\n\n qvec_x=[ 1-2*c2-2*d2, 2*bc-2*ad, 2*ac+2*bd]\n qvec_y=[ 2*bc+2*ad, 1-2*b2-2*d2, 2*cd-2*ab]\n qvec_z=[ 2*bd-2*ac, 2*ab+2*cd, 1-2*b2-2*c2]\n qvec=torch.stack((torch.stack(qvec_x, dim=1), torch.stack(qvec_y, dim=1), torch.stack(qvec_z, dim=1)), dim=1)\n\n pv=torch.bmm(qvec, pv.unsqueeze(-1)).squeeze()\n\n return torch.cat((pw.unsqueeze(-1), pv), dim=1).view(original_shape)\n\ndef qeuler(q, order, epsilon=0):\n \"\"\"\n Convert quaternion(s) q to Euler angles.\n Expects a tensor of shape (*, 4), where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n\n original_shape = list(q.shape)\n original_shape[-1] = 3\n q = q.view(-1, 4)\n\n q0 = q[:, 0]\n q1 = q[:, 1]\n q2 = q[:, 2]\n q3 = q[:, 3]\n\n if order == 'xyz':\n x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))\n y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1+epsilon, 1-epsilon))\n z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))\n elif order == 'yzx':\n x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))\n y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))\n z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1+epsilon, 1-epsilon))\n elif order == 'zxy':\n x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1+epsilon, 1-epsilon))\n y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q1 * q1 + q2 * q2))\n z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q1 * q1 + q3 * q3))\n elif order == 'xzy':\n 
x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))\n y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))\n z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1+epsilon, 1-epsilon))\n elif order == 'yxz':\n x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1+epsilon, 1-epsilon))\n y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2*(q1 * q1 + q2 * q2))\n z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2*(q1 * q1 + q3 * q3))\n elif order == 'zyx':\n x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))\n y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1+epsilon, 1-epsilon))\n z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))\n else:\n raise\n\n return torch.stack((x, y, z), dim=1).view(original_shape)\n\n\ndef unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'):\n\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2*(fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2*fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n s = np.sqrt(3.0) * s\n\n number_of_weights = np.prod(kernel_shape)\n v_r = np.random.uniform(-s,s,number_of_weights)\n v_i = np.random.uniform(-s,s,number_of_weights)\n v_j = np.random.uniform(-s,s,number_of_weights)\n v_k = np.random.uniform(-s,s,number_of_weights)\n\n\n\n # Unitary quaternion\n for i in range(0, number_of_weights):\n norm = np.sqrt(v_r[i]**2 + v_i[i]**2 + v_j[i]**2 + v_k[i]**2)+0.0001\n v_r[i]/= norm\n v_i[i]/= norm\n v_j[i]/= norm\n v_k[i]/= norm\n\n v_r = v_r.reshape(kernel_shape)\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n return (v_r, v_i, v_j, v_k)\n\ndef random_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\n\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2*(fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2*fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n number_of_weights = np.prod(kernel_shape)\n v_r = np.random.uniform(0.0,1.0,number_of_weights)\n v_i = np.random.uniform(0.0,1.0,number_of_weights)\n v_j = np.random.uniform(0.0,1.0,number_of_weights)\n v_k = np.random.uniform(0.0,1.0,number_of_weights)\n\n v_r = v_r.reshape(kernel_shape)\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n weight_r = v_r * s\n weight_i = v_i * s\n weight_j = v_j * s\n weight_k = v_k * s\n return (weight_r, weight_i, weight_j, weight_k)\n\n\ndef quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\n\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2*(fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2*fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n rng = RandomState(np.random.randint(1,1234))\n\n\n # Generating randoms and purely imaginary quaternions :\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n modulus = chi.rvs(4,loc=0,scale=s,size=kernel_shape)\n\n# modulus= rng.uniform(size=kernel_shape)\n number_of_weights = np.prod(kernel_shape)\n\n\n v_i = np.random.normal(0,1.0,number_of_weights)\n v_j = np.random.normal(0,1.0,number_of_weights)\n v_k = np.random.normal(0,1.0,number_of_weights)\n\n # Purely imaginary quaternions unitary\n for i in range(0, number_of_weights):\n \tnorm = np.sqrt(v_i[i]**2 + v_j[i]**2 + v_k[i]**2 +0.0001)\n \tv_i[i]/= norm\n \tv_j[i]/= norm\n \tv_k[i]/= norm\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)\n\n weight_r = modulus * np.cos(phase)\n weight_i = modulus * v_i*np.sin(phase)\n weight_j = modulus * v_j*np.sin(phase)\n weight_k = modulus * v_k*np.sin(phase)\n\n return (weight_r, weight_i, weight_j, weight_k)\n\ndef create_dropout_mask(dropout_p, size, rng, as_type, operation='linear'):\n if operation == 'linear':\n mask = rng.binomial(n=1, p=1-dropout_p, size=size)\n return Variable(torch.from_numpy(mask).type(as_type))\n else:\n raise Exception(\"create_dropout_mask accepts only 'linear'. Found operation = \"\n + str(operation))\n\ndef affect_init(q_weight, init_func, rng, init_criterion):\n# if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size() or \\\n# r_weight.size() != k_weight.size() :\n# raise ValueError('The real and imaginary weights '\n# 'should have the same size . 
Found: r:'\n# + str(r_weight.size()) +' i:'\n# + str(i_weight.size()) +' j:'\n# + str(j_weight.size()) +' k:'\n# + str(k_weight.size()))\n#\n# elif r_weight.dim() != 2:\n# raise Exception('affect_init accepts only matrices. Found dimension = '\n# + str(r_weight.dim()))\n kernel_size = None\n r, i, j, k = init_func(q_weight.size(0), q_weight.size(1), rng, kernel_size, init_criterion)\n r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k)\n q_weight.data= torch.stack((r.type_as(q_weight.data),i.type_as(q_weight.data),j.type_as(q_weight.data),k.type_as(q_weight.data)),2)\n\n# r_weight.data = r.type_as(r_weight.data)\n# i_weight.data = i.type_as(i_weight.data)\n# j_weight.data = j.type_as(j_weight.data)\n# k_weight.data = k.type_as(k_weight.data)\n\n\n\n\nif __name__ == '__main__':\n p=torch.rand(16,64,4)\n pool_grid=torch.FloatTensor([[-1.0, -1.0, -1.0], [1.0, -1.0, -1.0],\n [-1.0, 1.0, -1.0], [1.0, 1.0, -1.0],\n [-1.0, -1.0, 1.0], [1.0, -1.0, 1.0],\n [-1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])\n#\n p=p.unsqueeze(-2)\n p=p.expand(p.size(0), p.size(1),8, p.size(3)).contiguous()\n pool_grid=pool_grid.view(1,1,8,3)\n pool_grid=pool_grid.expand(p.size(0), p.size(1),8, 3).contiguous()\n\n# p=p.cuda()\n# pool_grid=pool_grid.cuda()\n test1=qrotv(p, pool_grid)\n\n\n test3=qrotv3(p, pool_grid)\n print(test1[0,0,])\n print(test3[0,0,])\n\n input_lrf=torch.rand(16,64,8,4)\n t_ij=torch.rand(16,64,8,32,4)\n input_lrf=input_lrf.unsqueeze(-2)\n input_lrf=input_lrf.expand(t_ij.size(0), t_ij.size(1), t_ij.size(2), 32, t_ij.size(4)).contiguous()\n test2=qrotq(t_ij, input_lrf)\n test4=qrotq3(t_ij, input_lrf)\n","sub_path":"models/quat_ops.py","file_name":"quat_ops.py","file_ext":"py","file_size_in_byte":14430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"84441185","text":"\"\"\"Evaluates all the tests that live in `scss/tests/files`.\n\nA test is any file with a `.scss` extension. It'll be compiled, and the output\nwill be compared to the contents of a file named `foo.css`.\n\nCurrently, test files must be nested exactly one directory below `files/`.\nThis limitation is completely arbitrary. Files starting with '_' are skipped.\n\n\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport logging\nimport sys\nfrom importlib import import_module\n\nimport six\n\nimport scss\n\n\nif six.PY2:\n from io import open\n\n\nconsole = logging.StreamHandler()\nlogger = logging.getLogger('scss')\nlogger.setLevel(logging.ERROR)\nlogger.addHandler(console)\n\n\ndef test_pair_programmatic(scss_file_pair):\n scss_fn, css_fn = scss_file_pair\n\n # look for a python module related to the pair and execute it if found\n mod = None\n cfg_script = scss_fn.replace('.scss', '.py')\n if os.path.exists(cfg_script):\n sys.path[0:0] = [os.path.dirname(scss_fn)]\n mod = import_module(os.path.splitext(os.path.split(scss_fn)[1])[0])\n getattr(mod, 'setUp', lambda: None)()\n sys.path = sys.path[1:]\n\n with open(scss_fn) as fh:\n source = fh.read()\n with open(css_fn, 'r', encoding='utf8') as fh:\n expected = fh.read()\n\n directory, _ = os.path.split(scss_fn)\n include_dir = os.path.join(directory, 'include')\n scss.config.STATIC_ROOT = os.path.join(directory, 'static')\n\n try:\n compiler = scss.Scss(scss_opts=dict(style='expanded'), search_paths=[include_dir, directory])\n actual = compiler.compile(source)\n\n getattr(mod, 'tearDown', lambda:None)()\n\n # Normalize leading and trailing newlines\n actual = actual.strip('\\n')\n expected = expected.strip('\\n')\n\n assert expected == actual\n\n finally:\n # cleanup generated assets if any\n assets_dir = os.path.join(directory, 'static', 'assets')\n if os.path.isdir(assets_dir):\n for x in os.listdir(assets_dir):\n if x != '.placeholder':\n 
os.remove(os.path.join(assets_dir, x))\n\ndef test_rel_import():\n\n scss_vars = {}\n _scss = scss.Scss(scss_vars=scss_vars)\n\n actual = _scss.compile(scss_file=os.path.join(os.path.dirname(__file__),\n 'files', 'general',\n 'relative-import.fscss'))\n\n expected = open(os.path.join(os.path.dirname(__file__), 'files',\n 'general', 'relative-import.css')).read()\n\n assert expected == actual\n","sub_path":"scss/tests/test_files.py","file_name":"test_files.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"502515817","text":"import tensorflow as tf\r\nimport time\r\nimport model_helper as helper\r\nfrom utils import misc_util as misc\r\nimport os\r\n\r\n\r\ntf.logging.set_verbosity(tf.logging.INFO)\r\n\r\n\r\ndef get_model_creator(model_type):\r\n # from models.vgg16 import VGG16\r\n from models.inception_v3 import InceptionV3\r\n from models.vgg_16 import VGG16B\r\n from models.resnet_v1 import ResNetV1\r\n if model_type == \"VGG_16\":\r\n return VGG16B\r\n elif model_type == \"InceptionV3\":\r\n return InceptionV3\r\n elif model_type == \"ResNetV1\":\r\n return ResNetV1\r\n\r\n\r\ndef evaluate(hparams, scope=None, target_session=\"\", ckpt_path=None,\r\n summary_writer=None, global_step_=0, alternative=False):\r\n if alternative:\r\n out_dir = os.path.join(hparams.base_dir, \"train\")\r\n else:\r\n out_dir = os.path.join(hparams.base_dir, \"eval\")\r\n ckpt_path = os.path.join(hparams.base_dir, \"ckpt\")\r\n if not misc.check_file_existence(out_dir):\r\n tf.gfile.MakeDirs(out_dir)\r\n tf.logging.info(\"All eval relevant results will be put in %s\" % out_dir)\r\n\r\n # Create model\r\n model_creator = get_model_creator(hparams.model_type)\r\n eval_model = helper.create_eval_model(model_creator,\r\n hparams, scope)\r\n config_proto = misc.get_config_proto(\r\n log_device_placement=hparams.log_device_placement,\r\n num_intra_threads=hparams.num_intra_threads,\r\n num_inter_threads=hparams.num_inter_threads)\r\n eval_sess = tf.Session(\r\n target=target_session, config=config_proto,\r\n graph=eval_model.graph)\r\n tf.logging.info(\"Create model successfully\")\r\n with eval_model.graph.as_default():\r\n loaded_eval_model, global_step = helper.create_or_load_model(\r\n eval_model.model,\r\n ckpt_path,\r\n eval_sess)\r\n if global_step > 0:\r\n if global_step_ > 0:\r\n assert global_step_ == global_step\r\n tf.logging.info(\"Loading model from global step %d to evaluate\" % global_step)\r\n else:\r\n tf.logging.info(\"With global step is 0, can not execute 
evaluation\")\r\n return\r\n # Summary writer\r\n if summary_writer is None:\r\n summary_name = \"eval_summary\"\r\n summary_path = os.path.join(out_dir, summary_name)\r\n if not tf.gfile.Exists(summary_path):\r\n tf.gfile.MakeDirs(summary_path)\r\n summary_writer = tf.summary.FileWriter(\r\n os.path.join(out_dir, summary_name), eval_model.graph)\r\n eval_sess.run(eval_model.data_wrapper.initializer)\r\n tf.logging.info(\"Ready to eval\")\r\n step = 0\r\n accuracies = 0.\r\n while True:\r\n start_time = time.time()\r\n try:\r\n tf.logging.info(\"Start eval step:%d\" % step)\r\n results = loaded_eval_model.eval(eval_sess)\r\n summary_writer.add_summary(results.summary, global_step)\r\n tf.logging.info(\"Evaluation step %d, accuracy is %f, %s\"\r\n % (step, results.accuracy, time.ctime()))\r\n accuracies += results.accuracy\r\n step += 1\r\n except tf.errors.OutOfRangeError:\r\n avg_accuracy = accuracies / step\r\n tf.logging.info(\"After %d steps of evaluation, accuracy is %f \"\r\n % (step, avg_accuracy, time.time()-start_time))\r\n tf.logging.info(\"Finish evaluating\")\r\n summary_writer.close()\r\n break\r\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"321943727","text":"from django.shortcuts import get_object_or_404\nfrom django.views.generic import CreateView, UpdateView, ListView,\\\n DeleteView, View\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.core.exceptions import PermissionDenied\nimport random\n# Create your views here.\n\nfrom apps.tournament.models import Tournament, Team, Round, Match\nfrom apps.tournament.forms import RoundCreateForm, TournCreateForm\n\n\nclass StaffOnly(object):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_staff:\n raise PermissionDenied\n return super(StaffOnly, self).dispatch(request, *args, **kwargs)\n\n\nclass CreateTournamentView(StaffOnly, CreateView):\n\n model = Tournament\n template_name = 'tourn_create.html'\n success_url = '/tournament/admin/'\n form_class = TournCreateForm\n\n def form_valid(self, form):\n self.object = form.save()\n if form.cleaned_data['generate_team']:\n pairs = generate_player_pair(self.object.tour_players.all())\n for pair in pairs:\n team = Team.objects.create(tournament=self.object)\n for i in pair:\n team.team_players.add(i)\n team.generate_name()\n team.save()\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass ListTournamentView(ListView):\n\n model = Tournament\n template_name = 'tourn_list.html'\n context_object_name = 'tournaments'\n\n\nclass DeleteTournamentView(StaffOnly, DeleteView):\n\n model = Tournament\n success_url = '/tournament/admin/'\n\n\nclass EditTournamentView(StaffOnly, UpdateView):\n\n model = Tournament\n template_name = 'tourn_edit.html'\n fields = ('name',)\n success_url = '/tournament/admin/'\n\n def get_context_data(self, **kwargs):\n context = super(EditTournamentView, self).get_context_data(**kwargs)\n context['teams'] = self.get_object().team_set\n context['rounds'] = self.get_object().round_set\n return context\n\n\nclass CreateRoundView(StaffOnly, CreateView):\n\n model = Round\n template_name = 'round_create.html'\n form_class = 
RoundCreateForm\n\n def get_success_url(self):\n return '/tournament/edit/%s/' % (self.tourn.pk)\n\n def dispatch(self, *args, **kwargs):\n self.tourn = get_object_or_404(Tournament, pk=kwargs['tourn'])\n return super(CreateRoundView, self).dispatch(*args, **kwargs)\n\n def get_form(self, form_class):\n form = super(CreateRoundView, self).get_form(form_class)\n form.fields['round_team'].queryset = self.tourn.team_set.all()\n return form\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.tournament = self.tourn\n self.object.save()\n for team in form.cleaned_data['round_team']:\n self.object.round_team.add(team)\n self.object.save()\n if self.object.round_type == 'reg':\n pairs = generate_matches_pairs(self.object.round_team.all())\n else:\n pairs = generate_play_off_pairs(self.object.round_team.all())\n for i in range(form.cleaned_data['match_count']):\n for pair in pairs:\n Match.objects.create(team1=pair[0], team2=pair[1],\n match_round=self.object,\n tournament=self.tourn)\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass RoundListView(ListView):\n\n model = Round\n template_name = 'round_list.html'\n context_object_name = 'rounds'\n\n def get_queryset(self):\n self.tourn = get_object_or_404(Tournament, pk=self.kwargs['tourn'])\n return Round.objects.filter(tournament=self.tourn)\n\n\nclass MatchesListView(ListView):\n model = Match\n template_name = 'matches_list.html'\n context_object_name = 'matches'\n\n def get_queryset(self):\n self.round = get_object_or_404(Round, pk=self.kwargs['round'])\n return Match.objects.filter(match_round=self.round)\n\n def get_context_data(self, **kwargs):\n context = super(MatchesListView, self).get_context_data(**kwargs)\n context['round'] = self.round\n return context\n\n\nclass MatchesAdminEditView(StaffOnly, MatchesListView):\n\n template_name = 'matches_admin.html'\n\n\nclass AdminTournamentView(StaffOnly, ListView):\n\n model = Tournament\n template_name = 
'admin_main.html'\n context_object_name = 'tournaments'\n\n\nclass GetMatchView(View):\n\n def get(self, request):\n match_pk = request.GET.get('pk')\n match = Match.objects.get(pk=match_pk)\n res = {}\n res['name_1'] = match.team1.name\n res['name_2'] = match.team2.name\n res['score_1'] = match.team_1_hit\n res['score_2'] = match.team_2_hit\n return JsonResponse(res)\n\n\nclass GetMatchListView(View):\n\n def get(self, request):\n round_pk = request.GET.get('pk')\n round_entry = Round.objects.get(pk=round_pk)\n res = {}\n for i in round_entry.match_set.all():\n res[i.pk] = i.show_score()\n return JsonResponse(res)\n\n\nclass SetMatchScore(View):\n\n def post(self, request):\n match_pk = request.POST.get('pk')\n score_1 = request.POST.get('score_1')\n score_2 = request.POST.get('score_2')\n match = Match.objects.get(pk=match_pk)\n match.set_result(score_1, score_2)\n res = {}\n res['pk'] = match_pk\n res['score'] = '%s : %s' % (score_1, score_2)\n return JsonResponse(res)\n\n\nclass TournamentTable(ListView):\n\n model = Match\n tempalte_name = 'tourn_table'\n\n\ndef generate_player_pair(query):\n random_index = int(len(query)/5)\n res = []\n first_index = 0\n last_index = random_index\n one_more = True\n middle_index = len(query)/2\n while one_more:\n first_query = list(query[first_index:last_index])\n last_query = list(query.reverse()[first_index:last_index])\n for i in range(len(first_query)):\n first_choice = random.choice(first_query)\n first_query.remove(first_choice)\n second_choice = random.choice(last_query)\n last_query.remove(second_choice)\n res.append((first_choice, second_choice))\n if last_index == middle_index:\n one_more = False\n if last_index + random_index <= middle_index:\n first_index += random_index\n last_index += random_index\n else:\n new_random_index = middle_index - last_index\n first_index += random_index\n last_index += new_random_index\n return res\n\n\ndef generate_matches_pairs(query):\n res = []\n query = list(query)\n for i in 
query:\n for j in query[query.index(i)+1:]:\n res.append((i, j))\n return res\n\n\ndef generate_play_off_pairs(query):\n res = []\n query = list(query)\n for i in range(0, len(query), 2):\n res.append((query[i], query[i+1]))\n return res\n","sub_path":"apps/tournament/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"344318266","text":"import time\n\ndef update_index(index_filename, left, right):\n '''Append a record to the index.'''\n\n # Read existing data.\n with open(index_filename, 'r') as raw:\n reader = csv.reader(raw)\n records = []\n for r in reader:\n records.append(r)\n \n # Create new record.\n timestamp = time.strftime('%Y-%m-%d')\n data_filename = left + '-' + right + '.csv'\n new_record = (timestamp, left, right, data_filename)\n \n # Save.\n records.append(new_record)\n with open(index_filename, 'w') as raw:\n writer = csv.writer(raw)\n writer.writerows(records)\n","sub_path":"src/syndicate/make_index.py","file_name":"make_index.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"411625083","text":"import logging\nimport time\n\nfrom django.core.management.base import BaseCommand\nfrom web3 import Web3\n\nfrom hub20.apps.blockchain.client import get_web3\nfrom hub20.apps.ethereum_money.client import get_token_information\nfrom hub20.apps.ethereum_money.models import EthereumToken\nfrom hub20.apps.raiden import models\nfrom hub20.apps.raiden.client import RaidenClient, RaidenConnectionError\nfrom hub20.apps.raiden.contracts import get_token_network_registry_contract\n\nlogger = logging.getLogger(__name__)\n\n\ndef sync_token_networks(client: RaidenClient, w3: Web3):\n logger.info(\"Updating Token Networks\")\n known_tokens = client.raiden.token_networks.values_list(\"token__address\", flat=True)\n\n chain_id = int(w3.net.version)\n\n for token_address in client.get_token_addresses():\n if token_address in known_tokens:\n continue\n\n logger.info(f\"Getting information about token on {token_address}\")\n token_data = get_token_information(w3=w3, address=token_address)\n token = EthereumToken.make(address=token_address, chain_id=chain_id, **token_data)\n token_network_registry_contract = get_token_network_registry_contract(w3)\n token_network = models.TokenNetwork.make(token, token_network_registry_contract)\n client.raiden.token_networks.add(token_network)\n\n\ndef sync_channels(client: RaidenClient):\n logger.info(\"Updating Channels\")\n for channel_data in client.get_channels():\n channel = models.Channel.make(client.raiden, **channel_data)\n logger.info(f\"{channel} information synced\")\n\n\ndef sync_payments(client: RaidenClient):\n for channel in client.raiden.channels.all():\n logger.info(f\"Getting new payments from {channel}\")\n for payment_data in client.get_new_payments(channel):\n models.Payment.make(channel, **payment_data)\n\n\nclass Command(BaseCommand):\n help = \"Connects to Raiden via REST API to collect information about new transfers\"\n\n def handle(self, *args, **options):\n w3 = get_web3()\n\n while 
True:\n for raiden in models.Raiden.objects.all():\n client = RaidenClient(raiden)\n try:\n sync_token_networks(client, w3)\n sync_channels(client)\n sync_payments(client)\n except RaidenConnectionError as exc:\n logger.warn(str(exc))\n time.sleep(5)\n except Exception as exc:\n logger.exception(exc)\n time.sleep(3)\n","sub_path":"hub20/apps/raiden/management/commands/sync_raiden.py","file_name":"sync_raiden.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"635954754","text":"from lib.database import get_postgres_connection\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nimport datetime\npostgres = get_postgres_connection()\nPOSTGRES_HOST = postgres.host\nPOSTGRES_PORT = postgres.port\nPOSTGRES_USER = postgres.user\nPOSTGRES_PWD = postgres.password\nPOSTGRES_DB = postgres.db\nBase = automap_base()\n# Create engine, session\nengine = create_engine(\n \"postgresql+psycopg2://\"\n + POSTGRES_USER\n + \":\"\n + POSTGRES_PWD\n + \"@\"\n + POSTGRES_HOST\n + \":\"\n + POSTGRES_PORT\n + \"/\"\n + POSTGRES_DB,\n client_encoding=\"utf-8\",\n)\nsession = Session(engine)\n# Reflect the tables\nBase.prepare(engine, reflect=True)\n# Mapped classes are now created with names by default\n# matching that of the table name.\nScantistLibraryVersion = Base.classes.scantist_library_version\nScantistLibrary = Base.classes.scantist_library\nScantistLibraryVersionIssue = Base.classes.scantist_libraryversionissue\nScantistSecurityIssue = Base.classes.scantist_securityissue\n\n\ndef get_updated_vul_info(timestamp=datetime.datetime(2017, 7, 25), lib_only=False):\n \"\"\"\n start from issues from a timestamp which is stored last time we did this update,\n get all related versions and their libname, vendor, version number\n \"\"\"\n updated_vul_ver_info = (\n session.query(\n ScantistLibrary.name,\n ScantistLibrary.vendor,\n ScantistLibraryVersion.version_number,\n ScantistSecurityIssue.public_id,\n ScantistLibraryVersionIssue.is_valid,\n )\n .filter(\n ScantistLibraryVersionIssue.processed_time > timestamp,\n ScantistLibrary.platform == 'Go',\n )\n .join(\n ScantistLibraryVersion,\n ScantistLibrary.id == ScantistLibraryVersion.library_id,\n )\n .join(\n ScantistLibraryVersionIssue,\n ScantistLibraryVersion.id\n == ScantistLibraryVersionIssue.library_version_id,\n )\n .join(\n ScantistSecurityIssue,\n ScantistLibraryVersionIssue.security_issue_id\n == 
ScantistSecurityIssue.id,\n )\n .order_by(ScantistLibraryVersionIssue.processed_time.asc())\n )\n unique_vul_node = {}\n for update in updated_vul_ver_info:\n if (\n not f\"{update[1]}-{update[0]}-{update[2]}-{update[3]}\"\n in unique_vul_node\n ):\n unique_vul_node[\n f\"{update[1]}-{update[0]}-{update[2]}-{update[3]}\"\n ] = update\n\n add_vul_rel = filter(lambda x: x[4] == True, list(unique_vul_node.values()))\n del_vul_rel = filter(lambda x: x[4] == False, list(unique_vul_node.values()))\n\n add_node = []\n del_node = {}\n\n for update in add_vul_rel:\n node = next((x for x in add_node if x[\"public_id\"] == update[3]), None)\n if node:\n if update[1] + \":\" + update[0] in node[\"affects\"]:\n node[\"affects\"][update[1] + \":\" + update[0]].append(update[2])\n else:\n node[\"affects\"][update[1] + \":\" + update[0]] = [update[2]]\n else:\n add_node.append(\n {\n \"public_id\": update[3],\n \"vulnerabilityId\": update[3],\n \"affects\": {update[1] + \":\" + update[0]: [update[2]]},\n }\n )\n libaffects_list = []\n affect_list = []\n vulnerable_lib = []\n vul_node = add_node\n vul_node_list = []\n for vul in vul_node:\n for libvendor, vers in vul[\"affects\"].items():\n for ver in vers:\n affect_list.append((vul['public_id'], libvendor.lstrip(':')+':'+ver))\n libaffects_list.append((vul['public_id'], libvendor.lstrip(':')))\n vulnerable_lib.append(libvendor.lstrip(':'))\n vul_node_list.append(vul['public_id'])\n vulnerable_lib = list(set(vulnerable_lib))\n libaffects_list = list(set(libaffects_list))\n affect_list = list(set(affect_list))\n vul_node_list =list(set(vul_node_list))\n if lib_only:\n return vulnerable_lib\n else:\n return libaffects_list, affect_list, vul_node_list\n\n","sub_path":"calculating_affected_libs/get_libaffect.py","file_name":"get_libaffect.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"344650468","text":"\"\"\"lstm_utils.py contains utility functions for running LSTM Baselines.\"\"\"\n\nimport os\nfrom typing import Any, Dict, List, Tuple\nimport torch\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset\nfrom .map_features_utils import MapFeaturesUtils\n\nfrom .baseline_config import *\n\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\n\n\n\ncmd_codes = dict(m=0, l=1, c=2, a=3, EOS=4, SOS=5, z=6)\nCOLOR_IDXS = slice(1,6)\n\n\ndef linear_cmd_to_tensor(cmd_index, end_position: tuple, start_position: tuple = None, pad=-1):\n start_pos = start_position if start_position is not None else (0, 0)\n return torch.tensor(\n [cmd_index, *([pad] * 5), start_pos[0], start_pos[1], *([pad] * 4), end_position[0], end_position[1]])\n\n\ndef linear_path_to_tensor(path, pad=-1):\n return torch.stack([linear_cmd_to_tensor(cmd_codes['m'], path[0], pad=pad)] + [\n linear_cmd_to_tensor(cmd_codes['l'], path[i], path[i - 1], pad=pad) for i in range(1, len(path))])\n\n\ndef apply_colors(paths, colors, idxs: slice = COLOR_IDXS):\n colors = colors if colors is not None else [-1] * len(paths)\n for i in range(len(paths)):\n paths[i][:, idxs] = colors[i]\n return paths\n\n\n\n\nclass RasterDataset(Dataset):\n \"\"\"PyTorch Dataset for LSTM Baselines.\"\"\"\n def __init__(self, data_dict: Dict[str, Any], args: Any, mode: str):\n \"\"\"Initialize the Dataset.\n\n Args:\n data_dict: Dict containing all the data\n args: Arguments passed to the baseline code\n mode: train/val/test mode\n\n \"\"\"\n self.data_dict = data_dict\n self.args = args\n self.mode = mode\n\n # Get input\n self.input_data = data_dict[\"{}_input\".format(mode)]\n if mode != \"test\":\n self.output_data = data_dict[\"{}_output\".format(mode)]\n self.data_size = self.input_data.shape[0]\n\n # Get helpers\n self.helpers = self.get_helpers()\n self.helpers = 
list(zip(*self.helpers))\n \n \n \n from argoverse.map_representation.map_api import ArgoverseMap\n\n self.avm = ArgoverseMap()\n self.mf=MapFeaturesUtils()\n \n\n def __len__(self):\n \"\"\"Get length of dataset.\n\n Returns:\n Length of dataset\n\n \"\"\"\n return self.data_size\n\n def __getitem__(self, idx: int\n ) -> Tuple[torch.FloatTensor, Any, Dict[str, np.ndarray]]:\n \"\"\"Get the element at the given index.\n\n Args:\n idx: Query index\n\n Returns:\n A list containing input Tensor, Output Tensor (Empty if test) and viz helpers. \n\n \"\"\"\n helper=self.helpers[idx]\n cnt_lines,img,cnt_lines_norm=self.mf.get_candidate_centerlines_for_trajectory(\n helper[0] if self.mode != \"test\" else helper[0][:20],\n yaw_deg=helper[5],centroid=helper[0][0],\n city_name=helper[1][0],avm=self.avm,\n viz=True,\n seq_len = 80,\n max_candidates=10,\n )\n \n \n res = torch.cat([linear_path_to_tensor(path, -1) for path in cnt_lines_norm], 0)\n\n return (\n torch.FloatTensor(self.input_data[idx]),\n torch.empty(1) if self.mode == \"test\" else torch.FloatTensor(\n self.output_data[idx]),\n img,\n cnt_lines,\n cnt_lines_norm,\n res,\n \n )\n\n def get_helpers(self) -> Tuple[Any]:\n \"\"\"Get helpers for running baselines.\n\n Returns:\n helpers: Tuple in the format specified by LSTM_HELPER_DICT_IDX\n\n Note: We need a tuple because DataLoader needs to index across all these helpers simultaneously.\n\n \"\"\"\n helper_df = self.data_dict[f\"{self.mode}_helpers\"]\n candidate_centerlines = helper_df[\"CANDIDATE_CENTERLINES\"].values\n# print(\"ss\",candidate_centerlines)\n candidate_nt_distances = helper_df[\"CANDIDATE_NT_DISTANCES\"].values\n xcoord = np.stack(helper_df[\"FEATURES\"].values\n )[:, :, FEATURE_FORMAT[\"X\"]].astype(\"float\")\n ycoord = np.stack(helper_df[\"FEATURES\"].values\n )[:, :, FEATURE_FORMAT[\"Y\"]].astype(\"float\")\n centroids = np.stack((xcoord, ycoord), axis=2)\n _DEFAULT_HELPER_VALUE = np.full((centroids.shape[0]), None)\n city_names = 
np.stack(helper_df[\"FEATURES\"].values\n )[:, :, FEATURE_FORMAT[\"CITY_NAME\"]]\n seq_paths = helper_df[\"SEQUENCE\"].values\n translation = (helper_df[\"TRANSLATION\"].values\n if self.args.normalize else _DEFAULT_HELPER_VALUE)\n rotation = (helper_df[\"ROTATION\"].values\n if self.args.normalize else _DEFAULT_HELPER_VALUE)\n\n use_candidates = self.args.use_map and self.mode == \"test\"\n\n candidate_delta_references = (\n helper_df[\"CANDIDATE_DELTA_REFERENCES\"].values\n if self.args.use_map and use_candidates else _DEFAULT_HELPER_VALUE)\n delta_reference = (helper_df[\"DELTA_REFERENCE\"].values\n if self.args.use_delta and not use_candidates else\n _DEFAULT_HELPER_VALUE)\n\n helpers = [None for i in range(len(LSTM_HELPER_DICT_IDX))]\n\n # Name of the variables should be the same as keys in LSTM_HELPER_DICT_IDX\n for k, v in LSTM_HELPER_DICT_IDX.items():\n helpers[v] = locals()[k.lower()]\n\n return tuple(helpers)\n","sub_path":"src/argoverse/utils/.ipynb_checkpoints/raster_utils-checkpoint.py","file_name":"raster_utils-checkpoint.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"455763441","text":"# -*- coding: utf-8 -*-\n# 15/6/27\n# create by: snower\n\nimport os\nfrom .utils import string_type, number_type\n\n__config = {}\n\nDEFAULT_CONFIG = {\n \"LOG_FILE\": \"/var/log/funsun.log\",\n \"LOG_LEVEL\": \"ERROR\",\n \"LOG_FORMAT\": \"\",\n\n \"BIND_ADDRESS\": \"0.0.0.0\",\n \"PORT\": 6458,\n\n \"HTTP_BIND\": \"\",\n\n \"STORE_DRIVER\": \"mem\",\n\n \"STORE_MEM_STORE_FILE\": \"/tmp/forsun.session\",\n\n \"STORE_REDIS_HOST\": \"127.0.0.1\",\n \"STORE_REDIS_PORT\": 6379,\n \"STORE_REDIS_DB\": 0,\n \"STORE_REDIS_PREFIX\": \"forsun\",\n \"STORE_REDIS_SERVER_ID\": 0,\n \"STORE_REDIS_MAX_CONNECTIONS\": 8,\n \"STORE_REDIS_CLIENT_TIMEOUT\": 7200,\n \"STORE_REDIS_BULK_SIZE\": 5,\n\n \"ACTION_SHELL_CWD\": \"/tmp\",\n \"ACTION_HTTP_MAX_CLIENTS\": 64,\n \"ACTION_HTTP_CONNECT_TIMEOUT\": 5,\n \"ACTION_HTTP_REQUEST_TIMEOUT\": 120,\n \"ACTION_REDIS_MAX_CONNECTIONS\": 8,\n \"ACTION_REDIS_CLIENT_TIMEOUT\": 7200,\n \"ACTION_REDIS_BULK_SIZE\": 5,\n \"ACTION_THRIFT_MAX_CONNECTIONS\": 64,\n \"ACTION_MYSQL_USER\": \"root\",\n \"ACTION_MYSQL_PASSWD\": \"\",\n \"ACTION_MYSQL_MAX_CONNECTIONS\": 8,\n \"ACTION_MYSQL_WAIT_CONNECTION_TIMEOUT\": 7200,\n \"ACTION_MYSQL_IDLE_SECONDS\": 120,\n\n \"EXTENSION_PATH\": \"\",\n \"EXTENSIONS\": [],\n}\n\ndef get(name, default=None):\n return __config.get(name, default)\n\ndef set(name, value):\n old_value = __config[name]\n __config[name] = value\n return old_value\n\ndef update(config):\n __config.update(config)\n return __config\n\nupdate(DEFAULT_CONFIG)\nfor key, value in DEFAULT_CONFIG.items():\n env_value = os.environ.get(key)\n if env_value is not None:\n try:\n if isinstance(value, number_type):\n set(key, int(env_value))\n elif isinstance(value, float):\n set(key, float(env_value))\n elif isinstance(value, string_type):\n set(key, str(env_value))\n 
except:pass","sub_path":"forsun/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"180362107","text":"import numpy as np\nfrom sympy import symbols, S, binomial, summation, sqrt, cos, sin, Function,atan2,expand_trig,diff,Matrix\nfrom .hamiltonian import Hamiltonian\nfrom .disturbing_function import get_fg_coeffs , laplace_b\nfrom .disturbing_function import DFCoeff_C,eval_DFCoeff_dict,get_DFCoeff_symbol\nfrom .nbody_simulation_utilities import get_canonical_heliocentric_orbits,add_canonical_heliocentric_elements_particle\nfrom itertools import combinations\nimport rebound\nimport warnings\ndef get_re_im_components(x,y,k):\n \"\"\"\n Get the real and imaginary components of\n (x + sgn(k) * i y)^|k|\n \"\"\"\n if k==0:\n return 1,0\n absk = abs(k)\n sgnk = np.sign(k)\n re,im=0,0\n for l in range(0,absk+1):\n b = binomial(absk,l)\n if l%2==0:\n re += b * (sgnk * y)**l * x**(absk-l) * (-1)**(l//2)\n else:\n im += b * (sgnk * y)**l * x**(absk-l) * (-1)**((l-1)//2)\n return re,im\n\ndef single_true(iterable): # Returns true if only one element in the iterable is set \n # make generator from iterable setting any zeros as valid entries (otherwise they evaluate to False)\n i = iter([item if item != 0 else True for item in iterable]) # make generator and set zeros to valid inputs\n return any(i) and not any(i) # any(i) True once first valid item found. 
not any(i) ensures no additional ones exist\n\nclass PoincareParticle(object):\n \"\"\"\n A class representing an individual member (star, planet, or test particle) of a planetary system.\n\n Attributes\n ----------\n m : float\n Mass of particle.\n Mstar : float\n Mass of central body.\n \"\"\"\n def __init__(self, m, Mstar, l, gamma, q, G=1., sLambda=None, sGamma=None, sQ=None, Lambda=None, Gamma=None, Q=None, a=None, e=None, inc=None):\n \"\"\"\n We store the specific Lambda = sqrt(G*M*a) and specific Gamma = sLambda*(1-sqrt(1-e**2)) to support test particles\n \"\"\"\n if not single_true([sLambda, Lambda, a]):\n raise AttributeError(\"Can only pass one of Lambda, sLambda (specific Lambda, i.e. per unit mass), or a (semimajor axis)\")\n if not single_true([sGamma, Gamma, e]):\n raise AttributeError(\"Can only pass one of Gamma, sGamma (specific Gamma, i.e. per unit mass), or e (eccentricity)\")\n if not single_true([sQ, Q, inc]):\n raise AttributeError(\"Can only pass one of Q, sQ (specific Q, i.e. 
per unit mass), or inc (inclination)\")\n \n mu = m * Mstar / (m + Mstar)\n if sLambda:\n self.sLambda = sLambda\n elif Lambda:\n try:\n self.sLambda = Lambda/mu\n except:\n raise AttributeError(\"Need to pass specific actions (sLambda, sGamma, and sQ) or a, e, and inc for test particles\")\n elif a:\n self.sLambda = np.sqrt(G*M*a)\n\n if Gamma:\n try:\n sGamma = Gamma/mu\n except:\n raise AttributeError(\"Need to pass specific actions (sLambda, sGamma, and sQ) or a, e, and inc for test particles\")\n elif e:\n sGamma = self.sLambda*(1.-np.sqrt(1.-e**2))\n\n if Q:\n try:\n sQ = Q/mu\n except:\n raise AttributeError(\"Need to pass specific actions (sLambda, sGamma, and sQ) or a, e, and inc for test particles\")\n elif inc:\n sQ = (self.sLambda - self.sGamma) * (1 - np.cos(inc))\n\n self.skappa = np.sqrt(2.*sGamma)*np.cos(gamma) # X per unit sqrt(mass)\n self.seta = np.sqrt(2.*sGamma)*np.sin(gamma)\n\n self.ssigma = np.sqrt(2.*sQ)*np.cos(q) # Xinc per unit sqrt(mass)\n self.srho = np.sqrt(2.*sQ)*np.sin(q)\n\n self.m = m \n self.Mstar = Mstar\n self.G = G\n self.l = l\n \n @property\n def mu(self):\n return self.m * self.M / (self.M + self.m)\n @property \n def M(self):\n return self.Mstar + self.m\n\n @property\n def x(self):\n return (self.kappa - 1j * self.eta) / np.sqrt(2)\n @property\n def X(self):\n return self.x * np.sqrt(2 / self.Lambda)\n @property\n def y(self):\n return (self.sigma - 1j * self.rho) / np.sqrt(2)\n @property\n def Y(self):\n return self.y * np.sqrt(0.5 / self.Lambda)\n\n @property\n def xbar(self):\n return np.conj(self.x)\n @property\n def Xbar(self):\n return np.conj(self.X)\n @property\n def ybar(self):\n return np.conj(self.y)\n @property\n def Ybar(self):\n return np.conj(self.Y)\n\n @property\n def kappa(self):\n return np.sqrt(self.mu)*self.skappa\n @kappa.setter\n def kappa(self, value):\n self.skappa = value/np.sqrt(self.mu)\n @property\n def eta(self):\n return np.sqrt(self.mu)*self.seta\n @eta.setter\n def eta(self, value):\n 
self.seta = value/np.sqrt(self.mu)\n\n @property\n def sigma(self):\n return np.sqrt(self.mu)*self.ssigma\n @sigma.setter\n def sigma(self, value):\n self.ssigma = value/np.sqrt(self.mu)\n\n @property\n def rho(self):\n return np.sqrt(self.mu)*self.srho\n @rho.setter\n def rho(self, value):\n self.srho = value/np.sqrt(self.mu)\n\n @property\n def Lambda(self):\n return self.mu*self.sLambda\n @Lambda.setter\n def Lambda(self, value):\n self.sLambda = value/self.mu\n\n @property\n def Gamma(self):\n return self.mu*(self.skappa**2+self.seta**2)/2.\n @Gamma.setter\n def Gamma(self, value):\n self.sGamma = value/self.mu\n\n @property\n def Q(self):\n return self.mu*(self.ssigma**2+self.srho**2)/2.\n @Q.setter\n def Q(self, value):\n self.sQ = value/self.mu\n\n @property\n def sGamma(self):\n return (self.skappa**2+self.seta**2)/2.\n @property\n def gamma(self):\n return np.arctan2(self.seta, self.skappa)\n\n @property\n def sQ(self):\n return (self.ssigma**2+self.srho**2)/2.\n @property\n def q(self):\n return np.arctan2(self.srho,self.ssigma)\n\n @property\n def a(self):\n return self.sLambda**2/self.G/self.M\n @property\n def e(self):\n GbyL = self.sGamma/self.sLambda\n if 1-(1.-GbyL)*(1.-GbyL) < 0:\n raise AttributeError(\"sGamma:{0}, sLambda:{1}, GbyL:{2}, val:{3}\".format(self.sGamma, self.sLambda, GbyL, 1-(1.-GbyL)*(1.-GbyL)))\n return np.sqrt(1 - (1-GbyL)*(1-GbyL))\n @property\n def inc(self):\n QbyLminusG = self.sQ / (self.sLambda - self.sGamma)\n cosi = 1 - QbyLminusG\n if np.abs(cosi) > 1:\n raise AttributeError(\"sGamma:{0}, sLambda:{1}, sQ:{2}, cosi:{3}\".format(self.sGamma, self.sLambda, self.sQ,cosi))\n return np.arccos(cosi)\n\n @property\n def pomega(self):\n return -self.gamma\n\n @property\n def Omega(self):\n return -self.q\n @property\n def n(self):\n return np.sqrt(self.G*self.M/self.a**3)\n\nclass Poincare(object):\n \"\"\"\n A class representing a collection of Poincare particles constituting a planetary system.\n \"\"\"\n def __init__(self, G, 
poincareparticles=[]):\n self.G = G\n self.t = 0\n self.particles = [PoincareParticle(m=np.nan, Mstar=np.nan, G=np.nan, l=np.nan, gamma=np.nan,q=np.nan, sLambda=np.nan, sGamma=np.nan, sQ=np.nan)] # dummy particle for primary\n try:\n for p in poincareparticles:\n self.add(m=p.m, Mstar=p.Mstar, sLambda=p.sLambda, l=p.l, sGamma=p.sGamma, gamma=p.gamma, sQ = p.sQ,q=p.q)\n except TypeError:\n raise TypeError(\"poincareparticles must be a list of PoincareParticle objects\")\n\n @classmethod\n def from_Simulation(cls, sim, average=True):\n masses = [p.m for p in sim.particles]\n Mstar = masses[0]\n pvars = Poincare(sim.G)\n ps = sim.particles\n o = get_canonical_heliocentric_orbits(sim)\n for i in range(1,sim.N-sim.N_var):\n orb = o[i-1]\n M = Mstar + masses[i]\n m = masses[i]\n if orb.a <= 0. or orb.e >= 1.:\n raise AttributeError(\"Celmech error: Poincare.from_Simulation only support elliptical orbits. Particle {0}'s (heliocentric) a={1}, e={2}\".format(i, orb.a, orb.e))\n sLambda = np.sqrt(sim.G*M*orb.a)\n sGamma = sLambda*(1.-np.sqrt(1.-orb.e**2))\n sQ = sLambda*np.sqrt(1.-orb.e**2) * (1 - np.cos(orb.inc))\n pvars.add(m=m,Mstar=Mstar, sLambda=sLambda, l=orb.l, sGamma=sGamma, sQ = sQ, gamma=-orb.pomega,q=-orb.Omega)\n if average is True:\n pvars.average_synodic_terms()\n return pvars\n\n def to_Simulation(self, masses=None, average=True):\n \"\"\" \n Convert Poincare object to a REBOUND simulation.\n\n Arguments\n --------\n masses : array-like, optional\n If masses is None, will calculate physical masses from the m and M \n parameters stored by the particles. If masses is a list, will use \n those as the physical masses. 
Default is None.\n average : boole, optional\n If True, semi-major axes of simulation planets will be computed\n by converting 'mean' elements to 'osculating' ones to 0th order\n in eccentricity.\n\n Returns\n -------\n sim : rebound.Simulation\n \"\"\" \n\n if average is True:\n self.average_synodic_terms(inverse=True)\n\n if not masses:\n p1 = self.particles[1]\n masses = [p1.Mstar] + [p.m for p in self.particles]\n\n sim = rebound.Simulation()\n sim.G = self.G\n sim.add(m=masses[0])\n ps = self.particles\n for i in range(1, self.N):\n p = ps[i]\n elements = {element:getattr(p,element) for element in ['a','e','inc','l','pomega','Omega']}\n add_canonical_heliocentric_elements_particle(masses[i],elements,sim)\n sim.move_to_com()\n return sim\n \n def add(self, **kwargs):\n self.particles.append(PoincareParticle(G=self.G, **kwargs))\n\n def copy(self):\n return Poincare(self.G, self.particles[1:self.N])\n\n def average_synodic_terms(self, inverse=False):\n \"\"\"\n Do a canonical transformation to correct the Lambdas for the fact that we have implicitly\n averaged over all the synodic terms we do not include in the Hamiltonian.\n \"\"\"\n corrpvars = self.copy() # so we use original values when planet appears in more than one pair\n pairs = combinations(range(1,self.N), 2)\n #TODO assumes particles ordered going outward so a1 < a2 always. Sort first?\n for i1, i2 in pairs:\n ps = self.particles\n m1 = ps[i1].m\n m2 = ps[i2].m\n deltalambda = ps[i1].l-ps[i2].l\n G = self.G\n\n prefac = G/ps[i2].a/(ps[i1].n-ps[i2].n) \n alpha = ps[i1].a/ps[i2].a\n summation = (1. 
+ alpha**2 - 2*alpha*np.cos(deltalambda))**(-0.5)\n s = prefac*(alpha*np.cos(deltalambda)-summation+laplace_b(0.5, 0, 0, alpha)/2.)\n if inverse is True:\n s *= -1\n corrpvars.particles[i1].sLambda += m2*s # prefac*m1*m2*s/m1 (sLambda=Lambda/m)\n corrpvars.particles[i2].sLambda -= m1*s\n \n for i, p in enumerate(self.particles):\n p.sLambda = corrpvars.particles[i].sLambda\n\n @property\n def N(self):\n return len(self.particles)\n\nclass PoincareHamiltonian(Hamiltonian):\n \"\"\"\n A class representing the Hamiltonian governing the dynamical evolution of a system of particles,\n stored as a :class:`celmech.poincare.Poincare` instance.\n\n Attributes\n ----------\n H : sympy expression\n Symbolic expression for the Hamiltonian.\n NH : sympy expression\n Symbolic expression for the Hamiltonian with \n numerical values of parameters substituted\n where applicable.\n N : int\n Number of particles\n particles : list\n List of :class:`celmech.poincare.PoincareParticle`s \n making up the system.\n state : :class:`celmech.poincare.Poincare`\n A set of Poincare variables to which \n transformations are applied.\n \"\"\"\n def __init__(self, pvars):\n Hparams = {symbols('G'):pvars.G}\n pqpairs = []\n ps = pvars.particles\n H = S(0) \n for i in range(1, pvars.N):\n pqpairs.append(symbols(\"kappa{0}, eta{0}\".format(i))) \n pqpairs.append(symbols(\"Lambda{0}, lambda{0}\".format(i))) \n pqpairs.append(symbols(\"sigma{0}, rho{0}\".format(i))) \n Hparams[symbols(\"mu{0}\".format(i))] = ps[i].mu\n Hparams[symbols(\"m{0}\".format(i))] = ps[i].m\n Hparams[symbols(\"M{0}\".format(i))] = ps[i].M\n H = self.add_Hkep_term(H, i)\n self.resonance_indices = []\n super(PoincareHamiltonian, self).__init__(H, pqpairs, Hparams, pvars) \n \n @property\n def particles(self):\n return self.state.particles\n\n @property\n def N(self):\n return len(self.particles)\n \n def state_to_list(self, state):\n ps = state.particles\n vpp = 6 # vars per particle\n y = np.zeros(vpp*(state.N-1)) # remove 
padded 0th element in ps for y\n for i in range(1, state.N):\n y[vpp*(i-1)] = ps[i].kappa\n y[vpp*(i-1)+1] = ps[i].eta\n y[vpp*(i-1)+2] = ps[i].Lambda\n y[vpp*(i-1)+3] = ps[i].l \n y[vpp*(i-1)+4] = ps[i].sigma\n y[vpp*(i-1)+5] = ps[i].rho\n return y\n def set_secular_mode(self):\n # \n state = self.state\n for i in range(1,state.N):\n Lambda0,Lambda = symbols(\"Lambda{0}0 Lambda{0}\".format(i))\n self.H = self.H.subs(Lambda,Lambda0)\n self.Hparams[Lambda0] = state.particles[i].Lambda\n self._update()\n\n def set_planar_mode(self):\n state = self.state\n ps = state.particles\n for i in xrange(1,state.N):\n rho,sigma = symbols(\"rho{0} sigma{0}\".format(i))\n self.H = self.H.subs({rho:0,sigma:0})\n ps[i].srho = 0\n ps[i].ssigma = 0\n self._update() \n\n def update_state_from_list(self, state, y):\n ps = state.particles\n vpp = 6 # vars per particle\n for i in range(1, state.N):\n ps[i].skappa = y[vpp*(i-1)]/np.sqrt(ps[i].mu)\n ps[i].seta = y[vpp*(i-1)+1]/np.sqrt(ps[i].mu)\n ps[i].sLambda = y[vpp*(i-1)+2]/ps[i].mu\n ps[i].l = y[vpp*(i-1)+3]\n ps[i].ssigma = y[vpp*(i-1)+4] / np.sqrt(ps[i].mu) \n ps[i].srho = y[vpp*(i-1)+5] / np.sqrt(ps[i].mu) \n \n \n def add_Hkep_term(self, H, index):\n \"\"\"\n Add the Keplerian component of the Hamiltonian for planet ''.\n \"\"\"\n G, M, mu, Lambda = symbols('G, M{0}, mu{0}, Lambda{0}'.format(index))\n #m, M, mu, Lambda, lam, Gamma, gamma = self._get_symbols(index)\n H += -G**2*M**2*mu**3 / (2 * Lambda**2)\n return H\n def add_monomial_term(self,kvec,zvec,indexIn=1,indexOut=2,update=True):\n \"\"\"\n Add individual monomial term to Hamiltonian. The term \n is specified by 'kvec', which specifies the cosine argument\n and 'zvec', which specfies the order of inclination and\n eccentricities in the Taylor expansion of the \n cosine coefficient. 
\n \"\"\"\n if (indexIn,indexOut,(kvec,zvec)) in self.resonance_indices:\n warnings.warn(\"Monomial term alread included Hamiltonian; no new term added.\")\n return\n G = symbols('G')\n mIn,muIn,MIn,LambdaIn,lambdaIn,kappaIn,etaIn,sigmaIn,rhoIn = symbols('m{0},mu{0},M{0},Lambda{0},lambda{0},kappa{0},eta{0},sigma{0},rho{0}'.format(indexIn)) \n mOut,muOut,MOut,LambdaOut,lambdaOut,kappaOut,etaOut,sigmaOut,rhoOut = symbols('m{0},mu{0},M{0},Lambda{0},lambda{0},kappa{0},eta{0},sigma{0},rho{0}'.format(indexOut)) \n \n alpha = self.particles[indexIn].a/self.state.particles[indexOut].a\n\t# aIn = LambdaIn * LambdaIn / mIn / mIn / G / MIn\n\t# aOut = LambdaOut * LambdaOut / mOut / mOut / G / MOut\n # alpha = aIn/aOut\n # Resonance components\n #\n k1,k2,k3,k4,k5,k6 = kvec\n z1,z2,z3,z4 = zvec\n C = get_DFCoeff_symbol(k1,k2,k3,k4,k5,k6,z1,z2,z3,z4,indexIn,indexOut)\n C_dict = DFCoeff_C(k1,k2,k3,k4,k5,k6,z1,z2,z3,z4)\n C_val = eval_DFCoeff_dict(C_dict,alpha)\n self.Hparams[C] = C_val\n rtLIn = sqrt(LambdaIn)\n rtLOut = sqrt(LambdaOut)\n xin,yin = get_re_im_components(kappaIn/rtLIn ,-etaIn / rtLIn,k3)\n xout,yout = get_re_im_components( kappaOut/rtLOut, -etaOut/rtLOut,k4)\n uin,vin = get_re_im_components(sigmaIn/rtLIn/2, -rhoIn/rtLIn/2,k5)\n uout,vout = get_re_im_components(sigmaOut/rtLOut/2, -rhoOut/rtLOut/2,k6)\n\n re = uin*uout*xin*xout - vin*vout*xin*xout - uout*vin*xout*yin - uin*vout*xout*yin - uout*vin*xin*yout - uin*vout*xin*yout - uin*uout*yin*yout + vin*vout*yin*yout\n im = uout*vin*xin*xout + uin*vout*xin*xout + uin*uout*xout*yin - vin*vout*xout*yin + uin*uout*xin*yout - vin*vout*xin*yout - uout*vin*yin*yout - uin*vout*yin*yout\n \n GammaIn = (kappaIn*kappaIn + etaIn*etaIn)/2\n GammaOut = (kappaOut*kappaOut + etaOut*etaOut)/2\n QIn = (sigmaIn*sigmaIn + rhoIn*rhoIn)/2\n QOut = (sigmaOut*sigmaOut + rhoOut*rhoOut)/2\n \n eIn_sq_term = (2 * GammaIn / LambdaIn )**z3\n eOut_sq_term = (2 * GammaOut / LambdaOut )**z4\n incIn_sq_term = ( QIn / LambdaIn / 2 )**z1\n 
incOut_sq_term = ( QOut / LambdaOut / 2 )**z2\n \n # Update internal Hamiltonian\n aOut_inv = G*MOut*muOut*muOut / LambdaOut / LambdaOut \n prefactor1 = -G * mIn * mOut * aOut_inv\n prefactor2 = eIn_sq_term * eOut_sq_term * incIn_sq_term * incOut_sq_term \n trig_term = re * cos(k1 * lambdaOut + k2 * lambdaIn) - im * sin(k1 * lambdaOut + k2 * lambdaIn) \n \n # Keep track of resonances\n self.resonance_indices.append((indexIn,indexOut,(kvec,zvec)))\n \n self.H += prefactor1 * C * prefactor2 * trig_term\n if update:\n self._update()\n \n def add_all_MMR_and_secular_terms(self,p,q,max_order,indexIn = 1, indexOut = 2):\n \"\"\"\n Add all disturbing function terms associated with a p:p-q mean\n motion resonance along with secular terms up to a given order.\n\n Arguments\n ---------\n p : int\n Coefficient of lambdaOut in resonant argument\n j*lambdaOut - (j-k)*lambdaIn\n q : int\n Order of the mean motion resonance.\n\n \"\"\"\n assert max_order>=0, \"max_order= {:d} not allowed, must be non-negative.\".format(max_order)\n if p=0, \"max_order= {:d} not allowed, must be non-negative.\".format(max_order)\n if p=0, \"max_order= {:d} not allowed, must be non-negative.\".format(max_order)\n raise RuntimeError(\"THIS METHOD NEEDS TO BE FIXED!!!\")\n max_order_by_2 = max_order//2\n max_order_by_4 = max_order//4\n for a in range(0,max_order_by_4+1):\n b_hi = max_order_by_2 - 2 * a\n if a==0:\n b_lo = 0\n else:\n b_lo = -b_hi\n for b in range(b_lo,b_hi+1):\n c_hi = max_order_by_2 - abs(b) - 2 * a\n if a == 0 and b ==0:\n c_lo = 0\n else:\n c_lo = -c_hi\n for c in range(c_lo,c_hi+1):\n k3 = a-b\n k4 = a+b\n k5 = -c-a\n k6 = c-a\n self.add_cos_term_to_max_order([0,0,k3,k4,k5,k6],max_order,indexIn,indexOut,update=False)\n\n # finish with update\n self._update()\n\n def add_cos_term_to_max_order(self,jvec,max_order,indexIn=1,indexOut=2,update = True):\n \"\"\"\n Add disturbing function term \n c(alpha,e1,e2,s1,s2) * cos(j1 * lambda + j2 * lambda1 + j3 * pomega1 + j4 * pomega2 + j5 * 
Omega1 + j6 * Omega2)\n approximating c up to order 'max_order' in eccentricity and inclination.\n\n Arguments\n ---------\n jvec : array-like\n Vector of integers specifying cosine argument.\n max_order : int\n Maximum order of terms in include in the expansion of c\n indexIn : int, optional\n Integer index of inner planet.\n indexOut : anit, optional\n Intgeger index of outer planet.\n \"\"\"\n _,_,j3,j4,j5,j6 = jvec\n order = max_order - abs(j3) - abs(j4) - abs(j5) - abs(j6)\n orderBy2 = order // 2\n N = orderBy2+1\n for z1 in range(0,N):\n for z2 in range(0,N - z1):\n for z3 in range(0,N - z1 - z2):\n for z4 in range(0,N - z1 - z2 - z3):\n zvec = [z1,z2,z3,z4]\n self.add_monomial_term(jvec,zvec,indexIn,indexOut,update=False)\n if update:\n self._update() \n\n def _get_laplace_lagrange_matrices(self):\n set_e_and_inc_zero_rule = {\n S('{0}{1}'.format(var,i)):0\n for i in range(1,self.N)\n for var in ['eta','kappa','rho','sigma']\n }\n mtrx = []\n for s1 in [S('eta{}'.format(i)) for i in range(1,self.N)]:\n row = []\n for s2 in [S('kappa{}'.format(i)) for i in range(1,self.N)]:\n entry= diff(self.derivs[s1],s2)\n row.append(entry.subs(set_e_and_inc_zero_rule))\n mtrx.append(row)\n ecc_mtrx = Matrix(mtrx)\n mtrx = []\n for s1 in [S('rho{}'.format(i)) for i in range(1,self.N)]:\n row = []\n for s2 in [S('sigma{}'.format(i)) for i in range(1,self.N)]:\n entry= diff(self.derivs[s1],s2)\n row.append(entry.subs(set_e_and_inc_zero_rule))\n mtrx.append(row)\n inc_mtrx = Matrix(mtrx)\n return ecc_mtrx,inc_mtrx\n","sub_path":"celmech/poincare.py","file_name":"poincare.py","file_ext":"py","file_size_in_byte":25689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"525188489","text":"def filter_manager(data):\n filtered_data = get_trends(data)\n final_fata = time_construct(filtered_data)\n return final_fata\n\n\ndef get_trends(data):\n trends = {\"Data\": []}\n\n index = 0\n for i in range(len(data[\"Data\"])):\n for j, k in data[\"Data\"][i].items():\n trends[\"Data\"].append({\"Index\": index+1})\n for m, n in k.items():\n trends[\"Data\"][index].update({m: n})\n index += 1\n\n return trends\n\n\ndef time_construct(data):\n trends = {\"Data\": []}\n\n for i in range(len(data[\"Data\"])):\n trends[\"Data\"].append({})\n for j, k in data[\"Data\"][i].items():\n if j != \"Time Query\":\n trends[\"Data\"][i].update({j: k})\n else:\n trends[\"Data\"][i].update({\"Date Query\": k[:10]})\n\n # adjusting time zone, with less three hours. UGLY, I know...\n time = k[11:19]\n hour = int(time[:2])\n hour = (hour-3) % 24\n if hour < 10:\n time = f\"0{hour}\" + time[2:]\n else:\n time = f\"{hour} \" + time[2:]\n trends[\"Data\"][i].update({\"Time Query\": time})\n\n return trends\n","sub_path":"a_data_processing/Twitter/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"219288094","text":"#! /usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n#\n# pySpatialETL is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# pySpatialETL is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# Author : Fabien Rétif - fabien.retif@zoho.com\n#\nimport math\n\nfrom netCDF4 import Dataset\nfrom netCDF4 import date2num\nfrom numpy import float32\nfrom numpy import float64\n\nfrom spatialetl.coverage.io.CoverageWriter import CoverageWriter\n\n\nclass AcademicECMWFWriter (CoverageWriter):\n\n def __init__(self, myFile,\n lon,\n lat,\n times,\n wind_speed=0,\n wind_from_direction_angle=0,\n surface_air_pressure=1013.25, # Pression MSL\n sea_surface_air_pressure=1013.25, # Pression MSL\n surface_air_temperature=283.15, # 10°C\n dewpoint_temperature=283.15, # 10°C\n surface_downward_sensible_heat_flux=0,\n surface_downward_latent_heat_flux=0,\n surface_downward_solar_radiation=0,\n surface_downward_thermal_radiation=0,\n surface_solar_radiation=0,\n surface_thermal_radiation=0,\n total_rain=0,\n update=False):\n CoverageWriter.__init__(self,None,myFile);\n self.x_axis = lon;\n self.y_axis= lat;\n self.t_axis = times\n self.wind_speed = wind_speed\n self.wind_from_direction_angle = wind_from_direction_angle\n self.surface_air_pressure = surface_air_pressure\n self.sea_surface_air_pressure = sea_surface_air_pressure\n self.surface_downward_sensible_heat_flux = surface_downward_sensible_heat_flux\n self.surface_downward_latent_heat_flux = surface_downward_latent_heat_flux\n self.surface_air_temperature = surface_air_temperature\n self.dewpoint_temperature = dewpoint_temperature\n 
self.surface_downward_solar_radiation = surface_downward_solar_radiation\n self.surface_solar_radiation = surface_solar_radiation\n self.surface_thermal_radiation = surface_thermal_radiation\n self.surface_downward_thermal_radiation = surface_downward_thermal_radiation\n self.total_rain=total_rain\n self.ncfile = None\n self.update = update;\n\n if self.update == False :\n self.ncfile = Dataset(self.filename, 'w', format='NETCDF4')\n else:\n self.ncfile = Dataset(self.filename, 'r+', format='NETCDF4')\n\n self.ncfile.description = 'ECMWF Writer. Generated with pySpatialETL'\n\n if self.update == False:\n # dimensions\n self.ncfile.createDimension('time', None)\n self.ncfile.createDimension('lat', len(self.y_axis))\n self.ncfile.createDimension('lon', len(self.x_axis))\n\n # variables\n times = self.ncfile.createVariable('time', float64, ('time',))\n times.units= 'seconds since 2008-01-29 00:00:00'\n times.calendar= 'gregorian'\n times.standard_name= 'time'\n times.axis='T'\n times.conventions = \"UTC time\"\n\n latitudes = self.ncfile.createVariable('lat', float32, ('lat',))\n latitudes.units = \"degree_north\" ;\n latitudes.long_name = \"latitude\" ;\n latitudes.standard_name = \"latitude\" ;\n latitudes.valid_min = -90.0;\n latitudes.valid_max = 90.0 ;\n latitudes.axis = \"Y\" ;\n\n longitudes = self.ncfile.createVariable('lon', float32, ('lon',))\n longitudes.units = \"degree_east\" ;\n longitudes.long_name = \"longitude\" ;\n longitudes.standard_name = \"longitude\" ;\n longitudes.valid_min = -180.0 ;\n longitudes.valid_max = 180.0 ;\n longitudes.axis = \"X\" ;\n\n # data\n latitudes[:] = self.y_axis;\n longitudes[:] = self.x_axis;\n times[:] = date2num(self.t_axis,units = times.units, calendar = times.calendar);\n\n def close(self):\n self.ncfile.close()\n\n def write_variable_3D_mask(self):\n\n var = self.ncfile.createVariable('LSM', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Land/sea mask\";\n var.code = 172;\n var.table = 
128;\n var[::] = 0\n\n def write_variable_surface_pressure(self):\n\n if self.update == False:\n var = self.ncfile.createVariable('SP', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface pressure\";\n var.code = 134;\n var.table = 128;\n var.units = \"Pa\";\n else:\n var = self.ncfile.variables['SP'];\n\n var[:] = self.surface_air_pressure*100;\n\n def write_variable_sea_surface_air_pressure(self):\n\n if self.update == False:\n var = self.ncfile.createVariable('MSL', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Sea surface air pressure\";\n var.code = 134;\n var.table = 128;\n var.units = \"Pa\";\n else:\n var = self.ncfile.variables['MSL'];\n\n var[:] = self.sea_surface_air_pressure*100;\n\n def write_variable_wind(self):\n\n if self.update == False:\n data_u = self.ncfile.createVariable('U10M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n data_u.long_name = \"10 metre U wind component\";\n data_u.code = 147;\n data_u.table = 128;\n data_u.units = \"m s**-1\";\n\n data_v = self.ncfile.createVariable('V10M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n data_v.long_name = \"10 metre V wind component\";\n data_v.code = 147;\n data_v.table = 128;\n data_v.units = \"m s**-1\";\n else :\n data_u = self.ncfile.variables['U10M']\n data_v = self.ncfile.variables['V10M']\n\n #data_u[:] = 270. - (math.degrees(self.wind_speed*math.sin(math.radians(self.wind_to_direction_angle)))) + 180.0 % 360.0\n #data_v[:] = 270. 
- (math.degrees(self.wind_speed*math.cos(math.radians(self.wind_to_direction_angle)) + 180.0 % 360.0\n\n data_u[:] = self.wind_speed * math.sin(math.radians((self.wind_from_direction_angle + 180.0) % 360.0))\n data_v[:] = self.wind_speed * math.cos(math.radians((self.wind_from_direction_angle + 180.0) % 360.0))\n\n def write_variable_surface_downward_sensible_heat_flux(self):\n\n if self.update == False:\n var = self.ncfile.createVariable('SSHF', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface sensible heat flux\";\n var.code = 146;\n var.table = 128;\n var.units = \"W m**-2 s\";\n else:\n var = self.ncfile.variables['SSHF']\n\n var[:] = self.surface_downward_sensible_heat_flux;\n\n def write_variable_surface_downward_latent_heat_flux(self):\n\n var = self.ncfile.createVariable('SLHF', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface latent heat flux\";\n var.code = 147;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_downward_latent_heat_flux;\n\n def write_variable_surface_air_temperature(self):\n\n var = self.ncfile.createVariable('T2M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"2 metre temperature\";\n var.code = 167;\n var.table = 128;\n var.units = \"K\";\n var[:] = self.surface_air_temperature;\n\n def write_variable_dewpoint_temperature(self):\n\n var = self.ncfile.createVariable('D2M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"2 metre dewpoint temperature\";\n var.code = 168;\n var.table = 128;\n var.units = \"K\";\n # Value\n var[:] = self.dewpoint_temperature;\n\n def write_variable_surface_downward_solar_radiation(self):\n\n var = self.ncfile.createVariable('SSRD', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface solar radiation downwards\";\n var.code = 169;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = 
self.surface_downward_solar_radiation;\n\n def write_variable_surface_downward_thermal_radiation(self):\n\n var = self.ncfile.createVariable('STRD', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface thermal radiation downwards\";\n var.code = 175;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_downward_thermal_radiation;\n\n def write_variable_surface_solar_radiation(self):\n\n var = self.ncfile.createVariable('SSR', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface solar radiation\";\n var.code = 176;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_solar_radiation;\n\n def write_variable_surface_thermal_radiation(self):\n\n var = self.ncfile.createVariable('STR', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface thermal radiation\";\n var.code = 177;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_thermal_radiation;\n\n def write_variable_rainfall_amount(self):\n\n var = self.ncfile.createVariable('TP', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Total precipitation\";\n var.code = 228;\n var.table = 128;\n var.units = \"m\";\n var[:] = self.total_rain;","sub_path":"spatialetl/coverage/io/netcdf/ecmwf/AcademicECMWFWriter.py","file_name":"AcademicECMWFWriter.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"364071458","text":"# 整型\n# 定义:年龄、手机号码、身份证(X除外)\na = 1 # a = int(1)\n\n# b = '12'\n# print(type(b))\n# b = int(b)\n# print(type(b))\n\n# 浮点型\n# 定义:体重、升高、薪资\n\nf1 = 1.9 # f1 = float(1.9)\n# print(type(f1))\n#\n# f2 = '1.9'\n# f2 = float(f2)\n# print(type(f2))\n\n# f2 = int(f2)\n# print(f2)\n# print(type(f2))\n\n# 复数\n\"\"\"\n>>> a = 1-2j\n>>> a\n(1-2j)\n>>> type(a)\n\n>>>\n>>>\n>>> a.real\n1.0\n>>> a.imag\n-2.0\n>>>\n\"\"\"\n\n\n# python2与python3的区别\n\n\"\"\"\npython2\n这个范围[-24xxxxxxxx,24xxxxxxxx],就叫int\n超过这个范围:long\n\npython3:\n没有长整型这么一说\n\"\"\"","sub_path":"2.python/0.python基础/day3/代码/数字类型.py","file_name":"数字类型.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"615575781","text":"from __future__ import generators\nfrom collections import defaultdict\n\nclass priorityDictionary(dict):\n def __init__(self):\n '''Initialize priorityDictionary by creating binary heap\nof pairs (value,key). Note that changing or removing a dict entry will\nnot remove the old pair from the heap until it is found by smallest() or\nuntil the heap is rebuilt.'''\n self.__heap = []\n dict.__init__(self)\n\n def smallest(self):\n '''Find smallest item after removing deleted items from heap.'''\n #if len(self) == 0:\n # raise IndexError, \"smallest of empty priorityDictionary\"\n heap = self.__heap\n while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:\n lastItem = heap.pop()\n insertionPoint = 0\n while 1:\n smallChild = 2*insertionPoint+1\n if smallChild+1 < len(heap) and \\\n heap[smallChild] > heap[smallChild+1]:\n smallChild += 1\n if smallChild >= len(heap) or lastItem <= heap[smallChild]:\n heap[insertionPoint] = lastItem\n break\n heap[insertionPoint] = heap[smallChild]\n insertionPoint = smallChild\n return heap[0][1]\n\n def __iter__(self):\n '''Create destructive sorted iterator of priorityDictionary.'''\n def iterfn():\n while len(self) > 0:\n x = self.smallest()\n yield x\n del self[x]\n return iterfn()\n\n def __setitem__(self,key,val):\n '''Change value stored in dictionary and add corresponding\npair to heap. 
Rebuilds the heap if the number of deleted items grows\ntoo large, to avoid memory leakage.'''\n dict.__setitem__(self,key,val)\n heap = self.__heap\n if len(heap) > 2 * len(self):\n self.__heap = [(v,k) for k,v in self.iteritems()]\n self.__heap.sort() # builtin sort likely faster than O(n) heapify\n else:\n newPair = (val,key)\n insertionPoint = len(heap)\n heap.append(None)\n while insertionPoint > 0 and \\\n newPair < heap[(insertionPoint-1)//2]:\n heap[insertionPoint] = heap[(insertionPoint-1)//2]\n insertionPoint = (insertionPoint-1)//2\n heap[insertionPoint] = newPair\n\n def setdefault(self,key,val):\n '''Reimplement setdefault to call our customized __setitem__.'''\n if key not in self:\n self[key] = val\n return self[key]\n\ndef dks(G, start, end):\n D = {}\t# dictionary of final distances\n p = {}\t# dictionary of predecessors\n Q = priorityDictionary() # est.dist. of non-final vert.\n Q[start] = 0\n\n for v in Q:\n D[v] = Q[v]\n if v == end: break\n for w in G[v]:\n vwLength = D[v] + G[v][w]\n if w in D:\n if vwLength < D[w]:\n raise(ValueError,\"Dijkstra: found better path to already-final vertex\")\n elif w not in Q or vwLength < Q[w]:\n Q[w] = vwLength\n P[w] = v\n return (D,P)\n\n\nfor _ in range(int(input())):\n n, m = [int(x) for x in input().split()]\n bridges = defaultdict(list)\n for _ in range(m):\n a, b = [int(x) for x in input().split()]\n bridges[a].append(b)\n bridges[b].append(a)\n d, p = dks(bridges,1, n)\n\n\n","sub_path":"Hackerrank/rocky_village.py","file_name":"rocky_village.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"72806040","text":"import myutil as mu\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset # 텐서데이터셋\nfrom torch.utils.data import DataLoader # 데이터로더\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt # 맷플롯립사용\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport random\nfrom sklearn.datasets import load_digits\n\n################################################################################\n# - 다층 퍼셉트론으로 손글씨 분류하기\n# - 이번 챕터에서는 다층 퍼셉트론을 구현하고, 딥 러닝을 통해서 숫자 필기 데이터를 분류해봅시다.\n# - MNIST 데이터랑 다른 데이터입니다.\n\n################################################################################\n# - 숫자 필기 데이터 소개\n# - 숫자 필기 데이터는 사이킷런 패키지에서 제공하는 분류용 예제 데이터입니다.\n# - 0부터 9까지의 숫자를 손으로 쓴 이미지 데이터로 load_digits() 명령으로 로드할 수 있습니다.\n# - 각 이미지는 0부터 15까지의 명암을 가지는 8 × 8 = 64 픽셀 해상도의 흑백 이미지입니다.\n# - 그리고 해당 이미지가 1,797개가 있습니다.\n# - load_digits()를 통해 이미지 데이터를 로드할 수 있습니다.\n# - 로드한 전체 데이터를 digits에 저장합니다.\n\n\ndigits = load_digits()\nmu.log(\"len(digits.images)\", len(digits.images))\n\nimages_labels = list(zip(digits.images, digits.target))\nsub_sample_size = 20\n\nfor i, (image, label) in enumerate(images_labels[:sub_sample_size]):\n plt.subplot(4, 5, i + 1)\n plt.axis(\"off\")\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation=\"nearest\")\n plt.title(\"label : {}\".format(label))\n\nplt.show()\n\n################################################################################\n# - 다층 퍼셉트론 분류기 만들기\n\n\nmodel = nn.Sequential(\n nn.Linear(64, 32), # input_layer = 64, hidden_layer1 = 32\n nn.ReLU(),\n nn.Linear(32, 16), # hidden_layer2 = 32, hidden_layer3 = 16\n nn.ReLU(),\n nn.Linear(16, 10) # hidden_layer3 = 16, output_layer = 10\n)\n\nmu.log(\"model\", model)\n\nX = digits.data # 이미지. 
즉, 특성 행렬\nY = digits.target # 각 이미지에 대한 레이블\n\nmu.log(\"len(X)\", len(X))\nmu.log(\"X[0].shape\", X[0].shape)\nmu.log(\"len(Y)\", len(Y))\nmu.log(\"Y[0].shape\", Y[0].shape)\n\nX = torch.tensor(X, dtype=torch.float32)\nY = torch.tensor(Y, dtype=torch.int64)\n\nloss_fn = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters())\nnb_epochs = 100\nmu.plt_init()\n\nfor epoch in range(nb_epochs + 1):\n y_pred = model(X)\n loss = loss_fn(y_pred, Y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if epoch % 10 == 0:\n accuracy = mu.get_cross_entropy_accuracy(y_pred, Y)\n mu.log_epoch(epoch, nb_epochs, loss, accuracy)\n\nmu.plt_show()\n\nmu.log(\"model\", model)\n\n################################################################################\n# accuracy 측정\n\n\nwith torch.no_grad():\n prediction = model(X)\n accuracy = mu.get_cross_entropy_accuracy(prediction, Y)\n mu.log(\"accuracy\", accuracy)\n\n################################################################################\n# 랜덤 5 항목 테스트\n\n\nfor _ in range(5):\n print(\"-\" * 80)\n r = random.randint(0, len(X) - 1)\n mu.log(\"r\", r)\n X_single_data = X[r:r + 1]\n mu.log(\"X_single_data.shape\", X_single_data.shape)\n Y_single_data = Y[r:r + 1]\n mu.log(\"Y_single_data\", Y_single_data)\n single_prediction = model(X_single_data)\n mu.log(\"single_prediction\", single_prediction)\n single_prediction_res = torch.argmax(single_prediction, 1).item()\n mu.log(\"single_prediction_res\", single_prediction_res)\n mu.plt_img_show(X_single_data.view(8, 8))\n","sub_path":"0607_multi_layer_perceptron_sklearn.py","file_name":"0607_multi_layer_perceptron_sklearn.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"487136406","text":"import argparse\nimport json\n\nimport numpy as np\n\ndef add_arguments(parser):\n parser.add_argument(\"--input_file\", help=\"path to input file\", required=True)\n parser.add_argument(\"--output_file\", help=\"path to output file\", required=True)\n parser.add_argument(\"--answer_threshold\", help=\"threshold of answer\", required=False, default=0.1, type=float)\n\ndef convert_coqa(input_file,\n output_file,\n answer_threshold):\n with open(input_file, \"r\") as file:\n input_data = json.load(file)\n \n output_data = []\n for data in input_data:\n id_items = data[\"qas_id\"].split('_')\n id = id_items[0]\n turn_id = int(id_items[1])\n \n prob_list = [data[\"unk_prob\"], data[\"yes_prob\"], data[\"no_prob\"]]\n answer_list = [\"unknown\", \"yes\", \"no\"]\n \n prob_idx = np.argmax(prob_list)\n if prob_list[prob_idx] >= answer_threshold:\n answer = answer_list[prob_idx]\n if answer == \"yes\" and \"true or false\" in data[\"question_text\"].lower():\n answer = \"true\"\n elif answer == \"no\" and \"true or false\" in data[\"question_text\"].lower():\n answer = \"false\"\n else:\n answer = data[\"predict_text\"]\n \n score = prob_list[prob_idx]\n \n output_data.append({\n \"id\": id,\n \"turn_id\": turn_id,\n \"answer\": answer,\n \"score\": score\n })\n \n with open(output_file, \"w\") as file:\n json.dump(output_data, file, indent=4)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n add_arguments(parser)\n args = parser.parse_args()\n convert_coqa(args.input_file, args.output_file, args.answer_threshold)\n","sub_path":"tool/convert_coqa.py","file_name":"convert_coqa.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"481432412","text":"import tensorflow as tf\nimport numpy as np\n\n\nx = tf.Variable(tf.random_normal([1,2,6,1]))\ny = tf.squeeze(x, axis=[0])\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n print(\"x shape\", x.get_shape().as_list())\n print(\"y shape\", y.get_shape().as_list())\n","sub_path":"ga3c/test_tensorflow.py","file_name":"test_tensorflow.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"42402414","text":"#! /usr/bin/env python3\n\nfrom itertools import product\nimport sys\n\ntaxa_mut = [0.005, 0.010]\ntaxa_cruz = [0.95, 0.99]\ntam_pop = [200, 400, 800]\noperador_cruz = [0, 1]\noperador_mut = [0, 1]\nnumero_grupos = 1\n\ncombinacoes = list(product(tam_pop, taxa_cruz, operador_cruz, operador_mut,\n taxa_mut))\nnumero_combinacoes = len(combinacoes)\nprint('Numero de combinacoes: ', numero_combinacoes)\n\ncombinacoes_por_grupo = numero_combinacoes // numero_grupos\nprint('Combinacoes por grupo:', combinacoes_por_grupo)\n\n\ndef getid(c):\n return '.'.join(str(x) for x in c)\n\n\ndef comb2str(i, c):\n return 'ag ' + str(i) + '-' + getid(c) + ' ' + ' '.join(str(x) for x in c)\n\n\nid_grupo = int(sys.argv[1])\ncomeco_intervalo = id_grupo * combinacoes_por_grupo\nfim_intervalo = (id_grupo + 1) * combinacoes_por_grupo\n\nprint('Intervalo: [%d, %d)' % (comeco_intervalo, fim_intervalo))\n\nwith open('restantes.txt', 'w') as f:\n for i, c in enumerate(combinacoes[comeco_intervalo:fim_intervalo],\n comeco_intervalo):\n print(comb2str(i, c), file=f)\n\n","sub_path":"scripts/gerar_configuracoes_ag.py","file_name":"gerar_configuracoes_ag.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"61466472","text":"#!/usr/bin/env python3\n\n# Evaluating Code\n# Clarity first!\n# Maintainability (minimal repetition or dependencies)\n# Consistency (syntax, variable naming)\n# Brevity\n# At higher levels, and after the above:\n# Time efficiency\n# Memory efficiency\n\nclass MaxSizeList(object):\n def __init__(self, maxSize):\n self.l = []\n self.maxSize = maxSize\n\n def push(self, element):\n self.l.append(element)\n if (len(self.l) > self.maxSize):\n self.l.pop(0)\n\n def get_list(self):\n return self.l\n\na = MaxSizeList(3)\nb = MaxSizeList(1)\n\na.push(\"hey\")\na.push(\"hi\")\na.push(\"let's\")\na.push(\"go\")\n\nb.push(\"hey\")\nb.push(\"hi\")\nb.push(\"let's\")\nb.push(\"go\")\n\nprint(a.get_list())\nprint(b.get_list())\n","sub_path":"OOP/008. Assignment 1.py","file_name":"008. Assignment 1.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"453358786","text":"# 2. Посчитать четные и нечетные цифры введенного натурального числа.\n# Например, если введено число 34560, то у него 3 четные цифры (4, 6 и 0)\n# и 2 нечетные (3 и 5).\n\nEVEN_NUMS = 0\nODD_NUMS = 0\n\n\n# ---Цикл---\ndef counting_cycle(a):\n even_nums = 0\n odd_nums = 0\n while a != 0:\n numb = a % 10 # Получаем крайнее правое число\n a = a // 10 # Убираем крайнее правое число\n if numb % 2 == 0:\n even_nums += 1\n else:\n odd_nums += 1\n print(f'Кол-во четных чисел: {even_nums}\\n'\n f'Кол-во нечетных чисел: {odd_nums}')\n\n\n# ---Рекурсия---\ndef counting_req(a):\n global EVEN_NUMS, ODD_NUMS\n if a == 0:\n return print(f'Кол-во четных чисел: {EVEN_NUMS}\\n'\n f'Кол-во нечетных чисел: {ODD_NUMS}')\n numb = a % 10\n if numb % 2 == 0:\n EVEN_NUMS += 1\n else:\n ODD_NUMS += 1\n return counting_req(a // 10)\n\n\nif __name__ == '__main__':\n A = int(input('Введите натуральное число:\\n'))\n counting_cycle(A)\n counting_req(A)\n","sub_path":"lesson_2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"240227770","text":"import requests\nimport sys\nfrom datetime import datetime, timedelta\n\nGITHUB_SEARCH_REPOS = 'https://api.github.com/search/repositories'\n\n\ndef get_last_week_date():\n today = datetime.today()\n today = datetime.date(today)\n last_week = today - timedelta(days=7)\n last_week = last_week.isoformat()\n return last_week\n\n\ndef get_trending_repositories(top_size):\n last_week = get_last_week_date()\n params = {'q': 'created:>{0}'.format(last_week),\n 'sort': 'stars',\n 'per_page': top_size}\n request = requests.get(GITHUB_SEARCH_REPOS, params=params)\n trending_repos = request.json()\n return trending_repos.get('items')\n\n\ndef output_interesting_repositories(trending_repos, top_size):\n print('\\nTop {0} trending repos ordered by stars:\\n'.format(top_size))\n for repo in trending_repos:\n print('{0} \\nDescription: {1}'.format(repo.get('name'), repo.get('description')))\n print('Stars: {0} \\nOpen issues: {1}'.format(repo.get('stargazers_count'), repo.get('open_issues_count')))\n print('Link: {0} \\n'.format(repo.get('html_url')))\n\n\nif __name__ == '__main__':\n try:\n top_size = int(sys.argv[1])\n except:\n top_size = 100\n list_trending_repos = get_trending_repositories(top_size)\n output_interesting_repositories(list_trending_repos, top_size)\n","sub_path":"github_trending.py","file_name":"github_trending.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"251194998","text":"class Solution:\n def minDistance(self, height, width, tree, squirrel, nuts):\n \"\"\"\n :type height: int\n :type width: int\n :type tree: List[int]\n :type squirrel: List[int]\n :type nuts: List[List[int]]\n :rtype: int\n \"\"\"\n def distance(x, y):\n return abs(x[0] - y[0]) + abs(x[1] - y[1])\n\n\n mindis, first = 999999999, 0\n for i, nut in enumerate(nuts):\n if distance(squirrel, nut) - distance(tree, nut) < mindis:\n mindis = distance(squirrel, nut) - distance(tree, nut)\n first = i\n\n ans = mindis + sum(2 * distance(tree, nut) for i, nut in enumerate(nuts))\n return ans\n","sub_path":"leetcode/573. Squirrel Simulation.py","file_name":"573. Squirrel Simulation.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"217600602","text":"from google.appengine.api import users\nfrom google.appengine.api import channel\n\nimport json\nimport webapp2\nfrom random import randint\n\n\nclass checkCredentials(webapp2.RequestHandler):\n def get(self):\n \n tokensalt = randint(1561, 8644242454)\n mytoken = 'ebiduh' + str(tokensalt)\n token = channel.create_channel(mytoken)\n data = { 'token':token, 'clientID':mytoken }\n self.response.out.write(json.dumps(data))\n \n \nclass doNothing(webapp2.RequestHandler):\n def get(self):\n self.response.out.write('successfully did nothing...')\n \n \n \n\n\napp = webapp2.WSGIApplication([\n ('/app/authenticate', checkCredentials),\n ('/app/donothing', doNothing),\n ],debug=True) ","sub_path":"app/authenticate.py","file_name":"authenticate.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"213776907","text":"from django.forms.models import modelformset_factory\nimport requests\nimport json\n\nfrom django.shortcuts import render\nfrom django.http.response import HttpResponse\nfrom django.forms.formsets import formset_factory\n\nfrom forms import TestRESTForm, TYPE_REQUEST_CHOICES, AddUsersForm\nfrom models import MyUser\n\n\ndef requests_view(request):\n if request.method == 'GET':\n context = {}\n context['form'] = TestRESTForm()\n return render(request, 'rest_app/index.html', context)\n\n elif request.method == 'POST':\n form = TestRESTForm(request.POST)\n context = {}\n info = ''\n if form.is_valid():\n type_request = dict(TYPE_REQUEST_CHOICES).get(int(request.POST.get('type_request')))\n if type_request == 'GET':\n try:\n info = requests.get(request.POST.get('request')).content\n info = json.loads(info)\n info = json.dumps(info, indent=4, sort_keys=True)\n except:\n info = 'Please try again'\n elif type_request == 'POST':\n try:\n if request.POST.get('request').count('?') == 1:\n url, data = request.POST.get('request').split('?')\n data = json.loads(data)\n headers = {'content-type': 'application/json'}\n info = requests.post(url, data=json.dumps(data), headers=headers)\n if str(info) == '':\n info = 'data is changed'\n else:\n info = 'Please try again'\n except:\n info = 'Please try again'\n elif type_request == 'DELETE':\n try:\n url = request.POST.get('request')\n info = requests.delete(url=url)\n if str(info) == '':\n info = 'ID does not exist'\n else:\n info = 'Record is deleted'\n except:\n info = 'Please try again'\n context['info'] = info\n\n context['form'] = form\n data = json.dumps({'info': info, 'errors': str(form.errors)})\n return HttpResponse(data, content_type=\"application/json\")\n\n\ndef add_users_view(request):\n if request.method == 'GET':\n context = {}\n context['form'] = formset_factory(AddUsersForm, extra=5)\n return render(request, 'rest_app/add_users.html', context)\n\n elif request.method == 'POST':\n 
context = {}\n AddUsersFormset = modelformset_factory(MyUser, form=AddUsersForm, extra=5)\n formset = AddUsersFormset(request.POST)\n if formset.is_valid():\n formset.save()\n context['form'] = formset_factory(AddUsersForm, extra=5)\n return render(request, 'rest_app/add_users.html', context)\n\n","sub_path":"rest_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"382088905","text":"# Import the model class\n# This is just to show how the module works\n\nfrom prosail2 import Prosail\nimport pdb\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom BRDF import est_spec\nimport cProfile\nimport matplotlib.colors as mcolors\n\ndef main():\n #pdb.set_trace()\n # nothing happens, but it gives access to stuff\n p = Prosail()\n wvs = p.wl\n# #def run(self, N, Cab, Car, Cbrown, Cw, Cm, LAI, psoil, hspot, tts, tto, psi, LIDF, outname=None, Py6S=False):\n \n # Common leaf distributions\n Planophile = (1, 0)\n Erectophile = (-1, 0)\n Plagiophile = (0, -1)\n Extremophile = (0, 1)\n Spherical = (-0.35, -0.15)\n \n # psi is the relative azimuth angle\n cnf_template = {'N': 1.5, 'Cab': 40, 'Car': 8, 'Cbrown': 0, 'Cw': 0.01, \\\n 'Cm': 0.009, 'LAI': 3, 'psoil': 0, 'hspot': 0.5, 'tts': 45, 'tto':1, 'psi':30, 'LIDF': Planophile}\n \n cnf_template = {'N': 2.0, 'Cab': 50, 'Car': 10, 'Cbrown': 0.1, 'Cw': 0.02, \\\n 'Cm': 0.015, 'LAI': 5, 'psoil': 0.3, 'hspot': 0.8, 'tts': 30, 'tto': 2, 'psi': 45, 'LIDF': 'Erectophile'}\n \n cnf_template = {'N': 1.5, 'Cab': 45, 'Car': 15, 'Cbrown': 0.2, 'Cw': 0.03, \\\n 'Cm': 0.02, 'LAI': 2, 'psoil': 0.2, 'hspot': 0.5, 'tts': 40, 'tto': 1, 'psi': 60, 'LIDF': 'Spherical'}\n \n cnf_template_corn = {'N': 1.5, 'Cab': 45, 'Car': 8, 'Cbrown': 0.2, 'Cw': 0.03, \\\n 'Cm': 0.02, 'LAI': 2, 'psoil': 0.2, 'hspot': 0.1, 'tts': 45, 'tto': 30, \\\n 'psi': 60, 'LIDFa': Erectophile[0], 'LIDFb': Erectophile[1]}\n\n\n #spc = p.run(cnf_template_corn)\n #spc = p.run(cnf_template)\n #plt.plot(p.wl, spc, label='Corn')\n #plt.savefig('img.jpg')\n \n\n # produce a bunch of different Cab levels:\n # Define colormap\n\n\n ##### CHLOROPHIL\n colors = ['#D2B48C', '#90EE90', '#008000', '#006400']\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', colors)\n\n # Create a list of Cab values\n Cab_values = np.arange(20, 100, 7.5)\n\n for i, Cab in enumerate(Cab_values):\n color=cmap(i / (len(np.arange(20, 100, 
10)) - 1))\n cnf_template_corn['Cab'] = Cab\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, label=\"Cab = {}\".format(Cab), color=color)\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(Cab_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('Cab')\n\n plt.ylim((0, 0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('img.jpg')\n #pdb.set_trace()\n\n plt.figure()\n # Define the custom colormap\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', ['#964B00', '#00BFFF'])\n\n\n ##### Liquid Water\n # Create a list of Cw values\n Cw_values = np.arange(0.01, 0.1, 0.01)\n\n # Iterate over Cw values\n for i, Cw in enumerate(Cw_values):\n cnf_template_corn['Cw'] = Cw\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, color=cmap(i / (len(Cw_values) - 1)))\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(Cw_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('Cw')\n plt.ylim((0, 0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('Cw.jpg')\n #pdb.set_trace()\n\n\n ###### BROWN PIGMENT\n plt.figure()\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', ['#008000', '#A52A2A'])\n # Create a list of Cw values\n Cbrown_values = np.arange(0.01, 1, 0.05)\n\n # Iterate over Cw values\n for i, Cbrown in enumerate(Cbrown_values):\n cnf_template_corn['Cbrown'] = Cbrown\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, color=cmap(i / (len(Cbrown_values) - 1)))\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(Cbrown_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('Cbrown')\n plt.ylim((0, 0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('Cbrown.jpg')\n #pdb.set_trace()\n\n #### LAI\n plt.figure()\n cmap = 
mcolors.LinearSegmentedColormap.from_list('custom_colormap', ['#00FF00', '#006400'])\n\n # Create a list of Cw values\n LAI_values = np.arange(1, 10, 0.5)\n\n # Iterate over Cw values\n for i, LAI in enumerate(LAI_values):\n cnf_template_corn['LAI'] = LAI\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, color=cmap(i / (len(LAI_values) - 1)))\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(LAI_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('LAI')\n plt.ylim((0,0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('LAI.jpg')\n print('done')\n pdb.set_trace()\n\n\n\n\n results = []\n for lidf, typename in zip([Planophile, Erectophile, Plagiophile, Extremophile, Spherical], ['Planophile', 'Erectophile', 'Plagiophile', 'Extremophile', 'Spherical']):\n cnf_template['LIDF'] = lidf\n spc = p.run(cnf_template)\n plt.plot(p.wl, spc, label=typename)\n results.append(spc)\n \n\n # get a single output:\n #profiler = cProfile.Profile()\n #profiler.enable()\n results1 = p.run(cnf_template)\n #profiler.disable()\n #profiler.print_stats()\n #p.run(1.5, 40, 8, 0, 0.01, 0.009, 1, 3, 0.01, 30, 10, 10, p.Planophile)\n #profile_result = cProfile.run('runit()', globals(), locals())\n #print(\"Method return value:\", profile_result)\n pdb.set_trace()\n \n cnf = cnf_template.copy()\n \n var_name = 'N'\n var_range = list(np.arange(0, 5, 0.1))\n #tto_range = list(np.arange(0, 20))\n results = []\n for val in var_range:\n cnf[var_name] = val\n print(cnf[var_name])\n results.append(p.run(cnf))\n\n\n wvs = p.wl\n results = np.array(results).T\n #cnfs = [cnf.update(tts=value) for value in np.arange(30, 60)]\n #plt.plot(wvs, results); plt.show()\n pdb.set_trace()\n try_rho = results[:, 0]\n brdf_adj = est_spec(try_rho, 2, 30, 5, 30, 50, 10)\n plt.plot(wvs, try_rho, wvs, brdf_adj); plt.show()\n #results = p.run(cnf)\n print(results)\n\n # Results ready for use with Py6S\n #results2 = p.run(1.5, 
40, 8, 0, 0.01, 0.009, 1, 3, 0.01, 30, 10, 10, p.Planophile, Py6S=True)\n #print(results2)\n pdb.set_trace()\n\n # Use these results with Py6S by running something like:\n # s = SixS()\n # s.ground_reflectance = GroundReflectance.HomogeneousLambertian(results2)\n # s.run()\n\n\ndef runit(p):\n Planophile = (1, 0)\n cnf_template = {'N': 1.5, 'Cab': 40, 'Car': 8, 'Cbrown': 0, 'Cw': 0.01, \\\n 'Cm': 0.009, 'LAI': 1, 'psoil': 3, 'hspot': 0.01, 'tts': 30, 'tto':10, 'psi':10, 'LIDF': Planophile}\n return p.run(cnf_template)\n\n\ndef plotspc(results):\n plt.plot(results[:,0], results[:,1])\n return 1\n\n\n \n return wvs\n\nif __name__=='__main__': main()","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"408731460","text":"\"\"\"这个例子将展示如何使用AODE分类器.\"\"\"\nimport pandas as pd\nimport classicML as cml\n\nDATASET_PATH = './datasets/西瓜数据集.csv'\nATTRIBUTE_NAME = ['脐部', '色泽', '根蒂', '敲声', '纹理', '触感', '密度', '含糖率']\n\n# 读取��据\ndataframe = pd.read_csv(DATASET_PATH, index_col=0, header=0)\nx = dataframe.iloc[:, :-1]\ny = dataframe.iloc[:, -1].values\ny[y == '是'] = 1\ny[y == '否'] = 0\n# 生成模型\nmodel = cml.AODE(attribute_name=ATTRIBUTE_NAME)\nmodel.compile(smoothing=True)\n# 训练模型\nmodel.fit(x, y)","sub_path":"examples/aode.py","file_name":"aode.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"640935044","text":"test_case = 1\nwhile True:\n dump = input()\n if not dump:\n break\n dump = int(dump)\n wall = list(map(int,input().split()))\n\n for _ in range(dump):\n wall.sort()\n wall[0] += 1\n wall[-1] -= 1\n print(f\"#{test_case} {wall[-1]-wall[0]}\")\n test_case += 1","sub_path":"python_workspace/coding_test/sw_expert_academy/Flatten.py","file_name":"Flatten.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"457539271","text":"from django.http import HttpResponse\r\nfrom django.shortcuts import render\r\nfrom navigation.models import Category\r\nfrom navigation.models import Page\r\nfrom navigation.forms import UserForm, UserProfileForm\r\nfrom django.shortcuts import redirect, reverse\r\nfrom django.contrib.auth import authenticate, login\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth import logout\r\n\r\nfrom datetime import datetime\r\n\r\ndef index(request):\r\n # 查询数据库,获取目前存储的所有分类\r\n # 按点赞次数倒序排列分类\r\n # 获取前 5 个分类(如果分类数少于 5 个,那就获取全部)\r\n # 把分类列表放入 context_dict 字典\r\n # 稍后传给模板引擎\r\n category_list = Category.objects.order_by('-likes')[:5]\r\n context_dict = {'categories': category_list}\r\n # 渲染响应,发给客户端\r\n page_list = Page.objects.order_by('-views')[:5]\r\n context_dict[\"pages\"] = page_list\r\n response = render(request, 'navigation/index.html', context_dict)\r\n # 调用处理 cookie 的辅助函数\r\n visitor_cookie_handler(request, response)\r\n # 返回 response 对象,更新目标 cookie\r\n return response\r\n\r\ndef about(request):\r\n #return HttpResponse(\"龙腾测试!!! 
首页\")\r\n return render(request, 'navigation/about.html')\r\n\r\n@login_required\r\ndef show_category(request, category_name_slug):\r\n # 创建上下文字典,稍后传给模板渲染引擎\r\n context_dict = {}\r\n try:\r\n # 能通过传入的分类别名找到对应的分类吗?\r\n # 如果找不到, .get() 方法抛出 DoesNotExist 异常\r\n # 因此 .get() 方法返回一个模型实例或抛出异常\r\n category = Category.objects.get(slug=category_name_slug)\r\n # 检索关联的所有网页\r\n # 注意, filter() 返回一个网页对象列表或空列表\r\n pages = Page.objects.filter(category=category)\r\n # 把得到的列表赋值给模板上下文中名为 pages 的键\r\n context_dict['pages'] = pages\r\n # 也把从数据库中获取的 category 对象添加到上下文字典中\r\n # 我们将在模板中通过这个变量确认分类是否存在\r\n context_dict['category'] = category\r\n except Category.DoesNotExist:\r\n # 没找到指定的分类时执行这里\r\n # 什么也不做\r\n # 模板会显示消息,指明分类不存在\r\n context_dict['category'] = None\r\n context_dict['pages'] = None\r\n # 渲染响应,返回给客户端\r\n return render(request, 'navigation/category.html', context_dict)\r\n\r\ndef register(request):\r\n # 一个布尔值,告诉模板注册是否成功\r\n # 一开始设为 False,注册成功后改为 True\r\n registered = False\r\n # 如果是 HTTP POST 请求,处理表单数据\r\n if request.method == 'POST':\r\n # 尝试获取原始表单数据\r\n # 注意, UserForm 和 UserProfileForm 中的数据都需要\r\n user_form = UserForm(data=request.POST)\r\n profile_form = UserProfileForm(data=request.POST)\r\n # 如果两个表单中的数据是有效的……\r\n if user_form.is_valid() and profile_form.is_valid():\r\n # 把 UserForm 中的数据存入数据库\r\n user = user_form.save()\r\n # 使用 set_password 方法计算密码哈希值\r\n # 然后更新 user 对象\r\n user.set_password(user.password)\r\n user.save()\r\n # 现在处理 UserProfile 实例\r\n # 因为要自行处理 user 属性,所以设定 commit=False\r\n # 延迟保存模型,以防出现完整性问题\r\n profile = profile_form.save(commit=False)\r\n profile.user = user\r\n # 用户提供头像了吗?\r\n # 如果提供了,��表单数据库中提取出来,赋给 UserProfile 模型\r\n if 'picture' in request.FILES:\r\n profile.picture = request.FILES['picture']\r\n # 保存 UserProfile 模型实例\r\n profile.save()\r\n # 更新变量的值,告诉模板成功注册了\r\n registered = True\r\n else:\r\n # 表单数据无效,出错了\r\n # 在终端打印问题\r\n print(user_form.errors, profile_form.errors)\r\n else:\r\n # 不是 HTTP POST 请求,渲染两个 ModelForm 实例\r\n # 表单为空,待用户填写\r\n user_form = 
UserForm()\r\n profile_form = UserProfileForm()\r\n # 根据上下文渲染模板\r\n return render(request,\r\n 'navigation/register.html',\r\n {'user_form': user_form,\r\n 'profile_form': profile_form,\r\n 'registered': registered})\r\n \r\n\r\ndef user_login(request):\r\n # 如果是 HTTP POST 请求,尝试提取相关信息\r\n if request.method == 'POST':\r\n # 获取用户在登录表单中输入的用户名和密码\r\n # 我们使用的是 request.POST.get('')\r\n # 而不是 request.POST['']\r\n # 这是因为对应的值不存在时,前者返回 None,\r\n # 而后者抛出 KeyError 异常\r\n username = request.POST.get('username')\r\n password = request.POST.get('password')\r\n # 使用 Django 提供的函数检查 username/password 是否有效\r\n # 如果有效,返回一个 User 对象\r\n user = authenticate(username=username, password=password)\r\n # 如果得到了 User 对象,说明用户输入的凭据是对的\r\n # 如果是 None( Python 表示没有值的方式),说明没找到与凭据匹配的用户\r\n if user:\r\n # 账户激活了吗?可能被禁了\r\n if user.is_active:\r\n # 登入有效且已激活的账户\r\n # 然后重定向到首页\r\n login(request, user)\r\n return redirect(reverse('index'))\r\n else:\r\n # 账户未激活,禁止登录\r\n return HttpResponse(\"Your Rango account is disabled.\")\r\n else:\r\n # 提供的登录凭据有问题,不能登录\r\n print(\"Invalid login details: {0}, {1}\".format(username, password))\r\n return HttpResponse(\"Invalid login details supplied.\")\r\n # 不是 HTTP POST 请求,显示登录表单\r\n # 极有可能是 HTTP GET 请求\r\n else:\r\n # 没什么上下文变量要传给模板系统\r\n # 因此传入一个空字典\r\n return render(request, 'navigation/login.html', {})\r\n\r\n# 使用 login_required() 装饰器限制\r\n# 只有已登录的用户才能访问这个视图\r\n@login_required\r\ndef user_logout(request):\r\n # 可以确定用户已登录,因此直接退出\r\n logout(request)\r\n # 把用户带回首页\r\n return redirect(reverse('index'))\r\n\r\n\r\ndef visitor_cookie_handler(request, response):\r\n # 获取网站的访问次数\r\n # 使用 COOKIES.get() 函数读取“visits”cookie\r\n # 如果目标 cookie 存在,把值转换为整数\r\n # 如果目标 cookie 不存在,返回默认值 1\r\n visits = int(request.COOKIES.get('visits', '1'))\r\n last_visit_cookie = request.COOKIES.get('last_visit', str(datetime.now()))\r\n last_visit_time = datetime.strptime(last_visit_cookie[:-7],'%Y-%m-%d %H:%M:%S')\r\n # 如果距上次访问已超过1s……\r\n if (datetime.now() - last_visit_time).seconds > 0:\r\n 
visits = visits + 1\r\n # 增加访问次数后更新“last_visit”cookie\r\n response.set_cookie('last_visit', str(datetime.now()))\r\n else:\r\n # 设定“last_visit”cookie\r\n response.set_cookie('last_visit', last_visit_cookie)\r\n # 更新或设定“visits”cookie\r\n response.set_cookie('visits', visits)","sub_path":"Chapter-08-code/first_project/navigation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"99775303","text":"#!/usr/bin/env python\n#\n# gpaeq: GTK EQ editor for PulseAudio's EQ plugin\n# Copyright (C) 2014 Philippe Proulx \n#\n# D-bus communication and EQ computations inspired by Jason Newton's qpaeq; see\n# .\n# Copyright (C) 2009 Jason Newton \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.overrides import Pango\nimport signal\nimport cairo\n\n\nclass SliderModel:\n def __init__(self, label, value, on_change):\n self._label = label\n self._value = value\n self._on_change = on_change\n self._scroll_incr = 0.01\n\n def set_label(self, label):\n self._label = label\n\n def get_label(self):\n return self._label\n\n def set_value(self, value):\n self._value = value\n if self._on_change:\n self._on_change()\n\n def set_scroll_incr(self, incr):\n self._scroll_incr = incr\n\n def get_scroll_incr(self):\n return self._scroll_incr\n\n def format_label(self):\n return self.get_label()\n\n def format_value(self):\n return '{:03}'.format(round(self.get_value() * 100))\n\n def get_value(self):\n return self._value\n\n def get_mid(self):\n return 0.5\n\n\nclass EqDbSliderModel(SliderModel):\n def __init__(self, hz, db_value, mindb, maxdb, on_change):\n self._mindb = mindb\n self._maxdb = maxdb\n self._hz = hz\n self._mid = EqDbSliderModel._db_value_to_value(0, mindb, maxdb)\n value = 
EqDbSliderModel._db_value_to_value(db_value, mindb, maxdb)\n SliderModel.__init__(self, str(hz), value, on_change)\n\n @staticmethod\n def _db_value_to_value(db_value, mindb, maxdb):\n return (db_value - mindb) / (maxdb - mindb)\n\n def get_db_value(self):\n return self.get_value() * (self._maxdb - self._mindb) + self._mindb\n\n def set_db_value(self, db_value):\n self.set_value(EqDbSliderModel._db_value_to_value(db_value, self._mindb, self._maxdb))\n\n def format_label(self):\n if self._hz < 1000:\n return str(int(self._hz))\n else:\n return '{:.1f}'.format(self._hz / 1000).rstrip('0').rstrip('.') + 'k'\n\n def format_value(self):\n db_value = self.get_db_value()\n ret = '{}{:.1f}'.format('+' if db_value >= 0 else '', db_value)\n return ret\n\n def get_mid(self):\n return self._mid\n\n\nclass Sliders(Gtk.DrawingArea):\n DEF_SLIDER_WIDTH = 35\n DEF_HANDLE_COLOR = (77 / 255, 224 / 255, 213 / 255)\n DEF_HANDLE_HOVER_COLOR = (214 / 255, 64 / 255, 72 / 255)\n DEF_SLIDER_BAR_COLOR = (0.35, 0.35, 0.35)\n DEF_SLIDER_BAR_HOVER_COLOR = (222 / 255, 96 / 255, 104 / 255)\n DEF_LABEL_COLOR = (0.8, 0.8, 0.8)\n DEF_VALUE_COLOR = (77 / 255, 224 / 255, 213 / 255)\n DEF_VERT_GUIDE_COLOR = (0.18, 0.18, 0.18)\n DEF_SLIDER_GUTTER = 2\n DEF_BORDER_WIDTH = 15\n INFO_HEIGHT = 27\n\n def __init__(self):\n self._pressed = False\n self._locked_x = None\n self._hover_slider_index = None\n self._sliders = []\n\n # default properties\n self._slider_width = Sliders.DEF_SLIDER_WIDTH\n self._handle_color = Sliders.DEF_HANDLE_COLOR\n self._handle_hover_color = Sliders.DEF_HANDLE_HOVER_COLOR\n self._slider_bar_color = Sliders.DEF_SLIDER_BAR_COLOR\n self._slider_bar_hover_color = Sliders.DEF_SLIDER_BAR_HOVER_COLOR\n self._slider_gutter = Sliders.DEF_SLIDER_GUTTER\n self._border_width = Sliders.DEF_BORDER_WIDTH\n self._label_color = Sliders.DEF_LABEL_COLOR\n self._value_color = Sliders.DEF_VALUE_COLOR\n self._vert_guide_color = Sliders.DEF_VERT_GUIDE_COLOR\n\n # precompute stuff\n 
self._precompute_stuff()\n\n # initialize drawing area\n Gtk.DrawingArea.__init__(self)\n\n # connect stuff\n self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)\n self.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK)\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK)\n self.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)\n self.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK)\n self.add_events(Gdk.EventMask.SCROLL_MASK | Gdk.EventMask.SMOOTH_SCROLL_MASK)\n self.connect('draw', self._on_draw)\n self.connect('button-press-event', self._on_button_press)\n self.connect('button-release-event', self._on_button_release)\n self.connect('motion-notify-event', self._on_motion)\n self.connect('scroll-event', self._on_scroll)\n self.connect('enter-notify-event', self._on_enter)\n self.connect('leave-notify-event', self._on_leave)\n self.connect('configure-event', self._on_configure)\n\n def set_slider_width(self, width):\n if width < 1:\n raise RuntimeError('slider width < 1')\n self._slider_width = width\n self._precompute_stuff()\n self.queue_draw()\n\n def set_handle_color(self, color):\n self._handle_color = color\n self.queue_draw()\n\n def set_handle_hover_color(self, color):\n self._handle_hover_color = color\n self.queue_draw()\n\n def set_slider_bar_color(self, color):\n self._slider_bar_color = color\n self.queue_draw()\n\n def set_slider_bar_hover_color(self, color):\n self._slider_bar_hover_color = color\n self.queue_draw()\n\n def set_label_color(self, color):\n self._label_color = color\n self.queue_draw()\n\n def set_value_color(self, color):\n self._value_color = color\n self.queue_draw()\n\n def set_vert_guide_color(self, color):\n self._vert_guide_color = color\n self.queue_draw()\n\n def set_slider_gutter(self, gutter):\n if gutter < 0:\n raise RuntimeError('gutter width < 0')\n self._slider_gutter = gutter\n self._precompute_stuff()\n self.queue_draw()\n\n def set_border_width(self, width):\n if width < 0:\n raise 
RuntimeError('border width < 0')\n self._border_width = width\n self._precompute_stuff()\n self.queue_draw()\n\n def get_width_for_nb_sliders(self, nb):\n # width is: number of sliders * single slider width + borders\n return nb * self._slider_width + 2 * self._border_width\n\n def get_nb_fitting_sliders(self):\n alloc = self.get_allocation()\n width = alloc.width - 2 * self._border_width\n return width // self._slider_width\n\n def _get_size(self):\n # size allocated to me\n alloc = self.get_allocation()\n\n # width will depend on sliders in fact\n width = self._slider_width * len(self._sliders) + 2 * self._border_width\n height = (alloc.height // 2) * 2\n\n return width, height\n\n def _get_sliders_size(self):\n # total width/height\n width, height = self._get_size()\n\n # remove borders from width\n width -= 2 * self._border_width\n\n # remove borders from height\n height -= 2 * self._border_width\n\n # remove info from height\n height -= self._border_width\n height -= Sliders.INFO_HEIGHT\n\n return width, height\n\n def _get_slider_index_at_x(self, x):\n width, height = self._get_sliders_size()\n x = round(x)\n if x < 0 or x >= width:\n return None\n return x // self._slider_width\n\n def _get_slider_at_x(self, x):\n index = self._get_slider_index_at_x(x)\n return None if index is None else self._sliders[index]\n\n def set_sliders(self, sliders):\n self._sliders = sliders\n self.queue_draw()\n\n def _translate_xy(self, x, y):\n return x - self._border_width, y - self._border_width\n\n def _get_handle_width(self):\n return self._slider_width - self._slider_gutter\n\n def _get_handle_height(self):\n return round(self._get_handle_width() / 4)\n\n def _get_slider_bar_width(self):\n return round(self._get_handle_width() / 2)\n\n def _precompute_stuff(self):\n width, height = self._get_sliders_size()\n off = self._border_width\n hw = self._get_handle_width()\n hh = self._get_handle_height()\n self._handle_padding = round(hh / 2)\n self._mi = self._handle_padding + 
off\n self._ma = height - self._handle_padding + off\n self._mid = round((height / 2) + off)\n self._sbw = self._get_slider_bar_width()\n self._offset_sb = round((hw - self._sbw) / 2)\n\n def _draw_slider(self, cr, slider_index):\n # size\n width, height = self._get_sliders_size()\n\n # hover?\n is_hover = False\n if self._hover_slider_index is not None:\n is_hover = (slider_index == self._hover_slider_index)\n\n # colors\n hfg = self._handle_color\n hhfg = self._handle_hover_color\n sbg = self._slider_bar_color\n sbhg = self._slider_bar_hover_color\n lfg = self._label_color\n vfg = self._value_color\n vgfg = self._vert_guide_color\n\n # handle dimensions\n hw = self._get_handle_width()\n hh = self._get_handle_height()\n\n # get slider\n slider = self._sliders[slider_index]\n\n # get X\n x = self._border_width + self._slider_width * slider_index\n\n # center and top Y of handle\n ma_mi_diff = self._ma - self._mi\n center_y = self._ma - round(slider.get_value() * ma_mi_diff)\n top_y = center_y - self._handle_padding\n\n # vertical guide\n cr.set_source_rgb(vgfg[0], vgfg[1], vgfg[2])\n cr.set_line_width(1)\n nx = x + round(hw / 2)\n cr.move_to(nx, self._mi)\n cr.line_to(nx, self._ma)\n cr.stroke()\n\n # slider bar\n mid = self._ma - round(slider.get_mid() * ma_mi_diff)\n cr.set_source_rgb(sbg[0], sbg[1], sbg[2])\n if is_hover:\n cr.set_source_rgb(sbhg[0], sbhg[1], sbhg[2])\n cr.rectangle(x + self._offset_sb, mid, self._sbw, center_y - mid)\n cr.fill()\n\n # handle\n cr.set_source_rgb(hfg[0], hfg[1], hfg[2])\n if is_hover:\n cr.set_source_rgb(hhfg[0], hhfg[1], hhfg[2])\n cr.rectangle(x, top_y, hw, hh)\n cr.fill()\n\n # label\n cr.select_font_face('sans-serif')\n cr.set_font_size(9)\n cr.move_to(x + 1, height + 3 * self._border_width)\n cr.set_source_rgb(lfg[0], lfg[1], lfg[2])\n cr.show_text(slider.format_label())\n\n # value\n value = slider.format_value()\n cr.select_font_face('Fixed', cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_NORMAL)\n cr.set_font_size(9)\n 
cr.set_source_rgb(vfg[0], vfg[1], vfg[2])\n if is_hover:\n cr.set_source_rgb(hhfg[0], hhfg[1], hhfg[2])\n cr.move_to(x + 1, height + 3 * self._border_width + 12)\n cr.show_text(value)\n\n def _draw_sliders(self, cr, clip_rect):\n # do we even have sliders?\n if len(self._sliders) == 0:\n return\n\n # now we will only redraw the sliders that are affected by the clip region\n clip_x = clip_rect.x\n clip_w = clip_rect.width\n t_x = clip_x - self._border_width\n if t_x < 0:\n first_index = 0\n else:\n first_index = self._get_slider_index_at_x(t_x)\n last_index = self._get_slider_index_at_x(t_x + clip_w - 1)\n if last_index is None:\n last_index = len(self._sliders) - 1\n indexes = range(first_index, last_index + 1)\n\n # draw\n for slider_index in indexes:\n self._draw_slider(cr, slider_index)\n\n def _on_draw(self, drawing_area, cr):\n do_draw, clip_rect = Gdk.cairo_get_clip_rectangle(cr)\n if not do_draw:\n return False\n\n self._draw_sliders(cr, clip_rect)\n\n return False\n\n def _update_slider_at_pos(self, x, y):\n width, height = self._get_sliders_size()\n padding = self._get_handle_height() / 2\n x = round(x)\n y = round(y)\n mi = padding\n ma = height - padding\n if x < 0 or x >= width:\n return\n if y < mi:\n y = mi\n if y >= ma:\n y = ma\n\n # set slider's new value\n value = (ma - y) / (ma - mi)\n slider = self._get_slider_at_x(x)\n slider.set_value(value)\n\n def _redraw_slider_index(self, index):\n width, height = self._get_size()\n clip_x = self._border_width + index * self._slider_width\n clip_w = self._slider_width\n self.queue_draw_area(clip_x, 0, clip_w, height)\n\n def _on_button_press(self, widget, ev):\n x, y = self._translate_xy(ev.x, ev.y)\n if ev.button != 1:\n return\n self._locked_x = None\n if ev.state & Gdk.ModifierType.CONTROL_MASK:\n self._locked_x = x\n self._pressed = True\n self._update_slider_at_pos(x, y)\n index = self._get_slider_index_at_x(x)\n if index is not None:\n self._redraw_slider_index(index)\n\n def _on_button_release(self, 
widget, ev):\n if ev.button != 1:\n return\n x, y = self._translate_xy(ev.x, ev.y)\n self._locked_x = None\n self._pressed = False\n index = self._get_slider_index_at_x(x)\n if index is not None:\n self._redraw_slider_index(index)\n\n def _on_motion(self, widget, ev):\n x, y = self._translate_xy(ev.x, ev.y)\n if self._locked_x:\n x = self._locked_x\n cur_slider_index = self._get_slider_index_at_x(x)\n if cur_slider_index is None:\n if self._hover_slider_index is not None:\n self._redraw_slider_index(self._hover_slider_index)\n self._hover_slider_index = None\n return\n if self._hover_slider_index is not None:\n if cur_slider_index != self._hover_slider_index:\n # undraw old hover\n self._redraw_slider_index(self._hover_slider_index)\n self._hover_slider_index = None\n\n # update value?\n if self._pressed:\n self._update_slider_at_pos(x, y)\n\n # redraw slider\n self._hover_slider_index = cur_slider_index\n self._redraw_slider_index(cur_slider_index)\n\n def _on_scroll(self, widget, ev):\n y_scroll = ev.get_scroll_deltas()[2]\n x, y = self._translate_xy(ev.x, ev.y)\n slider = self._get_slider_at_x(x)\n if slider is None:\n return\n value = slider.get_value()\n if y_scroll < 0:\n value += slider.get_scroll_incr()\n if value > 1:\n value = 1\n elif y_scroll > 0:\n value -= slider.get_scroll_incr()\n if value < 0:\n value = 0\n slider.set_value(value)\n index = self._get_slider_index_at_x(x)\n if index is not None:\n self._redraw_slider_index(index)\n\n def _on_configure(self, widget, ev):\n self._precompute_stuff()\n\n def _on_enter(self, widget, ev):\n pass\n\n def _on_leave(self, widget, ev):\n if self._pressed:\n return\n if self._hover_slider_index is not None:\n self._redraw_slider_index(self._hover_slider_index)\n self._hover_slider_index = None\n\n\nclass EqWindow(Gtk.Window):\n INITIAL_NB_SLIDERS = 16\n GUTTER = 15\n MIN_HEIGHT = 300\n DEF_SLIDERS_BG_COLOR = (0.1, 0.1, 0.1)\n\n def __init__(self):\n # init parent window\n Gtk.Window.__init__(self, 
title=\"gpaeq\")\n\n # UI and stuff\n self._make_me_nice()\n self._init_ui()\n\n # signals\n self._on_resize_handle = self.connect('check-resize', self._on_resize)\n\n def _make_me_nice(self):\n self.set_position(Gtk.WindowPosition.CENTER)\n self.set_border_width(EqWindow.GUTTER)\n self.set_icon_from_file('res/equalizer.png')\n\n def _init_ui(self):\n # vbox for top and bottom\n self._vbox = Gtk.VBox(homogeneous=False, spacing=EqWindow.GUTTER)\n self.add(self._vbox)\n\n # init sliders\n self._init_top()\n self._init_bottom()\n\n def _init_top(self):\n # box\n self._top_hbox = Gtk.HBox(homogeneous=False, spacing=EqWindow.GUTTER)\n self._vbox.pack_start(self._top_hbox, False, False, 0)\n\n # list of sinks\n self._init_sinks_combo()\n\n def _init_bottom(self):\n # box for preamp and EQ sliders\n self._sliders_ev_box = Gtk.EventBox()\n self._sliders_hbox = Gtk.HBox(homogeneous=False, spacing=0)\n self._sliders_ev_box.add(self._sliders_hbox)\n def_color = EqWindow.DEF_SLIDERS_BG_COLOR\n bg_color = Gdk.Color(red=def_color[0] * 65535,\n blue=def_color[1] * 65535,\n green=def_color[2] * 65535)\n self._sliders_ev_box.modify_bg(Gtk.StateType.NORMAL, bg_color)\n self._vbox.pack_start(self._sliders_ev_box, True, True, 0)\n\n # init sliders\n self._init_preamp_slider()\n self._init_eq_sliders()\n\n def _init_sinks_combo(self):\n # label\n lbl = Gtk.Label(label='Sink:')\n lbl.modify_font(Pango.FontDescription('sans-serif 9'))\n self._top_hbox.pack_start(lbl, False, False, 0)\n\n # combo\n self._sinks_store = Gtk.ListStore(str)\n self._sinks_store.append(['alsa_output.pci-0000_01_00.1.hdmi-stereo'])\n self._sinks_store.append(['alsa_output.pci-0000_00_14.2.analog-stereo'])\n self._sinks_combo = Gtk.ComboBox.new_with_model(self._sinks_store)\n renderer_text = Gtk.CellRendererText()\n self._sinks_combo.modify_font(Pango.FontDescription('sans-serif 8'))\n self._sinks_combo.pack_start(renderer_text, True)\n self._sinks_combo.add_attribute(renderer_text, \"text\", 0)\n 
self._sinks_combo.set_active(0)\n self._top_hbox.pack_start(self._sinks_combo, False, False, 0)\n\n def _init_preamp_slider(self):\n self._preamp_sliders = Sliders()\n preamp_slider = EqDbSliderModel(0, 0, -12, 12, lambda: None)\n preamp_slider.set_scroll_incr(0.005)\n sliders_width = self._preamp_sliders.get_width_for_nb_sliders(1)\n self._preamp_sliders.set_sliders([preamp_slider])\n self._preamp_sliders.set_size_request(sliders_width, EqWindow.MIN_HEIGHT)\n self._sliders_hbox.pack_start(self._preamp_sliders, False, True, 0)\n\n def _init_eq_sliders(self):\n self._eq_sliders = Sliders()\n sliders = self._new_sliders(EqWindow.INITIAL_NB_SLIDERS)\n sliders_width = self._eq_sliders.get_width_for_nb_sliders(EqWindow.INITIAL_NB_SLIDERS)\n self._eq_sliders.set_sliders(sliders)\n self._eq_sliders.set_size_request(sliders_width, EqWindow.MIN_HEIGHT)\n self._sliders_hbox.pack_start(self._eq_sliders, True, True, 0)\n\n def _new_sliders(self, nb):\n sliders = []\n for i in range(nb):\n slider = EqDbSliderModel(i / nb * 20000, 0, -12, 12, lambda: None)\n slider.set_scroll_incr(0.005)\n sliders.append(slider)\n return sliders\n\n def _on_resize(self, window):\n nb_sliders = self._eq_sliders.get_nb_fitting_sliders()\n sliders = self._new_sliders(nb_sliders)\n self._eq_sliders.set_sliders(sliders)\n\n\ndef main():\n # enable Ctrl+C\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n # main app\n win = EqWindow()\n win.connect('delete-event', Gtk.main_quit)\n win.show_all()\n Gtk.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"gpaeq.py","file_name":"gpaeq.py","file_ext":"py","file_size_in_byte":20155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"57139124","text":"import json\nimport sys\nimport requests\nimport pprint\nimport urllib\n\nrequests.packages.urllib3.disable_warnings()\n\n\nclass SecurityCenterAPI: \n url = \"https://10.14.226.13\"\n username = \"\"\n password = \"\"\n token = ''\n cookie = ''\n\n def __init__(self): \n self.data = {}\n \n def set_url(self, url):\n self.url = url\n\t\t\n def build_url(self, restCall):\n \"\"\" Formats the SC URL with the rest API call\"\"\"\n return '{0}{1}'.format(self.url, restCall)\n\n def connect(self, method, resource, data=None, headers=None, cookies=None):\n \"\"\" The connect method is used to connect to SC and pass our API calls.\"\"\"\n if headers is None:\n headers = {'Content-type': 'application/json',\n 'X-SecurityCenter': str(self.token)}\n if data is not None:\n data = json.dumps(data)\n\n if method == \"POST\":\n r = requests.post(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n elif method == \"DELETE\":\n r = requests.delete(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n elif method == 'PATCH':\n r = requests.patch(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n else:\n r = requests.get(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n\n if r.status_code != 200:\n e = r.json()\n print(e['error_msg'])\n sys.exit()\n\n return r\n\n\n def login(self, uname, pword):\n \"\"\" Logs into SecurityCenter and retrieves our token and cookie.\n We create a seperate header here since we do not have a X-SecurityCenter token yet.\"\"\"\n headers = {'Content-Type':'application/json'}\n login = {'username': uname, 'password':pword}\n self.username = uname;\n self.password = pword;\n\n # We use the connect function and pass it a POST method, /rest/token resource,\n # and our login credentials as data. 
We also pass our headers from above for this function.\n # if the credentials fails to get token sys.exit will be called. \n data = self.connect('POST', '/rest/token', data=login, headers=headers)\n\n # We can pull the cookie out of our data object and store it as a variable.\n self.cookie = data.cookies\n\n # We can alo pull our token out from the returned data as well.\n self.token = data.json()['response']['token']\n return (self.cookie, self.token)\n\n # ------ UNCOMMENT THE CODE BELOW TO ENABLE THE FUNCTION. THIS WAS LEFT IN FOR REFERENCE. ------ #\n # ------ LINES WITH '##' ARE COMMENTS, YOU DO NOT NEED TO UNCOMMENT THOSE LINES. ------ #\n def get_assets(self):\n # Initiate an empty asset list.\n assets = []\n\n # Use the connect function with a GET method and /rest/asset resource.\n data = self.connect('GET', '/rest/asset')\n\n # Store the manageable assets in the results variable.\n results = data.json()['response']['manageable']\n\n # If results is empty, there are no manageable assets and the script exits.\n if not results:\n sys.exit(\"This user has no managed assets.\")\n else:\n # For each asset in our results file, append the asset ID to our asset list.\n for i in results:\n assets.append(i['id'])\n return assets\n\n def get_asset_by_id(self, id):\n #Get the asset group by its id. The ID should be a number\n data = self.connect('GET', '/rest/asset/{0}'.format(id))\n \n results = data.json()['response'];\n \n if not results:\n sys.exit(\"no managed assets\")\n else: \n return results;\n \n def update_hosts_by_asset_id(self, id, hosts_ips):\n #Post the hosts private IPs to the asset identified by ID\n #hosts_ips is an array of ips. 
\n patch_records = {'definedIPs' : ', '.join(hosts_ips)};\n \n data = self.connect('PATCH', '/rest/asset/{0}'.format(id), patch_records)\n results = data.json()['response'];\n \n if not results:\n sys.exit(\"No response from patch operation\");\n else:\n return results;\n \n def get_analysis_by_id(self, scanId): \n #Post the hosts with a commmand to get analysis by scanID. \n #scanID is an integer of the scan. \n\n begin_offset = 0;\n end_offset = 50;\n totalRecords = 50;\n totalRecordsIsValid = False;\n allAnalysisRecords = [];\n scanIDStr = str(scanId)\n\n\n while (begin_offset < totalRecords):\n query_data = {\n \"query\": {\n \"createdTime\":0,\n \"modifiedTime\":0,\n \"groups\":[],\n \"type\":\"vuln\",\n \"tool\":\"sumid\",\n \"sourceType\":\"individual\",\n \"startOffset\":begin_offset,\n \"endOffset\":end_offset,\n \"filters\":[],\n \"sortColumn\":\"severity\",\n \"sortDirection\":\"desc\",\n \"scanID\": scanIDStr, \n \"view\": \"all\"\n },\n \"sourceType\": \"individual\",\n \"scanID\": scanIDStr,\n \"sortField\": \"severity\",\n \"sortDir\": \"desc\",\n \"columns\":[],\n \"type\":\"vuln\"\n };\n\n data = self.connect('POST', '/rest/analysis', query_data);\n results = data.json()['response'];\n\n if (totalRecordsIsValid == False): \n #update totalRecords count once and only once\n totalRecords = results['totalRecords']\n totalRecords = int(totalRecords)\n totalRecordsIsValid = True; \n #print 'totalRecords: ' + totalRecords\n\n returnedRecordsCount = results['returnedRecords']\n #print 'returnedRecordsCount: ' + str(returnedRecordsCount);\n\n returnedRecords = results['results'];\n #print 'first record: ' + str(returnedRecords[0])\n allAnalysisRecords.extend(returnedRecords);\n begin_offset += returnedRecordsCount; \n #print 'begin_offset: ' + str(begin_offset)\n end_offset += returnedRecordsCount;\n #print 'end_offset: ' + str(end_offset)\n\n if not results: \n sys.exit(\"No response from patch operation\");\n \n return allAnalysisRecords\n\n def 
get_respository_fields(self): \n # this function apparently pulls the repository data. \n # this data will subsequently be used to construct a statement for acceptRiskRule API\n \n query_string = { 'fields' : 'name,description,type,dataFormat,modifiedTime,vulnCount,ipCount,typeFields'};\n encoded_query_string = urllib.urlencode(query_string)\n data = self.connect('GET', '/rest/repository'+ '?' + encoded_query_string);\n results = data.json()['response']\n\n return results;\n\n\n def acceptRiskSingleItem(self, pluginId, comments, expiration_date, hostType, name, repositories): \n query_data = {\n \"comments\": comments,\n \"expires\": expiration_date, #mockup, the real value is the epoch time of the date. \n \"hostType\": \"all\", #mockup\n #\"name\": \"RHEL-06-000019 - There must be no .rhosts or hosts.equiv files on the system - ~/.rhosts.\", #mockup \n \"name\": name,\n \"newSeverity\": {\n \"id\": 3\n },\n \"plugin\": {\n \"id\": str(pluginId)\n },\n \"port\": \"0\",\n \"protocol\": 6,\n \"repositories\": repositories\n }\n\n data = self.connect('POST', '/rest/acceptRiskRule', query_data)\n\n result = data.json()['response']\n\n return result;\n\n def postAcceptRiskSingleItem(self, query_data): \n data = self.connect('POST', '/rest/acceptRiskRule', query_data);\n result = data.json()['response'];\n return result;\n\n def transformRepositoriesForAcceptRisk(self, resposRawData):\n transformedReposArray = [];\n for repo in resposRawData : \n transformedRepo = {\n \"context\": \"\",\n \"correlation\": [],\n \"createdTime\": None, #-1 is for forever, transform to EpochTime for date/month/year\n \"dataFormat\": \"IPv4\",\n \"description\": repo[\"description\"],\n \"id\": repo[\"id\"],\n \"ipRange\": repo[\"typeFields\"][\"ipRange\"],\n \"modifiedTime\": repo[\"modifiedTime\"],\n \"name\": repo[\"name\"],\n \"organizations\": [],\n \"status\": None,\n \"trendWithRaw\": repo[\"typeFields\"][\"trendWithRaw\"],\n \"trendingDays\": 
repo[\"typeFields\"][\"trendingDays\"],\n \"type\": repo[\"type\"]\n }\n\n transformedReposArray.append(transformedRepo);\n\n return transformedReposArray\n\n \n #todo: take one line single item of vulnerabilty, get the respositories, fill it in the vuln request, \n # get the date, put in the date. \n","sub_path":"SC5API.py","file_name":"SC5API.py","file_ext":"py","file_size_in_byte":9418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"320438446","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 13:44:15 2018\n\n@author: hashemk\n\"\"\"\n\nfrom Aligner import Aligner\nfrom Neoantigen import Neoantigen\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\n\n\ndef immuno_app():\n\n \n test_xml_fname = '../pyFitnessNeoantigen/neoantigens_AL4602_iedb.xml'\n neo_fname = '/Users/hashemk/Desktop/Fitness_Model/SupplementaryDataFile7/InputData/neoantigens_Rizvi.txt'\n aln_dname = '/Users/hashemk/Desktop/Fitness_Model/SupplementaryDataFile7/InputData/neoantigen_alignments_Rizvi'\n fitness_output_fname = '/Users/hashemk/Desktop/Fitness_Model/SupplementaryDataFile7/MyTestOutput/neoantigens_Rizvi_fitness.txt'\n a = 26\n k = 4.86936\n eps = 1e-5\n \n \n [neoantigens, samples] = read_neoantigens(neo_fname)\n# [neoantigens, samples] = read_neoantigens_old(neo_fname)\n \n aligner = Aligner()\n for s in samples:\n xml_path = aln_dname + \"/neoantigens_\" + s + \"_iedb.xml\"\n# print(xml_path)\n aligner.read_blastp_xml(xml_path)\n aligner.compute_R(a= a, k=k)\n \n \n \n \n output_f = open(fitness_output_fname, 'w')\n header = ['NeoantigenID', 'Mutation', 'Sample', 'mtPosition', 'ResidueChangeClass', 'mtPeptide', 'wtPeptide', 'Excluded', 'A', \n 'R', 'Recognition_Potential', 'WtToMt_R_Ratio', 'allele', 'HLA']\n header = '\\t'.join(header)\n output_f.write(header+'\\n')\n \n for nid in neoantigens.keys():\n neo = neoantigens[nid]\n W = neo.get_weight() # excludes neoantigens that mutated from nonhydorphobic residue on position 2 or 9\n A = neo.get_A()\n mtpeptide = neo.mtPeptide #mutant peptide\n wtpeptide = neo.wtPeptide\n neo_mut_full_id = 'MUT_' + str(nid) + '_' + neo.coord \n neo_wt_full_id = 'WT_' + str(nid) + '_' + neo.coord \n \n \n R_mt = aligner.get_R(neo_mut_full_id) \n R_wt = aligner.get_R(neo_wt_full_id)\n fitness = A * R_mt * W\n \n A_wt_to_A_mt_log_ratio = np.log10( (R_wt + eps) / (R_mt+ eps) ) # eps to prevent dividing by zeros\n 
\n \n residue_change = neo.residue_change\n w = neo.get_weight()\n excluded = 1-w\n l = [nid, neo.coord, neo.sample, neo.position, residue_change, mtpeptide, wtpeptide, excluded, A, R_mt, \n fitness, A_wt_to_A_mt_log_ratio, neo.allele, neo.HLA]\n l = '\\t'.join(map(lambda s: str(s), l))\n output_f.write(l+'\\n')\n output_f.close()\n \n \n\n\n\ndef read_neoantigens(neo_fname):\n neoantigens = dict()\n neo_df = pd.read_csv(neo_fname, sep='\\t')\n print(neo_df.shape)\n \n # filter out rows for which the column 7 (MT.Score) is not defined\n neo_df = neo_df[neo_df.iloc[: , 7] != None]\n num_neos = neo_df.shape[0]\n for i in range(num_neos):\n one_row = np.array(neo_df.iloc[i, :])\n neoantigen = Neoantigen(one_row)\n neoantigens[neoantigen.id] = neoantigen\n neoantigen.set_A()\n samples = set(map(lambda neo: neo.get_sample_name(), neoantigens.values()))\n return([neoantigens, samples])\n\n \ndef read_neoantigens_odl(neo_fname):\n neoantigens = dict()\n \n f = open(neo_fname)\n header = f.readline()\n htab = header.strip().split('\\t')\n print(htab)\n \n hdict = dict()\n \n for i in range(0, len(htab)):\n hdict[htab[i]] = i\n line = f.readline()\n while line:\n line = line.strip()\n nparams = line.split('\\t')\n if nparams[7] == 'NA':\n line = f.readline()\n continue\n neoantigen = Neoantigen(nparams)\n neoantigens[neoantigen.id] = neoantigen\n neoantigen.set_A()\n line = f.readline()\n f.close()\n samples = set(map(lambda neo: neo.get_sample_name(), neoantigens.values())) \n \n return([neoantigens, samples])\n \n\nif __name__ == '__main__':\n immuno_app()","sub_path":"pyEpitope/pyImmunogenecity/immunogenecity_main.py","file_name":"immunogenecity_main.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"138350536","text":"num = []\na = 0\nohno = input(\"To find the sum and average of a string of numbers please key in the number seperated by a space: \")\nnum = ohno.split()\nsum = 0\ncount = 0\nfor i in range(len(num)):\n a = int(num[i])\n sum += a\n count += 1\navg = sum/count\nprint(\"The sum of number is: {}, the average is: {:.2f}\".format(sum,avg)) \n","sub_path":"Early Assignments/A2/A2Q1.py","file_name":"A2Q1.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"540322398","text":"\nimport argparse\nfrom ctapipe.utils import get_dataset_path\n\nfrom lstchain.reco import dl0_to_dl1\nfrom lstchain.reco import dl1_to_dl2\nfrom lstchain.visualization import plot_dl2\nimport matplotlib.pyplot as plt\nimport os\n\nparser = argparse.ArgumentParser(description = \"Train Random Forests.\")\n\n# Required argument\nparser.add_argument('--gammafile', '-fg', type=str,\n dest='gammafile',\n help='path to the dl1 file of gamma events for training',\n )\n\nparser.add_argument('--protonfile', '-fp', type=str,\n dest='protonfile',\n help='path to the dl1 file of proton events for training',\n )\n\nparser.add_argument('--storerf', '-srf', action='store', type=bool,\n dest='storerf',\n help='Boolean. True for storing trained RF in 3 files'\n 'Deafult=False, any user input will be considered True',\n default=False)\n\nparser.add_argument('--datafile', '-f', type=str,\n dest='datafile',\n help='path to the file with simtelarray events',\n default=get_dataset_path('gamma_test_large.simtel.gz'))\n\nparser.add_argument('--storeresults', '-s', action='store', type=bool,\n dest='storeresults',\n help='Boolean. 
True for storing the reco dl2 events'\n 'Default=False, any user input will be considered True',\n default=False)\n\n# Optional arguments\nparser.add_argument('--opath', '-om', action='store', type=str,\n dest='path_models',\n help='Path to store the resulting RF',\n default='./results/')\n\nparser.add_argument('--outdir', '-or', action='store', type=str,\n dest='outdir',\n help='Path where to store the reco dl2 events',\n default='./results/')\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n \n #Train the models\n \n features = ['intensity',\n 'time_gradient',\n 'width',\n 'length',\n 'wl',\n 'phi',\n 'psi']\n\n reg_energy, reg_disp, cls_gh = dl1_to_dl2.build_models(args.gammafile,\n args.protonfile,\n features,\n save_models=args.storerf,\n path_models=args.path_models,\n )\n\n #Get out the data from the Simtelarray file:\n \n data = dl0_to_dl1.get_events(args.datafile, False)\n\n \n #Apply the models to the data\n dl2 = dl1_to_dl2.apply_models(data, features, cls_gh, reg_energy, reg_disp)\n \n if args.storeresults==True:\n #Store results\n if not os.path.exists(args.outdir):\n os.mkdir(args.outdir)\n outfile = args.outdir + \"/dl2_events.hdf5\"\n dl2.to_hdf(outfile, key=\"dl2_events\", mode=\"w\")\n\n #Plot some results\n \n plot_dl2.plot_features(dl2)\n plt.show()\n plot_dl2.plot_E(dl2)\n plt.show()\n plot_dl2.plot_disp(dl2)\n plt.show()\n plot_dl2.plot_pos(dl2)\n plt.show()\n\n","sub_path":"scripts/lstpipe.py","file_name":"lstpipe.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"200205703","text":"\"\"\"\nUtility Classes\n\"\"\"\n\nimport sys\nimport collections\n\n# From https://github.com/benjaminp/six/blob/master/six.py\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\n\nstring_types = str\nfrom io import StringIO\n\n# https://stackoverflow.com/questions/16176742/python-3-replacement-for-deprecated-compiler-ast-flatten-function\n\n\ndef flatten(x):\n result = []\n\n for el in x:\n if isinstance(x, collections.Iterable) and not isstr(el):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\n\ndef isstr(s):\n return isinstance(s, str)\n\n\nclass _KwargParser:\n \"\"\"\n Helper function to emulate Python 3 keyword-only arguments.\n\n Use as::\n\n def func(x1, **kwargs):\n kw = KwargParser('func', kwargs)\n a = kw.pop('a')\n b = kw.pop('b', 2)\n kw.reject_remaining()\n ...\n\n To emulate the Python 3 syntax::\n\n def func(x1, *, a, b=2):\n ...\n \"\"\"\n def __init__(self, func_name, kwargs):\n self._func_name = func_name\n self._kwargs = kwargs\n\n def pop(self, arg_name, *default):\n try:\n return self._kwargs.pop(arg_name, *default)\n except KeyError:\n pass\n raise TypeError(\n '{}() missing required keyword-only argument {!r}'\n .format(self._func_name, arg_name)\n )\n\n def reject_remaining(self):\n if self._kwargs:\n # match the error message to what Python 3 produces\n bad_arg = next(iter(self._kwargs))\n raise TypeError(\n '{}() got an unexpected keyword argument {!r}'\n .format(self._func_name, bad_arg)\n )\n","sub_path":"galgebra/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"444415935","text":"import pip\nimport pkg_resources\nimport click\nimport sys\n\nfrom floyd.log import logger as floyd_logger\nfrom conda import cli as conda_cli\n\n\nPROJECT_NAME = \"floyd-cli\"\n\n\ndef pip_upgrade():\n pip.main([\"install\", \"--upgrade\", PROJECT_NAME])\n\n\ndef conda_upgrade():\n conda_cli.main(\"install\", \"-y\", \"-c\", \"floydhub\", \"-c\", \"conda-forge\", \"floyd-cli\")\n\n\n@click.command()\ndef version():\n \"\"\"\n Prints the current version of the CLI\n \"\"\"\n version = pkg_resources.require(PROJECT_NAME)[0].version\n floyd_logger.info(version)\n\n\n@click.command()\ndef upgrade():\n \"\"\"\n Upgrade floyd command line\n \"\"\"\n try:\n if 'conda' in sys.version or 'ontinuum' in sys.version:\n conda_upgrade()\n else:\n pip_upgrade()\n except Exception as e:\n floyd_logger.error(e)\n","sub_path":"floyd/cli/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"6240073","text":"from django.conf.urls import url\nfrom possiblebug.views import PublisherAutocomplete, AuthorAutocomplete\nfrom possiblebug import views\n\nurlpatterns = [\n\n\n url(\n r'^author-autocomplete/$',\n AuthorAutocomplete.as_view(),\n name='author-autocomplete',\n ),\n url(\n r'^publisher-autocomplete/$',\n PublisherAutocomplete.as_view(),\n name='publisher-autocomplete',\n ),\n\n url(r'^test/$', views.add_new_book, name=\"test\"),\n\n]","sub_path":"test_project/possiblebug/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"482928025","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom MainWindow import Ui_MainWindow\n\nclass View(QtWidgets.QMainWindow):\n\n # Конструктор, створює базове представлення та пов'язує\n # Інтерфейс с Пред'явником\n def __init__(self, Presenter, Model, parent=None):\n super(QtWidgets.QMainWindow, self).__init__(parent)\n self.Presenter = Presenter\n self.Model = Model\n # Створення базового GUI\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.ui.tableCoef.cellChanged.connect(self.validate_cell)\n self.ui.tableFree.cellChanged.connect(self.validate_cell)\n self.ui.comboVarNumber.currentIndexChanged.connect(self.Presenter.change_varnum)\n self.ui.buttonGenerate.clicked.connect(self.Presenter.generate)\n self.ui.buttonSolveSystem.clicked.connect(self.Presenter.solve)\n self.n = 6\n self.ui.buttonSolveSystem.setDisabled(True)\n\n # Метод, що змінює Представлення у разі зміни моделі\n def model_changed(self, n=None, A=None, b=None):\n # Зміна таблиць та шрифту, якщо змінено розмір\n if n:\n self.ui.tableCoef.setRowCount(n)\n self.ui.tableCoef.setColumnCount(n)\n self.ui.tableFree.setRowCount(n)\n self.ui.tableVariables.setRowCount(n)\n self.n = n\n if n == 2:\n self.font = QtGui.QFont(\"Arial\", 27)\n elif n == 3:\n self.font = QtGui.QFont(\"Arial\", 24)\n elif n == 4:\n self.font = QtGui.QFont(\"Arial\", 21)\n elif n == 5:\n self.font = QtGui.QFont(\"Arial\", 18)\n elif n == 6:\n self.font = QtGui.QFont(\"Arial\", 15)\n # Заміна елементів у разі зміни СЛАУ (A і b)\n # Зміна шрифту у разі зміни розміру\n for i in range(self.n):\n if b:\n item = QtWidgets.QTableWidgetItem(str(b[i]))\n self.ui.tableFree.setItem(i, 0, item)\n else:\n item = self.ui.tableFree.item(i, 0)\n if item and n:\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n for j in range(self.n):\n if A:\n item = QtWidgets.QTableWidgetItem(str(A[i][j]))\n self.ui.tableCoef.setItem(i, j, item)\n else:\n item = 
self.ui.tableCoef.item(i, j)\n if item and n:\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n\n # Перевірка на коректність введених даних\n # У разі помилки виводить повідомлення\n # Інакше передає управління Пред'явнику\n def validate_cell(self, row, column):\n table_sender = self.sender()\n table_name = 0 if table_sender.columnCount() == 1 else 1\n item = table_sender.item(row, column)\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n if item:\n text = item.text()\n text = text.strip()\n if text == '':\n item.setText('')\n self.ui.buttonSolveSystem.setDisabled(True)\n self.Presenter.clear_variables()\n elif not is_number(text):\n QtWidgets.QMessageBox.warning(self, \"Помилка вводу!\", \"У матриці мають бути лише числа!\")\n item.setText('')\n self.ui.buttonSolveSystem.setDisabled(True)\n self.Presenter.clear_variables()\n else:\n self.Presenter.change_cell(row, column, table_name, float(text))\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n item.setText(text)\n\n # Встановлення таблиці змінних і інформації про час виконання\n def set_variables(self, X, eps, t):\n self.ui.tableVariables.setMaximumWidth(80 + eps * 15)\n for i in range(self.n):\n item = QtWidgets.QTableWidgetItem(str(round(X[i], eps)))\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.ui.tableVariables.setItem(i, 0, item)\n t = str(round(t * 100, 3))\n self.ui.labelInfo.setText(\"Система розв'язана за \" + t + \" мс.\")\n\n\n\n\n","sub_path":"View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"198039685","text":"def create_intervals(data):\n \"\"\"\n Create a list of intervals out of set of ints.\n \"\"\"\n if len(data) == 0:\n return []\n if len(data) == 1:\n return [data[0],data[0]]\n out = []\n d = sorted(data)\n\n curr_lower = d[0]\n curr_upper = 0\n for i in range(1, len(d)):\n if d[i]-1 == d[i-1]:\n curr_upper = d[i]\n if i == len(d)-1:\n out.append((curr_lower, curr_upper))\n else:\n out.append((curr_lower, d[i-1]))\n curr_lower = d[i]\n if i == len(d)-1:\n out.append((curr_lower, d[-1]))\n\n return sorted(list(set(out)))\n\n\n# if __name__ == '__main__':\n# # These \"asserts\" using only for self-checking and not necessary for auto-testing\n# assert create_intervals({1, 2, 3, 4, 5, 7, 8, 12}) == [\n# (1, 5), (7, 8), (12, 12)], \"First\"\n# assert create_intervals({1, 2, 3, 6, 7, 8, 4, 5}) == [(1, 8)], \"Second\"\n# print('Almost done! The only thing left to do is to Check it!')\n\nprint(create_intervals([]), \"WHAT SHOULD BE THE RESULT\")\nprint(create_intervals({1, 2, 3, 4, 5, 7, 8, 12}), [(1, 5), (7, 8), (12, 12)])\nprint(create_intervals({1, 2, 3, 6, 7, 8, 4, 5}), [(1, 8)])\n","sub_path":"python/create-intervals.py","file_name":"create-intervals.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"73701353","text":"\"\"\" prinzipiell völlig richtig, lösbar in Zeit aber nur durch Einbau von C \"\"\"\nfrom collections import deque\nfrom sys import stdin, stdout\ninput = stdin.readline\nprin = stdout.write\nt = int (input ())\nfor _ in range (t):\n n, m = map (int, input ().split ())\n a = deque ([0]) + deque ([int (x) for x in input ().split ()])\n d = deque ([-1 for _ in range (len (a))])\n b = deque ([0]) + deque ([int (x) for x in input ().split ()])\n for i in range (1, m + 1):\n for j in range (i, n + 1, i):\n if a [j] < b [i] and d [j] == -1: d [j] = i\n d.popleft ()\n for h in d:\n prin (str (h) + \"\\n\")\n\"\"\"\nimport subprocess\nimport os\nfrom sys import stdin,stdout\nwith open(\"code.c\",\"w\") as f:\n f.write(r'''\n #include\n #include\n int main()\n {\n int t;\n scanf(\"%d\",&t);\n while(t--)\n {\n int n,m;\n scanf(\"%d%d\",&n,&m);\n int *arr = (int *)malloc(sizeof(int)*(n+1)),*brr = (int *)malloc(sizeof(int)*(m+1));\n int *crr = (int *)malloc(sizeof(int)*(n+1));\n for(int i=1;i<=n;i++)\n {\n scanf(\"%d\",&arr[i]);\n crr[i]=-1;\n }\n for(int i=1;i<=m;i++)\n {\n scanf(\"%d\",&brr[i]);\n }\n for(int i=1;i<=m;i++)\n {\n for(int j=i;j<=n;j+=i)\n if(brr[i]>=arr[j] && crr[j]==-1)\n crr[j]=i;\n }\n for(int i=1;i<=n;i++)\n printf(\"%d\\n\",crr[i]);\n free(arr);\n free(brr);\n free(crr);\n }\n \n return 0;\n }\n ''')\nsubprocess.check_output(['gcc','code.c','-o','code'])\nprocess = subprocess.Popen(['./code'],stdin=stdin,stdout=subprocess.PIPE)\nprint(process.communicate()[0].decode())\n\"\"\"\n","sub_path":"hackercup_quali/hackerearth/health_of_person.py","file_name":"health_of_person.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"531128551","text":"def how_many_days(year_start, year_end):\n\tif year_start > year_end: return 0\n\tdef add_up(year, d):\n\t\tr = year % d - 1\n\t\tif r < 0: return r + d\n\t\telse: return r\n\ttotal_years = year_end - year_start + 1\n\tfour_div_years = (total_years + add_up(year_start, 4)) // 4\n\th_div_years = (total_years + add_up(year_start, 100)) // 100\n\tfh_div_years = (total_years + add_up(year_start, 400)) // 400\n\tlunar_years = four_div_years - h_div_years + fh_div_years\n\treturn (total_years - lunar_years) * 365 + lunar_years * 366\n\nimport sys\nr = sys.stdin.readline\n\ncy, cm, cd = map(int, r().split())\nny, nm, nd = map(int, r().split())\n\nif ny - cy >= 1000:\n\tif nm > cm or nm == cm and nd > cd or nm == cm and nd == cd:\n\t\tprint(\"gg\")\n\t\tsys.exit(0)\n\ndays_dict = {\n\t1:31,\n\t2:28,\n\t3:31,\n\t4:30,\n\t5:31,\n\t6:30,\n\t7:31,\n\t8:31,\n\t9:30,\n\t10:31,\n\t11:30,\n\t12:31,\n}\n\ndef is_lunar(year):\n\tif how_many_days(year, year) == 366:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef date_value_inclusive(y, m, d):\n\tbef = how_many_days(1, y-1)\n\tfor i in range(1,m): bef += days_dict[i]\n\tif m > 2 and is_lunar(y):\n\t\tbef += 1\n\treturn bef + d\n\ndef one_day_small(y,m,d):\n\tif d == 1:\n\t\tif m == 1:\n\t\t\ty -= 1\n\t\t\tm, d = 12, 31\n\t\telse:\n\t\t\tm -= 1\n\t\t\tif m == 2 and is_lunar(y): d = 29\n\t\t\telse: d = days_dict[m]\n\telse:\n\t\td -= 1\n\treturn y, m, d\n\t\ncy, cm, cd = one_day_small(cy, cm, cd)\nny, nm, nd = one_day_small(ny, nm, nd)\nprint(\"D-{}\".format(date_value_inclusive(ny,nm,nd)-date_value_inclusive(cy,cm,cd)))","sub_path":"1000/01308_baekjoon.py","file_name":"01308_baekjoon.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"292661097","text":"class QueryBuilder:\n def __init__(self, query, strings):\n \"\"\"Constructor of the class. Creates the attributes of the class and checks\n if the constraints are respected (see doc).\n Inputs :\n :query: A list containing strings. Corresponds to the query the user wants to do.\n This arguments must match the constraints of the \"Sparse Arrays\" problem (see doc).\n :strings: A list containing strings. Corresponds to the array the user wants to search into.\n This arguments must match the constraints of the \"Sparse Arrays\" problem (see doc).\n \"\"\"\n\n # Checks if the constraints are respected for the two inputs\n for arg_name, arg_value in {\"query\": query, \"strings\": strings}.items():\n # List type constraint\n if not isinstance(arg_value, (list, tuple)):\n raise TypeError(\"The argument \" + arg_name + \" has type \" + str(type(arg_value)) + \".\"\n + \" List or tuple was expected.\")\n if not 1 <= len(arg_value) <= 1000:\n raise TypeError(\"The argument \" + arg_name + \" has length \" + str(len(arg_value)) + \".\"\n + \" Expected a length between 1 and 1000.\")\n\n for i in range(len(arg_value)):\n # String type constraint\n if type(arg_value[i]) is not str:\n raise TypeError(\"Index \" + str(i) + \" of \" + arg_name\n + \" has type \" + str(type(arg_value[i])) + \".\"\n + \" String was expected.\")\n # Length of the strings constraint\n if not 1 <= len(arg_value[i]) <= 20:\n raise ValueError(\"Index \" + str(i) + \" of \" + arg_name\n + \" has \" + str(len(arg_value[i])) + \" characters.\"\n + \" Expected a length between 1 and 20.\")\n\n\n # Set the attributes\n self.query = query\n self.strings = strings\n\n def search(self):\n\n \"\"\"Searches for the number of occurrences of each queries in the strings attribute.\n Returns :\n :query_result: A dictionary with {string_query : string_occurrence}\n \"\"\"\n\n query_results = dict()\n\n for q in self.query:\n # Computes the number of occurrence of the query q in the 
input collection\n occurrence = self.strings.count(q)\n query_results[q] = occurrence\n\n return query_results\n","sub_path":"app/sparse_arrays.py","file_name":"sparse_arrays.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"245226759","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport warnings\nimport textract\nimport traceback\nimport extractEntities as entity\nfrom gensim.summarization import summarize\nimport PyPDF2\nimport jsonGetCategory as skills\nfrom extract_exp import ExtractExp\nfrom striprtf.striprtf import rtf_to_text\nfrom pathlib import Path\nimport json\nimport boto3\nfrom time import gmtime, strftime\nimport shutil\n\nfrom functools import partial\nimport dask\nfrom dask.diagnostics import ProgressBar\nimport numpy as np\nfrom multiprocessing import Process,Manager\n\n\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\n\nglobal rootpath\nglobal bucket_name\nbucket_name = 'resume-rank-bucket' \nrootpath = \"resume-rank-bucket\"\nglobal pathSeprator\npathSeprator = '/'\n\nclass ResultElement:\n def __init__(self, jd, filename,totalExp, phoneNo, email, exp,\n finalRank,skills,nonTechskillList,min_qual,is_min_qual,candidateName,isJobTitlePresent,badWords):\n self.jd = jd\n self.filename = filename\n self.totalExp = totalExp\n self.phoneNo = phoneNo\n self.email = email\n self.exp = exp\n self.finalRank = finalRank\n self.primarySkills = skills\n self.softSkills = nonTechskillList\n self.min_qual = min_qual\n self.is_min_qual = is_min_qual\n self.candidateName = candidateName\n self.isJobTitlePresent = isJobTitlePresent\n self.badWords = badWords\n \n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__, \n sort_keys=True, indent=4)\n\ndef getfilepath(loc):\n temp = str(loc)\n temp = temp.replace('\\\\', '/')\n return temp\n\ndef _s3_download(s3,bucket_name,path_to_read_file,i):\n try:\n s3.Bucket(bucket_name).download_file(i,path_to_read_file)\n \n except Exception as e:\n print(e)\n\ndef threaded_process(resume_chunk,final_path,jobfile,skillset,min_qual,jd_exp,resumePath,flask_return,must_have_skill,job_title,\n 
jd_weightage,skill_weightage,min_qual_weightage,non_tech_weightage,exp_weightage,soft_skill=\"\",programming_skill=\"\"):\n\n not_found = 'Not Found'\n extract_exp = ExtractExp()\n\n \n for count,j in enumerate(resume_chunk):\n resume_text = \"\"\n temp_path = j.rsplit('/',1)\n i = final_path+pathSeprator+temp_path[1]\n Temp = i.rsplit('.',-1)\n \n if Temp[1] == \"pdf\" or Temp[1] == \"Pdf\" or Temp[1] == \"PDF\":\n try:\n Temp_pdf = [] \n with open(i,'rb') as pdf_file:\n \n read_pdf = PyPDF2.PdfFileReader(pdf_file,strict=False)\n # page = read_pdf.getPage(0)\n # page_content = page.extractText()\n # Resumes.append(Temp_pdf)\n\n number_of_pages = read_pdf.getNumPages()\n for page_number in range(number_of_pages): \n\n page = read_pdf.getPage(page_number)\n page_content = page.extractText()\n page_content = page_content.replace('\\n', ' ')\n # page_content.replace(\"\\r\", \"\")\n Temp_pdf = str(Temp_pdf) + str(page_content)\n # Temp_pdf.append(page_content)\n # print(Temp_pdf)\n resume_text = [Temp_pdf]\n Temp_pdf = ''\n \n except Exception as e: \n print(e)\n print(traceback.format_exc())\n \n elif Temp[1] == \"rtf\" or Temp[1] == \"Rtf\" or Temp[1] == \"RTF\":\n \n try:\n \n rtf_path = Path(i)\n with rtf_path.open() as source:\n docText = rtf_to_text(source.read())\n \n c = [docText]\n resume_text = c\n \n except Exception as e: print(e)\n \n elif Temp[1] == \"docx\" or Temp[1] == \"Docx\" or Temp[1] == \"DOCX\":\n try:\n a = textract.process(i)\n a = a.replace(b'\\n', b' ')\n a = a.replace(b'\\r', b' ')\n b = str(a)\n c = [b]\n resume_text = c\n except Exception as e: print(e)\n \n elif Temp[1] == \"txt\" or Temp[1] == \"Txt\" or Temp[1] == \"TXT\":\n try:\n f = open(i,'r')\n lines = f.readlines()\n a = \"\\n\".join(lines)\n c = [str(a)]\n resume_text = c\n f.close()\n except Exception as e: print(e) \n \n elif Temp[1] == \"ex\" or Temp[1] == \"Exe\" or Temp[1] == \"EXE\":\n print(\"This is EXE\" , i)\n pass\n\n temptext = str(resume_text).lower()\n tttt = 
str(resume_text).lower()\n \n \n try:\n if(skills.dndResume(temptext,must_have_skill)):\n continue\n try:\n tttt = summarize(tttt, word_count=100)\n except Exception:\n continue\n jd_rankDict = skills.JDkeywordMatch(jobfile+skillset, temptext, jd_weightage)\n \n badWords = skills.word_polarity(temptext)\n \n min_qual_score = skills.minQualificationScore(temptext,min_qual,min_qual_weightage)\n confidence = {}\n score = int((min_qual_score/min_qual_weightage)*100)\n confidence['confidence'] = score\n if score >= 60:\n confidence['min qual'] = 'Yes'\n elif score < 60 and score > 0:\n confidence['min qual'] = 'May Be'\n else:\n confidence['min qual'] = 'No'\n is_min_qual = confidence\n \n \n resume_skill_list = skills.skillSetListMatchedWithJD(temptext.lower(),jobfile+skillset,skill_weightage,programming_skill)\n experience = extract_exp.get_features(temptext)\n temp_applicantName = entity.extractPersonName(temptext)\n bool_jobTitleFound = entity.isJobTitleAvailable(job_title, temptext)\n temp_phone = entity.extract_phone_numbers(temptext)\n if(len(temp_phone) == 0):\n Resume_phoneNo_vector = not_found\n else:\n Resume_phoneNo_vector = list(set(temp_phone))\n temp_email = entity.extract_email_addresses(temptext)\n if(len(temp_email) == 0):\n Resume_email_vector = not_found\n else:\n Resume_email_vector = list(set(temp_email))\n \n \n Resume_exp_vector = extract_exp.get_exp_weightage(str(jd_exp),experience,exp_weightage)\n \n non_tech_Score = skills.NonTechnicalSkillScore(temptext,jobfile+skillset,non_tech_weightage)\n Resume_non_skill_list = skills.nonTechSkillSetListMatchedWithJD(temptext,jobfile+skillset,non_tech_Score,soft_skill)\n \n final_rating = jd_rankDict.get('rank')+resume_skill_list.get('rank')+non_tech_Score+extract_exp.get_exp_weightage(str(jd_exp),experience,exp_weightage)+min_qual_score\n \n res = ResultElement(jd_rankDict,j,experience,Resume_phoneNo_vector,Resume_email_vector,\n Resume_exp_vector,round(final_rating),resume_skill_list,\n 
Resume_non_skill_list,min_qual_score,is_min_qual,temp_applicantName,bool_jobTitleFound,badWords)\n flask_return.append(res)\n \n except Exception:\n print(traceback.format_exc())\n\n\ndef res(jobfile,skillset,jd_exp,min_qual, job_title,input_json,aws_path,must_have_skill, s3_resource, fs, bucket_name,soft_skill=\"\",programming_skill=\"\"):\n\n LIST_OF_FILES = []\n LIST_OF_FILES_PDF = []\n LIST_OF_FILES_DOC = []\n LIST_OF_FILES_DOCX = []\n s3 = boto3.resource('s3')\n root_path='temp/'\n jd_weightage = input_json[\"weightage\"][\"jd\"]\n skill_weightage = input_json[\"weightage\"][\"skill\"]\n min_qual_weightage = input_json[\"weightage\"][\"minimum_qualification\"]\n non_tech_weightage = input_json[\"weightage\"][\"soft_skill\"]\n \n exp_weightage = 0\n if (str(input_json[\"weightage\"][\"experience\"][\"required\"]).lower() == 'true'):\n exp_weightage = input_json[\"weightage\"][\"experience\"][\"allocation\"]\n \n resumePath = bucket_name+pathSeprator+aws_path+pathSeprator+'Upload-Resume'\n \n #print('length of resume list is ', len(resume_name_inS3))\n \n for file in fs.glob(resumePath+'/*.pdf'):\n LIST_OF_FILES_PDF.append(file)\n for file in fs.glob(resumePath+'/*.doc'):\n LIST_OF_FILES_DOC.append(file)\n for file in fs.glob(resumePath+'/*.docx'):\n LIST_OF_FILES_DOCX.append(file)\n for file in fs.glob(resumePath+'/*.rtf'):\n LIST_OF_FILES_DOCX.append(file)\n for file in fs.glob(resumePath+'/*.txt'):\n LIST_OF_FILES_DOCX.append(file) \n\n LIST_OF_FILES = LIST_OF_FILES_DOC + LIST_OF_FILES_DOCX + LIST_OF_FILES_PDF\n print(\"Resume File list size \",len(LIST_OF_FILES))\n \"\"\" here we are creating the directory under temp folder\"\"\"\n sub_dir = aws_path.split(pathSeprator)[0]\n final_path = root_path+sub_dir+strftime(\"%H%M%S\", gmtime())\n if not os.path.exists(final_path):\n os.makedirs(final_path)\n print(\"directory created\",final_path)\n \n print(\"Resume download process starts\")\n dask.config.set(scheduler='threads', num_workers=20)\n _download = 
partial(_s3_download, s3,bucket_name)\n delayed_futures = [] \n for count,i in enumerate(LIST_OF_FILES):\n i = i.replace(bucket_name+pathSeprator, \"\")\n head, fileName = os.path.split(i)\n path_to_read_file = final_path+pathSeprator+fileName\n delayed_futures.append(dask.delayed(_download)(path_to_read_file,i))\n with ProgressBar():\n dask.compute(*delayed_futures) \n\n flask_return = []\n \n n_threads = 5\n inputFileSize = len(LIST_OF_FILES)\n if inputFileSize == 1 or inputFileSize == 2:\n n_threads=1\n elif inputFileSize == 3 or inputFileSize == 4 or inputFileSize == 5:\n n_threads=2\n \n array_chunk = np.array_split(LIST_OF_FILES, n_threads)\n procs = []\n print(\"Resume processing started...\")\n with Manager() as manager:\n flask_return = manager.list()\n for thr in range(n_threads):\n # print(name)\n proc = Process(target=threaded_process, args=(array_chunk[thr],final_path,jobfile,skillset,min_qual,jd_exp,resumePath,flask_return,must_have_skill,job_title,jd_weightage,skill_weightage,min_qual_weightage,non_tech_weightage,exp_weightage,soft_skill,programming_skill))\n procs.append(proc)\n proc.start()\n \n for proc in procs:\n proc.join()\n \n flask_return = list(flask_return)\n try:\n shutil.rmtree(final_path, ignore_errors=True)\n except:\n print(\"unable to delete directory \",final_path)\n\n return flask_return","sub_path":"jsoncore.py","file_name":"jsoncore.py","file_ext":"py","file_size_in_byte":11056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"38732379","text":"from django.shortcuts import render, get_object_or_404, redirect\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom App.forms import NewStudentForm, StudentUpdateForm\r\nfrom App.models import Student\r\nfrom django.urls import reverse\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\n# our homepage.\r\ndef index(request):\r\n context = {\"user\": request.user, \"state\": True}\r\n return render(request, \"App/index.html\", context)\r\n\r\n\r\n# get all student records.\r\n@login_required\r\ndef all_students(request):\r\n if request.method == \"GET\":\r\n students = Student.objects.all()\r\n if students:\r\n context = {\"data\": students, \"state\": False}\r\n return render(request, \"App/allStudents.html\", context)\r\n else:\r\n context = {\"message\": \"No student records found\", \"option\": \"add\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n context = {\"message\": \"POST is not allowed!\"}\r\n return render(request, \"App/404.html\", context)\r\n\r\n\r\n# adds a new student record to the database.\r\n@login_required\r\ndef add_student(request):\r\n if request.method == \"POST\":\r\n student_form = NewStudentForm(request.POST)\r\n if student_form.is_valid():\r\n cd = student_form.cleaned_data\r\n # std = get_object_or_404(Student,Registration=cd['Registration'])\r\n if Student.objects.filter(Registration=cd[\"Registration\"]).exists():\r\n\r\n # get the instance to send to error page\r\n student = Student.objects.get(Registration=cd[\"Registration\"])\r\n\r\n context = {\"message\": \"Student already exists!\", \"data\": student}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n student_form.save()\r\n student_data = Student.objects.get(Registration=cd[\"Registration\"])\r\n # print(\r\n # \"{} : {}\".format(student_data.FirstName, student_data.Registration)\r\n # )\r\n context = {\"data\": student_data}\r\n 
return render(request, \"App/studentDetails.html\", context)\r\n else:\r\n context = {\"message\": \"Invalid form! Try to add again.\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n return HttpResponse(\"GET is not allowed!\")\r\n\r\n\r\n# updates our student records.\r\n@login_required\r\ndef update_student(request, slug):\r\n if request.method == \"POST\":\r\n student_update_form = StudentUpdateForm(request.POST)\r\n if student_update_form.is_valid():\r\n cd = student_update_form.cleaned_data\r\n Student.objects.filter(slug=slug).update(\r\n FirstName=cd[\"FirstName\"],\r\n SecondName=cd[\"SecondName\"],\r\n Registration=cd[\"Registration\"],\r\n Hostel=cd[\"Hostel\"],\r\n LaptopSerialNumber=cd[\"LaptopSerialNumber\"],\r\n ) \r\n # send message to front-end using dajngo messages frmaework.\r\n messages.info(request, \"{} {} updated successfully!\".format(cd[\"FirstName\"],cd[\"SecondName\"]))\r\n return redirect(\"index\")\r\n\r\n else:\r\n context = {\"message\": \"Form submitted is invalid\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n context = {\"message\": \"GET is not allowed!\"}\r\n return render(request, \"App/404.html\", context)\r\n\r\n\r\n# deletes student instance.\r\n@login_required\r\ndef delete_student(request, reg):\r\n student = Student.objects.get(slug=reg)\r\n student.delete()\r\n messages.info(request, \"{} deleted successfully!\".format(student.FirstName))\r\n return redirect(\"index\")\r\n\r\n\r\n# searches databases for student with unipue slug.\r\n@login_required\r\ndef search_student(request, reg):\r\n if request.method == \"GET\":\r\n try:\r\n student = Student.objects.get(slug__exact=reg)\r\n if student:\r\n context = {\"data\": student, \"state\": False}\r\n return render(request, \"App/studentDetails.html\", context)\r\n except:\r\n context = {\"message\": \"Student not found\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n context = {\"message\": \"POST is not 
allowed!\"}\r\n return render(request, \"App/404.html\", context)\r\n\r\n","sub_path":"HGMS/App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"76260081","text":"\n\nfrom xai.brain.wordbase.verbs._complain import _COMPLAIN\n\n#calss header\nclass _COMPLAINING(_COMPLAIN, ):\n\tdef __init__(self,): \n\t\t_COMPLAIN.__init__(self)\n\t\tself.name = \"COMPLAINING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"complain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_complaining.py","file_name":"_complaining.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"381855715","text":"\"\"\"\nCircle class that represents circle objects and handles exceptions\nCreated Spring 2019\nHomework 10\n@author: Ethan Walters (emw45)\n\"\"\"\n\nimport sys\nimport math\nimport turtle\n\n\nclass Circle:\n \"\"\"Initialize the Circle class constructor\"\"\"\n def __init__(self, x=0, y=0, radius=100, color='black', filled=False, window=turtle.Screen(),\n pen=turtle.Turtle()):\n if radius > 0:\n self.x = x\n self.y = y\n self.center = (x, y)\n self.radius = radius\n self.color = color\n self.filled = filled\n self.window = window\n self.pen = pen\n else:\n raise ValueError\n\n def __str__(self):\n \"\"\"Create a string method to print the Circle's current state and values\"\"\"\n\n return 'X Position: %s\\nY Position: %s\\nCenter Position: %s\\nRadius: %s' \\\n '\\nColor: %s\\nFilled: %s\\nArea: %s\\nCircumference: %s'\\\n % (self.x, self.y, self.center, self.radius, self.color, self.filled, self.get_area(),\n self.get_circumference())\n\n def get_area(self):\n \"\"\"Create an accessor method to get the area of the circle\"\"\"\n\n return round(math.pi * self.radius ** 2, 2)\n\n def get_circumference(self):\n \"\"\"Create an accessor method to get the circumference of the circle\"\"\"\n\n return round(2 * math.pi * self.radius, 2)\n\n def modify_radius(self, delta):\n \"\"\"Create a mutator method set the circle radius\"\"\"\n\n self.radius = delta\n\n def overlaps(self):\n \"\"\"Create a method to define which circles overlap\"\"\"\n\n distance = math.sqrt((self.x - self.x1) ** 2 + (self.y - self.y1) ** 2)\n radius1 = 130\n radii_sum = self.radius + radius1\n if distance < radii_sum:\n return True\n else:\n return False\n\n def render(self):\n \"\"\"Create a render method to draw the circle\"\"\"\n\n self.pen.hideturtle()\n self.pen.penup()\n self.pen.goto(self.center)\n self.pen.pendown()\n self.pen.circle(self.radius)\n self.window.exitonclick()\n\n\nif __name__ == '__main__':\n try:\n # Circle with sufficient values (positive 
radius)\n c1 = Circle(56, 34, 67, 'orange', False)\n c1.render()\n # Circle with insufficient values (negative radius)\n c2 = Circle(34, 67, -35, 'red', True)\n c2.render()\n except ValueError:\n print('Radius of circle must be greater than 0')\n sys.exit(-1)\n","sub_path":"homework10/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"402203527","text":"import psycopg2, os, psycopg2.pool, psycopg2.extras, time\n\nclass Repo(object):\n pool = psycopg2.pool.ThreadedConnectionPool(5, 20, os.environ.get(\"DATABASE_URL\"))\n\n @classmethod\n def insert(klass, replay):\n conn = klass.pool.getconn()\n cursor = conn.cursor()\n\n query = \"INSERT INTO replays (id, url, version, map, played_at, players) values (%s, %s, %s, %s, %s, %s)\"\n arguments = [\n replay.id(),\n replay.url,\n replay.version(),\n replay.map,\n time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", replay.played_at),\n psycopg2.extras.Json(map(lambda player: player.to_dict(), replay.players))\n ]\n\n cursor.execute(query, arguments)\n conn.commit()\n\n klass.pool.putconn(conn)\n\n return replay\n\n @classmethod\n def one(klass, query, args):\n conn = klass.pool.getconn()\n cursor = conn.cursor()\n cursor.execute(query, args)\n\n result = cursor.fetchone()\n\n klass.pool.putconn(conn)\n\n return result\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"105863516","text":"from zhak_projects.interface_request.safety_inspection import doc_wsmc_wszl_modulename\nfrom zhak_projects.interface_request.safety_inspection.document_interfaces import documents_controller\nfrom zhak_projects.interface_request.safety_inspection.document_interfaces.documents_models import all_documents_models\nfrom zhak_projects.interface_request.safety_inspection.interface_exception_print import myprint\n\n\ndef interface_test_documents_list():\n \"\"\"\n 测试 文书的 list\n \"\"\"\n exceptions_module_name = []\n documents_controller.pageSize = 1\n for doc in all_documents_models():\n print(doc.controller_name)\n try:\n content = documents_controller.list(doc.controller_name)\n myprint(content)\n except:\n exceptions_module_name.append(doc.controller_name)\n print(\"error_documents\", exceptions_module_name)\n\n\n# content = generate_documents_template.list(\"docRegistCaseAudit\")\n# print(json.dumps(content, ensure_ascii=False, sort_keys=True, indent=4, separators=(', ', ': ')))\n\n\ndef interface_test_random_exportPdf():\n exceptions_module_name = []\n exceptions_module_pdfs = []\n type_code = []\n \"\"\"\n 随机抽取个guid\n \"\"\"\n documents_controller.pageSize = 1\n content = None\n for doc in all_documents_models():\n print(doc.controller_name)\n if doc.controller_name == \"doc\":\n try:\n content = documents_controller.list(\"securityCase\")\n except:\n exceptions_module_name.append(doc.controller_name)\n\n else:\n try:\n content = documents_controller.list(doc.controller_name)\n except:\n exceptions_module_name.append(doc.controller_name)\n \"\"\"\n pdf查找\n \"\"\"\n if content:\n if content[\"data\"][\"items\"]:\n guid = content[\"data\"][\"items\"][0][\"guid\"]\n try:\n print(guid)\n content = documents_controller.exportPdf(doc.controller_name, guid, doc.interfaces.export_pdf,\n doc.interfaces.pdf_type)\n myprint(content)\n if content:\n if content[\"code\"] == 500:\n type_code.append(doc.controller_name)\n except:\n 
exceptions_module_pdfs.append(doc.controller_name)\n print(\"\\033[1;31m error {} \\033[0m\".format(doc.controller_name))\n\n else:\n print(\"\\033[1;36m error {} \\033[0m\".format(\"Item Empty\"))\n\n print(\"error_documents\", exceptions_module_name)\n print(\"error_documents_pdf\", exceptions_module_pdfs)\n print(\"error_documents_pdf\", type_code)\n\n\ninterface_test_random_exportPdf()\n# interface_test_documents_list()\n","sub_path":"zhak_projects/interface_request/safety_inspection/document_interfaces/interface_test_main.py","file_name":"interface_test_main.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"570663179","text":"import copy\nimport random\n\nfrom pommerman.constants import Action\nfrom pommerman.agents import DummyAgent\nfrom pommerman.forward_model import ForwardModel\nfrom pommerman import constants\nfrom pommerman import characters\nfrom pommerman.constants import Item, POSSIBLE_ACTIONS\n\nfrom .mcts import MCTSNode\nfrom .group05_utils import bomb_can_destroy_a_wooden_wall\n\nACCESSIBLE_TILES = [Item.Passage.value, Item.Kick.value, Item.IncrRange.value, Item.ExtraBomb.value]\n\n\nclass Node(MCTSNode):\n def __init__(self, state, agent_id):\n self.total_reward = 0\n self.visit_count = 0\n # state is a list of: 0. Board, 1. Agents, 2. Bombs, 3. Items, 4. Flames\n self.state = state\n self.agent_id = agent_id\n\n # here we need to think about pruning (for a particular node)\n # which action combinations do we really want to investigate in our search tree?\n self.action_combinations = [(a1, a2) for a1 in POSSIBLE_ACTIONS for a2 in POSSIBLE_ACTIONS\n if not self.prune((a1, a2))]\n self.children = dict()\n\n def prune(self, actions):\n # TODO: here you can think about more complex stategies to prune moves,\n # which allows you to create deeper search trees (very important!)\n # remember: two agents -> ids: 0 and 1\n own_agent = self.state[1][self.agent_id]\n opponent_agent = self.state[1][1 - self.agent_id]\n own_position = own_agent.position\n opponent_position = opponent_agent.position\n own_action = actions[self.agent_id]\n opponent_action = actions[opponent_agent.agent_id]\n\n # a lot of moves (e.g. 
bumping into a wall or wooden tile) actually result in stop moves\n # we do not have to consider, since they lead to the same result as actually playing a stop move\n if not self._is_legal_action(own_agent, own_position, own_action) or not self._is_legal_action(opponent_agent, opponent_position, opponent_action):\n return True # prune action\n\n man_dist = manhattan_dist(own_position, opponent_position)\n if man_dist > 6 and opponent_action != Action.Stop.value:\n # we do not model the opponent, if it is more than 6 steps away\n return True\n\n ## own extension\n #if own_action == Action.Bomb.value:\n # if not bomb_can_destroy_a_wooden_wall(own_position, ):\n # return True\n\n return False\n\n\n def _is_legal_action(self, agent, position, action):\n \"\"\" prune moves that lead to stop move\"\"\"\n if action == Action.Stop.value:\n return True\n board = self.state[0]\n bombs = self.state[2]\n bombs = [bomb.position for bomb in bombs]\n row = position[0]\n col = position[1]\n if action == Action.Bomb.value:\n #print(\"agent.agent_id=\", agent.agent_id, \"agent.blast_strength=\", agent.blast_strength)\n ## if ammo is 0 you cannot lay bombs\n if agent.ammo == 0:\n return False\n # if it a bomb move, check if there is already a bomb planted on this field\n if (row, col) in bombs:\n return False\n bomb_can_destroy_a_wooden_wall(board, position, agent.blast_strength)\n\n if action == Action.Up.value:\n row -= 1\n elif action == Action.Down.value:\n row += 1\n elif action == Action.Left.value:\n col -= 1\n elif action == Action.Right.value:\n col += 1\n\n if row < 0 or row >= len(board) or col < 0 or col >= len(board):\n return False\n\n if board[row, col] in [Item.Wood.value, Item.Rigid.value]:\n return False\n\n # own adding that agent cannot go on boms when he cant kick, or when he can kick, but bomb is at a wall\n if board[row, col] == Item.Bomb.value:\n if not agent.can_kick:\n return False\n else:\n # if bomb lays on a wall or at the outer border we cannot cick it\n 
if action == Action.Up.value and (row == 0 or board[row-1, col] in [Item.Wood.value, Item.Rigid.value]): #TODO maybe add enemy here because we cant kick if enemy standst there?\n return False\n elif action == Action.Down.value and (row == len(board)-1 or board[row+1, col] in [Item.Wood.value, Item.Rigid.value]):\n return False\n elif action == Action.Left.value and (col == 0 or board[row, col-1] in [Item.Wood.value, Item.Rigid.value]):\n return False\n elif action == Action.Right.value and (col == len(board)-1 or board[row, col+1] in [Item.Wood.value, Item.Rigid.value]):\n return False\n\n return True\n\n def find_children(self):\n \"\"\" expands all children \"\"\"\n for actions in self.action_combinations:\n if actions not in self.children.keys():\n self.children[actions] = self._forward(actions)\n\n def _forward(self, actions):\n \"\"\" applies the actions to obtain the next game state \"\"\"\n # since the forward model directly modifies the parameters, we have to provide copies\n board = copy.deepcopy(self.state[0])\n agents = _copy_agents(self.state[1])\n bombs = _copy_bombs(self.state[2])\n items = copy.deepcopy(self.state[3])\n flames = _copy_flames(self.state[4])\n board, curr_agents, curr_bombs, curr_items, curr_flames = ForwardModel.step(\n actions,\n board,\n agents,\n bombs,\n items,\n flames\n )\n return Node([board, curr_agents, curr_bombs, curr_items, curr_flames], self.agent_id)\n\n def find_random_child(self):\n \"\"\" returns a random child, expands the child if it was not already done \"\"\"\n actions = random.choice(self.action_combinations)\n if actions in self.children.keys():\n return self.children[actions]\n else:\n child = self._forward(actions)\n return child\n\n def get_children(self):\n return self.children\n\n def get_unexplored(self):\n \"\"\" returns a randomly chosen unexplored action pair, or None \"\"\"\n unexplored_actions = [actions for actions in self.action_combinations if actions not in self.children.keys()]\n if not 
unexplored_actions:\n return None\n actions = random.choice(unexplored_actions)\n child = self._forward(actions)\n self.children[actions] = child\n return child\n\n def is_terminal(self):\n alive = [agent for agent in self.state[1] if agent.is_alive]\n return len(alive) != 2\n\n def get_total_reward(self):\n \"\"\" Returns Total reward of node (Q) \"\"\"\n return self.total_reward\n\n def incr_reward(self, reward):\n \"\"\" Update reward of node in backpropagation step of MCTS \"\"\"\n self.total_reward += reward\n\n def get_visit_count(self):\n \"\"\" Returns Total number of times visited this node (N) \"\"\"\n return self.visit_count\n\n def incr_visit_count(self):\n self.visit_count += 1\n\n def reward(self, root_state):\n # we do not want to role out games until the end,\n # since pommerman games can last for 800 steps, therefore we need to define a value function,\n # which assigns a numeric value to state (how \"desirable\" is the state?)\n return _value_func(self.state, root_state, self.agent_id)\n\n\ndef manhattan_dist(pos1, pos2):\n return abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])\n\n\ndef _value_func(state, root_state, agent_id):\n # TODO: here you need to assign a value to a game state, for example the evaluation can\n # be based on the number of blasted clouds, the number of collected items the distance to the opponent, ...\n # an example how a numerical value can be derived:\n board = state[0]\n agents = state[1]\n own_agent = agents[agent_id]\n opponent_agent = agents[1-agent_id]\n root_own_agent = root_state[1][agent_id]\n assert own_agent, root_own_agent\n # check if own agent is dead\n if not own_agent.is_alive:\n return -1.0\n # check if opponent has been destroyed\n elif not opponent_agent.is_alive:\n return 1.0\n\n score = 0.0 # game is not over yet, we have to think about additional evaluation criteria\n\n own_position = own_agent.position\n opponent_position = opponent_agent.position\n\n # if agent cannot move in any direction than its 
locked up either by a bomb,\n # or the opponent agent -> very bad position\n down_cond = own_position[0] + 1 >= len(board) or \\\n board[own_position[0] + 1][own_position[1]] not in ACCESSIBLE_TILES\n up_cond = own_position[0] - 1 < 0 or \\\n board[own_position[0] - 1][own_position[1]] not in ACCESSIBLE_TILES\n right_cond = own_position[1] + 1 >= len(board) or \\\n board[own_position[0]][own_position[1] + 1] not in ACCESSIBLE_TILES\n left_cond = own_position[1] - 1 < 0 or \\\n board[own_position[0]][own_position[1] - 1] not in ACCESSIBLE_TILES\n\n if down_cond and up_cond and right_cond and left_cond:\n score += -0.5\n\n # we want to push our agent towards the opponent\n man_dist = manhattan_dist(own_position, opponent_position)\n score += 0.005*(10-man_dist) # the closer to the opponent the better\n\n # we want to collect items (forward model was modified to make this easier)\n score += own_agent.picked_up_items * 0.05\n\n # since search depth is limited, we need to reward well placed bombs instead\n # of only rewarding collecting items\n for bomb in state[2]:\n # we only reward bombs placed next to wood - you can improve this\n loc = bomb.position\n if loc[0]-1 >= 0 and board[loc[0]-1][loc[1]] == Item.Wood.value:\n score += 0.02\n if loc[0]+1 < len(board) and board[loc[0]+1][loc[1]] == Item.Wood.value:\n score += 0.02\n if loc[1]-1 >= 0 and board[loc[0]][loc[1]-1] == Item.Wood.value:\n score += 0.02\n if loc[1]+1 < len(board) and board[loc[0]][loc[1]+1] == Item.Wood.value:\n score += 0.02\n return score\n\n\ndef _copy_agents(agents_to_copy):\n \"\"\" copy agents of the current node \"\"\"\n agents_copy = []\n for agent in agents_to_copy:\n agt = DummyAgent()\n agt.init_agent(agent.agent_id, constants.GameType.FFA)\n agt.set_start_position(agent.position)\n agt.reset(\n ammo=agent.ammo,\n is_alive=agent.is_alive,\n blast_strength=agent.blast_strength,\n can_kick=agent.can_kick\n )\n agt.picked_up_items = agent.picked_up_items\n agents_copy.append(agt)\n return 
agents_copy\n\n\ndef _copy_bombs(bombs):\n \"\"\" copy bombs of the current node \"\"\"\n bombs_copy = []\n for bomb in bombs:\n bomber = characters.Bomber()\n bombs_copy.append(\n characters.Bomb(bomber, bomb.position, bomb.life, bomb.blast_strength,\n bomb.moving_direction)\n )\n\n return bombs_copy\n\n\ndef _copy_flames(flames):\n \"\"\" copy flames of the current node \"\"\"\n flames_copy = []\n for flame in flames:\n flames_copy.append(\n characters.Flame(flame.position, flame.life)\n )\n return flames_copy\n","sub_path":"student_agents/group05/group05/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":11307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"408269542","text":"'''\n@author: Dallas Fraser\n@author: 2016-04-12\n@organization: MLSB API\n@summary: The views for player stats\n'''\nfrom flask_restful import Resource, reqparse\nfrom flask import Response\nfrom json import dumps\nfrom api import DB\nfrom api.model import Team, Game\nfrom datetime import datetime, date, time\nfrom sqlalchemy import or_\nparser = reqparse.RequestParser()\nparser.add_argument('year', type=int)\nparser.add_argument('league_id', type=int)\nparser.add_argument('team_id', type=int)\n\n\ndef post(team_id, year, league_id):\n if team_id is not None:\n team = single_team(team_id)\n else:\n team = team_stats(year, league_id)\n return team\n\n\ndef single_team(team_id):\n team_query = Team.query.get(team_id)\n if team_query is None:\n return {}\n games = (DB.session.query(Game)\n .filter(or_(Game.away_team_id == team_id,\n Game.home_team_id == team_id)\n ).all())\n team = {team_id: {'wins': 0,\n 'losses': 0,\n 'games': 0,\n 'ties': 0,\n 'runs_for': 0,\n \"runs_against\": 0,\n 'hits_for': 0,\n 'hits_allowed': 0,\n 'name': str(team_query)}\n }\n for game in games:\n # loop through each game\n scores = game.summary()\n if game.away_team_id == team_id:\n score = scores['away_score']\n hits = scores['away_bats']\n opp = scores['home_score']\n opp_hits = scores['home_bats']\n else:\n score = scores['home_score']\n hits = scores['home_bats']\n opp = scores['away_score']\n opp_hits = scores['away_bats']\n if score > opp:\n team[team_id]['wins'] += 1\n elif score < opp:\n team[team_id]['losses'] += 1\n elif scores['home_bats'] + scores['away_bats'] > 0:\n team[team_id]['ties'] += 1\n team[team_id]['runs_for'] += score\n team[team_id]['runs_against'] += opp\n team[team_id]['hits_for'] += hits\n team[team_id]['hits_allowed'] += opp_hits\n team[team_id]['games'] += 1\n return team\n\n\ndef team_stats(year, league_id):\n t = time(0, 0)\n games = DB.session.query(Game)\n teams = DB.session.query(Team)\n if year is not None:\n d1 = 
date(year, 1, 1)\n d2 = date(year, 12, 30)\n start = datetime.combine(d1, t)\n end = datetime.combine(d2, t)\n games = games.filter(Game.date.between(start, end))\n teams = teams.filter(Team.year == year)\n if league_id is not None:\n games = games.filter(Game.league_id == league_id)\n teams = teams.filter(Team.league_id == league_id)\n result = {}\n for team in teams:\n # initialize each team\n result[team.id] = {'wins': 0,\n 'losses': 0,\n 'games': 0,\n 'ties': 0,\n 'runs_for': 0,\n \"runs_against\": 0,\n 'hits_for': 0,\n 'hits_allowed': 0,\n 'name': str(team)}\n for game in games:\n # loop through each game (max ~400 for a season)\n score = game.summary()\n result[game.away_team_id]['runs_for'] += score['away_score']\n result[game.away_team_id]['runs_against'] += score['home_score']\n result[game.away_team_id]['hits_for'] += score['away_bats']\n result[game.away_team_id]['hits_allowed'] += score['home_bats']\n result[game.home_team_id]['runs_for'] += score['home_score']\n result[game.home_team_id]['runs_against'] += score['away_score']\n result[game.home_team_id]['hits_for'] += score['home_bats']\n result[game.home_team_id]['hits_allowed'] += score['away_bats']\n if score['away_bats'] + score['home_bats'] > 0:\n result[game.away_team_id]['games'] += 1\n result[game.home_team_id]['games'] += 1\n if score['away_score'] > score['home_score']:\n result[game.away_team_id]['wins'] += 1\n result[game.home_team_id]['losses'] += 1\n elif score['away_score'] < score['home_score']:\n result[game.home_team_id]['wins'] += 1\n result[game.away_team_id]['losses'] += 1\n elif score['away_bats'] + score['home_bats'] > 0:\n result[game.home_team_id]['ties'] += 1\n result[game.away_team_id]['ties'] += 1\n return result\n\n\nclass TeamStatsAPI(Resource):\n def post(self):\n \"\"\"\n GET request for Team Stats List\n Route: Route['player_stats']\n Parameters:\n year: the year (int)\n team_id: the team id (int)\n league_id: the league id (int)\n Returns:\n status: 200\n mimetype: 
application/json\n data: list of Teams\n \"\"\"\n year = None\n args = parser.parse_args()\n if args['team_id']:\n tid = args['team_id']\n team = post(tid, None, None)\n else:\n if args['year']:\n year = args['year']\n else:\n year = None\n if args['league_id']:\n league_id = args['league_id']\n else:\n league_id = None\n team = post(None, year, league_id)\n return Response(dumps(team),\n status=200,\n mimetype=\"application/json\")\n","sub_path":"api/advanced/team_stats.py","file_name":"team_stats.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"568864900","text":"import os_setup\nimport threading\nimport os\nfrom random import randint\n\nimport requests\nfrom urllib3.exceptions import InsecureRequestWarning\n\nimport json\nimport time\nimport datetime\nimport pytz\nfrom random import choice\nfrom django.db.models import Q\nimport multiprocessing as mp\nfrom utils.slack import slack_notify\nfrom product.models import Product, ShopeeRating, ProductImage, ShopeeCategory,\\\n ProductSize, ProductColor, ProductExtraOption, ProductOption, ProductPattern,\\\n ShopeeColor, ShopeeSize, SourceExtraOption\nfrom helper.get_proxy_session import get_session\nfrom helper.clean_text import get_cleaned_text_from_color,\\\n get_cleaned_text, get_cleaned_text_from_pattern, \\\n get_cleaned_text_from_category, get_cleaned_text_from_size\nfrom django.shortcuts import get_object_or_404\nfrom store.models import Store, StorePost\n\n\nrequests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)\n_user_agents = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Mobile Safari/537.36',\n 'Mozilla/5.0 (Linux; Android 8.0.0; SM-G960F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36',\n 'Mozilla/5.0 (Linux; Android 6.0; HTC One X10 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.98 Mobile Safari/537.36',\n 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\n 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 
10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'\n]\n\n\nclass ShopeeScraper:\n def __init__(self, user_agents=None, proxy=None):\n self.user_agents = user_agents\n proxy_host = \"proxy.crawlera.com\"\n proxy_port = \"8010\"\n proxy_auth = os.environ.get('CRAWLERA_API_KEY')\n proxies = {\"https\": \"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n\n self.session = requests.Session()\n self.session.proxies.update(proxies)\n self.session.headers.update({'User-Agent': self.__random_agent(),\n 'X-Requested-With': 'XMLHttpRequest',\n })\n self.proxies = proxies\n self.session_refresh_count = 0\n\n def change_session(self):\n if self.session_refresh_count > 5:\n new_session, self.proxies = get_session('new')\n self.session_refresh_count = 0\n else:\n new_session, self.proxies = get_session(proxies=self.proxies)\n self.session = new_session\n self.session_refresh_count += 1\n return new_session\n\n def __random_agent(self):\n if self.user_agents and isinstance(self.user_agents, list):\n return choice(self.user_agents)\n return choice(_user_agents)\n\n def __request_url(self, store_id, limit='100', newest='0'):\n url = 'https://shopee.vn/api/v2/search_items/?by=pop&limit={limit}&match_id={store_id}&newest={newest}&order=desc&page_type=shop&shop_categoryids=&version=2'.format(\n limit=limit, store_id=store_id, newest=newest)\n # proxy_host = \"proxy.crawlera.com\"\n # proxy_port = \"8010\"\n # proxy_auth = os.environ.get('CRAWLERA_API_KEY')+':'\n # proxies = {\"https\": \"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n # \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n # headers = {'X-Crawlera-Profile': 'desktop',\n # 'X-Crawlera-JobId': '999',\n # 'X-Crawlera-Max-Retries': '1',\n # 'Referer': 
'https://shopee.vn/shop/{store_id}/search'.format(store_id=store_id),\n # }\n # try:\n # response = requests.get(url, proxies=proxies, verify=False,\n # headers=headers)\n headers = {'User-Agent': choice(_user_agents),\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://shopee.vn/shop/{store_id}/search?shopCollection='.format(store_id=store_id),\n }\n try:\n response = requests.get(url, headers=headers)\n # response.raise_for_status()\n except requests.HTTPError as e:\n print(e)\n pass\n except requests.RequestException:\n pass\n else:\n return response\n\n def __request_url_item(self, store_id, item_id):\n url = \"https://shopee.vn/api/v2/item/get?itemid={item_id}&shopid={store_id}\".format(item_id=item_id, store_id=store_id)\n proxy_host = \"proxy.crawlera.com\"\n proxy_port = \"8010\"\n proxy_auth = os.environ.get('CRAWLERA_API_KEY')+':'\n proxies = {\"https\": \"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n headers = {'X-Crawlera-Profile': 'desktop',\n 'X-Crawlera-JobId': '999',\n 'X-Crawlera-Max-Retries': '1',\n 'Referer': 'https://shopee.vn/shop/' +\n str(store_id) +\n '/search',\n }\n try:\n response = requests.get(url,\n proxies=proxies,\n verify=False,\n headers=headers, timeout=10)\n response.raise_for_status()\n except requests.HTTPError as e:\n print(e)\n pass\n except requests.RequestException:\n pass\n else:\n return response\n\n def __update_category(self, obj_product, categories):\n for category in categories:\n if category:\n obj_cat, is_created = ShopeeCategory.objects.get_or_create(catid=int(category['catid']),\n display_name=category['display_name'])\n obj_product.shopee_category.add(obj_cat)\n obj_cat.no_sub = category['no_sub']\n obj_cat.is_valid = category['no_sub']\n obj_cat.is_default_subcat = category['is_default_subcat']\n obj_cat.save()\n # is_valid -> 최하위 카테고리 & 분류 된 상태\n if obj_cat.is_valid:\n if obj_cat.category:\n 
obj_product.category = obj_cat.category\n if obj_cat.sub_category:\n obj_product.sub_category = obj_cat.sub_category\n\n if obj_product.sub_category is None:\n obj_product.validation = 'R' # 카테고리 정상 분류시 Review 로 표시\n\n def __update_rating(self, obj_product, data, view_count=0):\n obj_rating, is_created = ShopeeRating.objects.get_or_create(\n product=obj_product)\n if data['liked_count']:\n obj_rating.shopee_liked_count = data['liked_count']\n if data['historical_sold']:\n obj_rating.shopee_sold_count = data['historical_sold']\n obj_rating.shopee_view_count = view_count\n if data['item_rating']['rating_star']:\n obj_rating.shopee_rating_star = data['item_rating']['rating_star']\n if data['item_rating']['rating_count']:\n obj_rating.shopee_5_star_count = data['item_rating']['rating_count'][0]\n obj_rating.shopee_4_star_count = data['item_rating']['rating_count'][1]\n obj_rating.shopee_3_star_count = data['item_rating']['rating_count'][2]\n obj_rating.shopee_2_star_count = data['item_rating']['rating_count'][3]\n obj_rating.shopee_1_star_count = data['item_rating']['rating_count'][4]\n obj_rating.shopee_review_count = data['item_rating']['rating_count'][0]+data['item_rating']['rating_count'][1] + \\\n data['item_rating']['rating_count'][2]+data['item_rating']['rating_count'][3] + \\\n data['item_rating']['rating_count'][4]\n obj_rating.save()\n\n def __update_extra_options(self, obj_product, variation):\n options = variation['options']\n images = variation['images']\n variation_group = variation['name']\n for key, option in enumerate(options):\n option_string = option.lower().strip()\n try:\n source = 'https://cf.shopee.vn/file/' + \\\n variation['images'][key]\n source_thumb = 'https://cf.shopee.vn/file/' + \\\n variation['images'][key]+'_tn'\n except:\n source = None\n source_thumb = None\n obj_extra_option, is_created = SourceExtraOption.objects.get_or_create(\n name=option_string, source=source, source_thumb=source_thumb, variation_group=variation_group)\n 
obj_product.source_extra_option.add(obj_extra_option)\n\n def __update_size(self, obj_product, options):\n for option in options:\n cleaned_text = get_cleaned_text(option)\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_color(cleaned_text)))\n obj_size, is_created = ShopeeSize.objects.get_or_create(\n display_name=cleaned_text)\n obj_product.shopee_size.add(obj_size)\n for size_obj in obj_product.shopee_size.all():\n if size_obj.size:\n obj_product.size.add(size_obj.size)\n else: # 사이즈 정보 중 없는 정보가 있으면 R로 변경\n obj_product.validation = 'R'\n\n def __update_color(self, obj_product, options):\n for option in options:\n cleaned_text = get_cleaned_text(option)\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_size(cleaned_text)))\n obj_color, is_created = ShopeeColor.objects.get_or_create(\n display_name=cleaned_text)\n obj_product.shopee_color.add(obj_color)\n for color_obj in obj_product.shopee_color.all():\n if color_obj.color:\n obj_product.color.add(color_obj.color)\n else: # 사이즈 정보 중 없는 정보가 있으면 R로 변경\n obj_product.validation = 'R'\n\n def __update_product_option(self, obj_product, option_list,\n color_index, size_index, has_extra_options):\n free_size_obj = ProductSize.objects.get(name='free')\n u_color_obj = ProductColor.objects.get(name='undefined')\n u_size_obj = ProductSize.objects.get(name='undefined')\n # 옵션이 없는 경우 FREE SIZE OPTION 생성\n if len(option_list) == 0:\n obj_option, is_created = ProductOption.objects.get_or_create(\n product=obj_product, shopee_item_id=obj_product.shopee_item_id)\n obj_option.stock = obj_product.stock\n if obj_product.stock > 0:\n obj_option.is_active = True\n obj_product.is_active = True\n obj_product.validation = 'V'\n else:\n obj_option.is_active = False\n obj_option.original_price = obj_product.original_price\n obj_option.discount_price = obj_product.discount_price\n obj_option.currency = obj_product.currency\n 
obj_option.size = free_size_obj\n obj_option.save()\n else:\n not_valid_information = False\n for option in option_list:\n obj_option, is_created = ProductOption.objects.get_or_create(\n product=obj_product,\n shopee_item_id=option['modelid'])\n if is_created:\n # 옵션 생성 후에는 기본 옵션 정보 선택이 필요.\n if color_index is None and size_index is None:\n if len(option_list) == 1:\n if option['name'] == '':\n obj_option.name = 'default option(ONE SIZE)'\n else:\n obj_option.name = option['name']\n obj_option.size = free_size_obj\n obj_option.color = u_color_obj\n else:\n not_valid_information = True\n break\n else:\n obj_option.name = option['name']\n splited_list = option['name'].lower().split(',')\n if color_index != None:\n cleaned_text = get_cleaned_text(splited_list[color_index])\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_size(cleaned_text)))\n obj_color, is_created = ShopeeColor.objects.get_or_create(\n display_name=get_cleaned_text(cleaned_text))\n if obj_color.color:\n obj_option.color = obj_color.color\n else:\n obj_product.validation = 'R'\n not_valid_information = True\n break\n else:\n obj_option.color = u_color_obj\n\n if size_index != None:\n cleaned_text = get_cleaned_text(splited_list[size_index])\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_color(cleaned_text)))\n obj_size, is_created = ShopeeSize.objects.get_or_create(\n display_name=cleaned_text)\n if obj_size.size:\n obj_option.size = obj_size.size\n else:\n obj_product.validation = 'R'\n not_valid_information = True\n break\n else:\n obj_option.size = u_size_obj\n obj_option.is_active = option['status']\n if option['price_before_discount'] > 0:\n obj_option.original_price = option['price_before_discount'] / 100000\n obj_option.discount_price = option['price'] / 100000\n else:\n obj_option.original_price = option['price'] / 100000\n obj_option.discount_price = 0\n obj_option.currency = 
option['currency']\n obj_option.stock = option['stock']\n if option['stock'] == 0:\n obj_option.is_active = False\n obj_option.shopee_sold_count = option['sold']\n try:\n obj_option.save()\n except:\n not_valid_information = True\n\n if has_extra_options or not_valid_information:\n for option in option_list:\n obj_option, is_created = ProductOption.objects.get_or_create(\n product=obj_product, shopee_item_id=option['modelid'])\n obj_option.name = option['name']\n obj_option.extra_option = option['name']\n if len(option_list) == 1 and option['name'] == '':\n obj_option.name = obj_product.name\n obj_option.extra_option = obj_product.name\n obj_option.size = u_size_obj\n obj_option.color = u_color_obj\n obj_option.is_active = option['status']\n if option['price_before_discount'] > 0:\n obj_option.original_price = option['price_before_discount'] / 100000\n obj_option.discount_price = option['price'] / 100000\n else:\n obj_option.original_price = option['price'] / 100000\n obj_option.currency = option['currency']\n obj_option.stock = option['stock']\n if option['stock'] == 0:\n obj_option.is_active = False\n obj_option.shopee_sold_count = option['sold']\n try:\n obj_option.save()\n except:\n obj_product.validation = 'R'\n\n def __update_price(self, obj_product, data):\n if data['show_discount'] == 0:\n obj_product.is_discount = False\n obj_product.original_price = data['price'] / 100000\n obj_product.discount_price = 0\n obj_product.discount_rate = 0\n obj_product.currency = data['currency']\n else:\n obj_product.is_discount = True\n # price_max_before_discount #price_before_discount\n obj_product.original_price = data['price_before_discount'] / 100000\n obj_product.discount_price = data['price'] / 100000\n obj_product.discount_rate = data['show_discount']\n obj_product.currency = data['currency']\n if (obj_product.original_price == 0 or obj_product.discount_price == 0):\n slack_notify('something wrong with ' + str(obj_product.pk))\n if (data['show_free_shipping']):\n 
obj_product.is_free_ship = data['show_free_shipping']\n obj_product.shipping_price = 0\n else:\n obj_product.shipping_price = None\n\n def __update_pattern(self, obj_product):\n pattern_list = ProductPattern.objects.all()\n for pattern_obj in pattern_list:\n name_string = get_cleaned_text(obj_product.name)\n if get_cleaned_text(pattern_obj.name) in name_string or get_cleaned_text(pattern_obj.display_name) in name_string:\n obj_product.pattern.add(pattern_obj)\n\n def __update_images(self, obj_product, data, is_created=True):\n obj_product.product_thumbnail_image = 'https://cf.shopee.vn/file/' + \\\n data['image'] + '_tn'\n if (is_created == False):\n previous_images = ProductImage.objects.filter(product=obj_product)\n for previous_image in previous_images:\n previous_image.delete()\n for product_image in data['images']:\n obj_image, image_is_created = ProductImage.objects.get_or_create(\n source='https://cf.shopee.vn/file/' + product_image,\n source_thumb='https://cf.shopee.vn/file/' + product_image+'_tn',\n product=obj_product,\n post_image_type='P')\n\n def get_or_create_product(self, store_obj, itemid, view_count=None):\n shopid = store_obj.shopee_numeric_id\n result = ''\n # 0. 상품 생성 및 호출\n # time.sleep(randint(0, 2))\n obj_product, is_created = Product.objects.get_or_create(\n shopee_item_id=itemid, store=store_obj)\n # print('https://dabivn.com/admin/product/product/'+str(obj_product.pk))\n # 0. 상품 json load & 정상 데이터인지 확인\n data = self.__request_url_item(shopid, itemid).json()['item']\n if data['price'] % 100 != 0:\n print(data['price'])\n print('error')\n time.sleep(600)\n slack_notify('Crawler is caught by Shopee')\n return\n else:\n print(shopid, ' ', itemid, ' ', end='')\n print('0')\n # 1. 상품 삭제 확인\n if data == None:\n result = 'd'\n print('d', end='')\n obj_product.is_active = False\n obj_product.validation = 'D'\n obj_product.name = '[DELETED FROM SOURCE PAGE]' + obj_product.name\n obj_product.save()\n else:\n # TODO 재고 재 생성 확인을 해야함.\n # 2. 
신규 생성 상품 처리\n color_index = None\n size_index = None\n has_extra_options = False\n if is_created:\n # 2. 기본 정보 업데이트 (상품 링크 / 상품 생성 시간 / 상품 분류 / 이름 / 이미지)\n result = 'N'\n print('N', end='')\n obj_product.validation = 'V'\n self.__update_category(obj_product, data['categories'])\n obj_product.product_link = store_obj.shopee_url + '/' + str(itemid)\n obj_product.created_at = datetime.datetime.fromtimestamp(\n int(data['ctime']), pytz.UTC)\n obj_product.product_source = 'SHOPEE'\n obj_product.name = data['name']\n obj_product.description = data['description']\n # image\n self.__update_images(obj_product, data, is_created)\n # 2. 상품 사이즈 / 컬러 정보 업데이트\n if (data['size_chart'] != None):\n obj_product.size_chart = 'https://cf.shopee.vn/file/' + data['size_chart']\n for i, variation in enumerate(data['tier_variations']):\n variation_name = variation['name'].replace(' ', '').replace(':', '').lower().strip()\n if 'size' in variation_name or 'kích' in variation_name or 'kich' in variation_name:\n self.__update_size(obj_product, variation['options'])\n size_index = i\n elif 'màu' in variation_name or 'color' in variation_name or 'mau' in variation_name:\n self.__update_color(obj_product, variation['options'])\n color_index = i\n else:\n self.__update_extra_options(obj_product, variation)\n has_extra_options = True\n if obj_product.size.count() == 0:\n self.__update_size(obj_product, ['free'])\n # 2. 패턴 추가\n self.__update_pattern(obj_product)\n else:\n result = 'u'\n if (obj_product.product_thumbnail_image != 'https://cf.shopee.vn/file/' +\n data['image'] + '_tn'):\n result = 'i'\n print('i', end='')\n self.__update_images(obj_product, data, False)\n # 3. 기존 / 신규 상품 업데이트\n # 3. 가격 및 레이팅 업데이트\n obj_product.updated_at = datetime.datetime.now()\n self.__update_price(obj_product, data)\n if view_count:\n self.__update_rating(obj_product, data, view_count)\n\n # 3. 
재고 및 품절 처리\n obj_product.stock = data['stock']\n if (obj_product.stock == 0):\n obj_product.is_active = False\n obj_product.stock_available = False\n else:\n obj_product.stock_available = True\n\n # 4. 옵션 생성 및 업데이트\n self.__update_product_option(obj_product, data['models'], color_index, size_index, has_extra_options)\n obj_product.save()\n\n # 5. 생성 후 최종 검증\n if is_created:\n obj_product.is_active = False\n obj_product.save()\n return obj_product, result\n\n def search_store(self, store_obj):\n i = 0\n pk = 0\n list_length = 100\n store_id = store_obj.insta_id\n while list_length == 100:\n try:\n try_count = 0\n while True and try_count < 1:\n try_count += 1\n try:\n response = self.__request_url(store_id=store_obj.shopee_numeric_id,\n limit=list_length, newest=i*100)\n product_list = response.json()['items']\n break\n except:\n time.sleep(10)\n print('R', end='')\n for j, product in enumerate(product_list):\n try_count = 0\n while True and try_count < 10:\n try:\n product_obj, result = self.get_or_create_product(\n store_obj, product['itemid'], product['view_count'])\n break\n except:\n print('r', end='')\n # new_session = self.change_session()\n try_count += 1\n pk += 1\n list_length = len(product_list)\n i = i+1\n except:\n print('\\nERROR\\n')\n # slack_notify('Failed to get product list from {} {} ~ {}'.format(store_obj.insta_id, i * 100, (i + 1) * 100))\n break\n return pk\n#\n\n def refactor_search_store(self, store_obj):\n i = 0\n empty_result = 0\n result_string = ''\n store_id = store_obj.insta_id\n while empty_result < 3:\n # time.sleep(1+randint(0, 5)) 문제 없었음\n # time.sleep(1+randint(0, 2)) 문제 없었음\n time.sleep(1+randint(0, 1))\n try:\n response = self.__request_url(store_id=store_obj.shopee_numeric_id,\n limit=1, newest=i)\n if (len(response.json()['items']) == 1):\n product_json = response.json()['items'].pop()\n product_obj, result = self.get_or_create_product(\n store_obj, product_json['itemid'], product_json['view_count'])\n result_string = 
result_string+result\n else:\n empty_result += 1\n except:\n print('R', end='')\n i = i + 1\n # time.sleep(randint(0, 2))\n return i, result_string\n\n\ndef update_shopee(start_index=0, end_index=None, reverse=False):\n obj = ShopeeScraper()\n store_list = Store.objects.filter(store_type='IS').filter(is_active=True)[start_index + 1:end_index]\n results_string = 'update shopee from ' + str(start_index)\n if (end_index):\n results_string += ' to ' + str(end_index)\n for i, store_obj in enumerate(store_list):\n print(\"\\n#\" + str(i) + ' update ' + str(store_obj) + ' ')\n results_string = results_string+(\"\\n#\" + str(i) + ' update ' + str(store_obj))\n try:\n updated, result_string = obj.refactor_search_store(store_obj)\n results_string = results_string+result_string.replace('uuuuuuuuuu', 'U').replace('UUUUU', '5')\n except:\n slack_notify('Failed to update store {}'.format(store_obj.insta_id))\n # time.sleep(10+randint(0, 100)) 문제없었음\n # time.sleep(5+randint(0, 10)) 문제없었음\n # time.sleep(2+randint(0, 3))\n slack_notify(results_string)\n\n\ndef validate_shopee(start_index=0, end_index=None, reverse=False):\n obj = ShopeeScraper()\n store_list = Store.objects.filter(store_type='IS').filter(is_active=True)[start_index:end_index]\n results_string = 'validate shopee from ' + str(start_index)\n if (end_index):\n results_string += ' to ' + str(end_index)\n for i, store_obj in enumerate(store_list):\n print(\"\\n#\" + str(i) + ' validate ' + str(store_obj))\n results_string += (\"\\n#\" + str(i) + ' validate ' + str(store_obj))\n product_list = Product.objects.filter(is_active=True, store=store_obj, product_source='SHOPEE')\n for product_obj in product_list:\n try_count = 0\n # obj.get_or_create_product(store_obj, product_obj.shopee_item_id)\n while True:\n if try_count == 5:\n product_obj.is_active = False\n break\n try:\n obj.get_or_create_product(store_obj, product_obj.shopee_item_id)\n break\n except:\n try_count += 1\n time.sleep(5+randint(0, 10))\n 
slack_notify(results_string)\n\n\nif __name__ == '__main__':\n # pool = mp.Pool(processes=64)\n # update_shopee()\n # pool.map(obj.search_store, store_list)\n # pool.close()\n obj = ShopeeScraper()\n # obj.refactor_search_store(Store.objects.get(insta_id='su._.storee'))\n # obj.get_or_create_product(Store.objects.get(insta_id='onlyqueen.666'), 4047719428)\n # validate_shopee(181, 183)\n","sub_path":"app/crawling/shopee_c.py","file_name":"shopee_c.py","file_ext":"py","file_size_in_byte":29571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"367321534","text":"import os\nfrom os.path import join\nimport click\nfrom PIL import Image\n\n\n@click.command()\n@click.argument('renders_path')\ndef main(renders_path):\n subfolders = os.listdir(renders_path)\n for sf in subfolders:\n renders = os.listdir(join(renders_path, sf))\n for render in renders:\n render = join(renders_path, sf, render)\n im = Image.open(render)\n w, h = im.size\n tw, th = 400, 400\n if w == tw and h == th:\n continue\n\n pw = (w - tw)//2\n ph = (h - th)//2\n res = im.crop((pw, ph, pw+tw, ph+th))\n print(render)\n res.save(render)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/resize_dataset.py","file_name":"resize_dataset.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"505675113","text":"import argparse\nimport gym\nimport os\nimport sys\nimport pickle\nimport time\nimport datetime\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom utils import *\nfrom utils.args import *\nfrom plot.plot_logger import *\nfrom models.mlp_policy import Policy\nfrom models.mlp_critic import Value\nfrom models.mlp_policy_disc import DiscretePolicy\nfrom models.mlp_ltr import LtrPolicy\nfrom core.ppo import ppo_step\nfrom core.common import estimate_advantages\nfrom core.agent import Agent\n\ntry:\n path = os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name))\n models_file=open(path,'r')\n print(\"pre-trained models loaded.\")\n args.model_path = path\n print(\"model path: \", path)\nexcept IOError:\n print(\"pre-trained models not found.\")\n\nif args.log_plot is True:\n plotlogger = plot_logger()\n\ndtype = torch.float64\ntorch.set_default_dtype(dtype)\ndevice = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu_index)\n\n\"\"\"environment\"\"\"\nenv = gym.make(args.env_name)\nstate_dim = env.observation_space.shape[0]\nis_disc_action = len(env.action_space.shape) == 0\n# running_state = ZFilter((state_dim,), clip=5)\n# running_reward = ZFilter((1,), demean=False, clip=10)\nrunning_state = None\n\n\"\"\"seeding\"\"\"\nseed = int(time.time())\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nenv.seed(args.seed)\n\n\"\"\"define actor and critic\"\"\"\nif args.model_path is None:\n if is_disc_action:\n policy_net = DiscretePolicy(state_dim, env.action_space.n)\n else:\n policy_net = LtrPolicy(state_dim, env.action_space.shape[0], log_std=args.log_std, ltr_n=args.ltr_n)\n value_net = Value(state_dim)\nelse:\n policy_net, value_net, running_state = pickle.load(open(args.model_path, 
\"rb\"))\npolicy_net.to(device)\nvalue_net.to(device)\n\noptimizer_policy = torch.optim.Adam(policy_net.parameters(), lr=args.learning_rate)\noptimizer_value = torch.optim.Adam(value_net.parameters(), lr=args.learning_rate)\n\n# # optimization epoch number and batch size for PPO\n# optim_epochs = 10\n# optim_batch_size = 64\n\n\"\"\"create agent\"\"\"\nagent = Agent(env, policy_net, device, running_state=running_state, render=args.render, num_threads=args.num_threads)\n\n\ndef update_params(batch, i_iter):\n states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)\n actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)\n rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)\n masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)\n repeats = torch.from_numpy(np.stack(batch.repeat)).to(dtype).to(device)\n with torch.no_grad():\n values = value_net(states)\n fixed_log_probs, fixed_rpt_log_probs = policy_net.get_log_prob(states, actions, repeats)\n\n \"\"\"get advantage estimation from the trajectories\"\"\"\n advantages, returns = estimate_advantages(repeats, rewards, masks, values, args.gamma, args.tau, device)\n\n \"\"\"perform mini-batch PPO update\"\"\"\n optim_iter_num = int(math.ceil(states.shape[0] / optim_batch_size))\n for _ in range(optim_epochs):\n perm = np.arange(states.shape[0])\n np.random.shuffle(perm)\n perm = LongTensor(perm).to(device)\n\n states, actions, returns, advantages, repeats, fixed_log_probs, fixed_rpt_log_probs = \\\n states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), repeats[perm].clone(), fixed_log_probs[perm].clone(), fixed_rpt_log_probs[perm].clone()\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batch_size, min((i + 1) * optim_batch_size, states.shape[0]))\n states_b, actions_b, advantages_b, returns_b, repeats_b, fixed_log_probs_b, fixed_rpt_log_probs_b = \\\n states[ind], actions[ind], advantages[ind], 
returns[ind], repeats[ind], fixed_log_probs[ind], fixed_rpt_log_probs[ind]\n\n ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, 1, states_b, actions_b, returns_b, \n advantages_b, repeats_b, fixed_log_probs_b, fixed_rpt_log_probs_b, args.clip_epsilon, args.l2_reg)\n\n\ndef main_loop():\n for i_iter in range(args.max_iter_num):\n \"\"\"generate multiple trajectories that reach the minimum batch_size\"\"\"\n batch, log = agent.collect_samples(args.min_batch_size)\n t0 = time.time()\n update_params(batch, i_iter)\n t1 = time.time()\n\n if i_iter % args.log_interval == 0:\n print('{}\\tT_sample {:.4f}\\tT_update {:.4f}\\tR_min {:.2f}\\tR_max {:.2f}\\tR_avg {:.2f}'.format(\n i_iter, log['sample_time'], t1-t0, log['min_reward'], log['max_reward'], log['avg_reward']))\n if args.log_plot is True:\n plotlogger.log(n=i_iter, r_min=log['min_reward'], r_max=log['max_reward'], r_avg=log['avg_reward'])\n\n if args.save_model_interval > 0 and (i_iter+1) % args.save_model_interval == 0:\n to_device(torch.device('cpu'), policy_net, value_net)\n pickle.dump((policy_net, value_net, running_state),\n open(os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name)), 'wb'))\n to_device(device, policy_net, value_net)\n print(\"model saved!\")\n\n if args.log_plot is True and i_iter%args.log_plot_steps==0 and i_iter>=args.log_plot_steps:\n logplot_path = os.path.join(assets_dir(), 'learned_models/')\n with open(os.path.join(logplot_path+\"logplot\"+str(datetime.datetime.now())+\".pkl\"), \"wb\") as f: pickle.dump(plotlogger._log, f, pickle.HIGHEST_PROTOCOL)\n print(\"plot log succeed.\")\n args.log_plot = False\n exit()\n\n \"\"\"clean up gpu memory\"\"\"\n torch.cuda.empty_cache()\n\n\nmain_loop()\n","sub_path":"train/ltr.py","file_name":"ltr.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"33149419","text":"import subprocess\nfrom pathlib import Path\n\nif __name__ == '__main__':\n path = Path(__file__)\n dir_path = path.parent.resolve()\n text_path = dir_path / 'text.txt'\n config_path = dir_path / 'config.yml'\n exec_path = dir_path.parent.parent / 'hercules-extraction.py'\n\n out_path = dir_path / 'translation_extraction_coreference_export_sample.ttl'\n\n cp = subprocess.run(['python', str(exec_path), '--file', str(text_path), '--config', str(config_path), '--out', str(out_path)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n\n print('stdout:')\n print(cp.stdout)\n print('stderr:')\n print(cp.stderr)\n","sub_path":"sample/translation_extraction_coreference_export/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"141333195","text":"import random\nfrom revscoring.features import wikitext, revision_oriented, temporal\nfrom revscoring.languages import english\nfrom revscoring.extractors import api\nfrom revscoring.utilities.util import read_observations\nimport mwapi\nimport json\nimport sys, traceback\nimport mwreverts.api\n\nsession = mwapi.Session('https://en.wikipedia.org')\nrev_reverteds = []\nflag = True\nwhile flag == True:\n revid = random.randint(700000000, 900000000)\n try:\n _, reverted, reverted_to = mwreverts.api.check(\n session, revid, radius=5, # most reverts within 5 edits\n window=48 * 60 * 60, # 2 days\n rvprop={'user', 'ids'}) # Some properties we'll make use of\n flag = False\n except (RuntimeError, KeyError) as e:\n #sys.stderr.write(str(e))\n print('Revision ID ' + str(revid) + ' does not exist')\n \nif reverted is not None:\n reverted_doc = [r for r in reverted.reverteds if r['revid'] == revid][0]\n if 'user' not in reverted_doc or 'user' not in reverted.reverting:\n None\n self_revert = reverted_doc['user'] == reverted.reverting['user']\n # revisions that are reverted back to by others\n reverted_back_to = reverted_to is not None and 'user' in reverted_to.reverting and reverted_doc['user'] != reverted_to.reverting['user']\n # If we are reverted, not by self or reverted back to by someone else,\n # then, let's assume it was damaging.\n damaging_reverted = not (self_revert or reverted_back_to)\nelse:\n damaging_reverted = False\n \nif reverted is None:\n rev_reverteds.append(('N/A', revid, 'N/A', damaging_reverted)) # Before Rev, Current Rev, After Rev\nelif reverted is not None:\n\n rev_reverteds.append((reverted.reverteds[0]['parentid'], reverted.reverting['revid'], reverted.reverting['revid'],\n damaging_reverted))\n # Before Rev, Before User, Current Rev, Current User, After Rev, After User\n#sys.stderr.write(\"r\" if damaging_reverted else \".\")\n\nfeatures = [\n # Catches long key mashes like kkkkkkkkkkkk\n 
wikitext.revision.diff.longest_repeated_char_added,\n # Measures the size of the change in added words\n wikitext.revision.diff.words_added,\n # Measures the size of the change in removed words\n wikitext.revision.diff.words_removed,\n # Measures the proportional change in \"badwords\"\n english.badwords.revision.diff.match_prop_delta_sum,\n # Measures the proportional change in \"informals\"\n english.informals.revision.diff.match_prop_delta_sum,\n # Measures the proportional change meaningful words\n english.stopwords.revision.diff.non_stopword_prop_delta_sum,\n # Is the user anonymous\n revision_oriented.revision.user.is_anon,\n # Is the user a bot or a sysop\n revision_oriented.revision.user.in_group({'bot', 'sysop'}),\n # How long ago did the user register?\n temporal.revision.user.seconds_since_registration\n]\n\napi_extractor = api.Extractor(session)\ntry:\n revData = list(api_extractor.extract(revid, features))\n revObserv = {\"rev_id\": revid, \"cache\": revData}\nexcept:\n print('Revision Data Not Found')\n\n#revObserv = json.dumps(revObserv)\n#print(type(revObserv))\nprint('Revision Id: ' + str(revObserv['rev_id']))\nprint('Repeated Characters Added: ' + str(revObserv['cache'][0]))\nprint('Added Characters: ' + str(revObserv['cache'][1]))\nprint('Removed Characters: ' + str(revObserv['cache'][2]))\nprint('Proportional Number of Bad Words: ' + str(revObserv['cache'][3]))\nprint('Proportional Number of Informal Words: ' + str(revObserv['cache'][4]))\nprint('Proportional Change of Meaningful Words: ' + str(revObserv['cache'][5]))\nprint('User Anonymity: ' + str(revObserv['cache'][6]))\nprint('User Group: ' + str(revObserv['cache'][7]))\nprint('Registration Time: ' + str(revObserv['cache'][8]))","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"144642082","text":"#zyx\nimport requests\nimport csv\n\nurl = \"https://careers.tencent.com/tencentcareer/api/post/Query\"\n\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36\",\n}\n\nparams = {'area': ' cn',\n 'attrId': ' ',\n 'bgIds': ' ',\n 'categoryId': ' ',\n 'cityId': ' ',\n 'countryId': ' ',\n 'keyword': ' ',\n 'language': ' zh-cn',\n 'pageIndex': ' 1',\n 'pageSize': ' 10',\n 'parentCategoryId': ' ',\n 'productId': ' ',\n 'timestamp': ' 1602211262824'}\n\n\ndef parse_json(url, params={}):\n \"\"\"解析url,得到字典\"\"\"\n response = requests.get(url=url, headers=headers, params=params)\n return response.json()\n\n\ndef get_position(data):\n \"\"\"获取职位数据\"\"\"\n item = {\n \"postion_name\":\"\",#职位名称\n \"postion_department\":\"\",#职位部门\n \"postion_location\":\"\",#职位所在地\n \"postion_country\":\"\",#职位所在国家\n \"postion_category\":\"\",#职位类别\n \"postion_responsibility\":\"\",#职位职责\n \"postion_url\":\"\",#职位url\n }\n data_list = data[\"Data\"][\"Posts\"]\n for data in data_list:\n item[\"postion_name\"] = data[\"RecruitPostName\"]\n item[\"postion_department\"] = data[\"BGName\"]\n item[\"postion_location\"] = data[\"LocationName\"]\n item[\"postion_country\"] = data[\"CountryName\"]\n item[\"postion_category\"] = data[\"CategoryName\"]\n item[\"postion_responsibility\"] = data[\"Responsibility\"]\n item[\"postion_url\"] = data[\"PostURL\"]\n\n save(item)\n print(item)\n print(\"保存完成\")\n\ndef save(item):\n \"\"\"将数据保存到csv中\"\"\"\n with open(\"./腾讯招聘.csv\", \"a\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerow(item.values())\n\ndef start():\n for i in range(1,635):\n params[\"pageIndex\"] = i\n data = parse_json(url,params)\n get_position(data)\n\nif __name__ == '__main__':\n 
start()","sub_path":"pythondemo123/tencent2.py","file_name":"tencent2.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"602287438","text":"from rest_framework import status\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView, CreateAPIView, GenericAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\n\nfrom likes.api.pagination import get_pagination_class\nfrom likes.api.serializers import (\n LikeListSerializer,\n LikeToggleSerializer,\n LikeContentTypeSerializer\n)\nfrom likes.models import Like\nfrom posts.models import Post\nfrom likes.selectors import get_liked_object_ids, get_users_who_liked_object, get_user_likes\nfrom likes.services import get_user_likes_count\n\nfrom posts.serializers import PostSerializer\n\n__all__ = (\n 'LikedCountAPIView',\n 'LikedIDsAPIView',\n 'LikeToggleView',\n 'LikeListAPIView',\n 'LikersListAPIView',\n)\n\n\nclass LikeToggleView(CreateAPIView):\n \"\"\"\n post:\n API View to like-unlike given object by authenticated user.\\n\n Possible payload:\\n\n {\n \"type\": \"app_label.model\", // object's content type's natural key joined string\n \"id\": 1 // object's primary key\n }\n \"\"\"\n permission_classes = (IsAuthenticated, )\n serializer_class = LikeToggleSerializer\n # serializer_class = PostSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n data = serializer.data\n data['is_liked'] = getattr(serializer, 'is_liked', True)\n return Response(\n data,\n status=status.HTTP_201_CREATED,\n headers=self.get_success_headers(serializer.data)\n )\n \nclass LikeAlbumToggleView(CreateAPIView):\n \"\"\"\n post:\n API View to like-unlike given object by authenticated user.\\n\n Possible payload:\\n\n {\n \"type\": \"app_label.model\", // object's content type's natural key joined string\n \"id\": 1 // object's primary key\n }\n \"\"\"\n permission_classes = 
(IsAuthenticated, )\n serializer_class = LikeToggleSerializer\n # serializer_class = PostSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n data = serializer.data\n data['is_liked'] = getattr(serializer, 'is_liked', True)\n # album = AlbumSerializer()\n return Response(\n data,\n status=status.HTTP_201_CREATED,\n headers=self.get_success_headers(serializer.data)\n )\n\n\n# class LikersListAPIView(ListAPIView):\n# permission_classes = (AllowAny, )\n#\n# def get(self, request, *args, **kwargs):\n# serializer = LikeContentTypeSerializer(data=request.GET)\n# serializer.is_valid(raise_exception=True)\n#\n# return Response(\n# data={\n# 'ids': get_users_who_liked_object(\n# user=self.request.user,\n# content_type=serializer.validated_data.get(\n# 'type'\n# )\n# )\n# }\n# )\n\n\nclass LikedCountAPIView(APIView):\n \"\"\"\n API View to return count of likes for authenticated user.\n \"\"\"\n permission_classes = (AllowAny, )\n\n def get(self, request, *args, **kwargs):\n serializer = LikeContentTypeSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n\n return Response(\n data={\n 'count': get_user_likes_count(\n user=request.user,\n content_type=(\n serializer.validated_data.get(\n 'type'\n )\n )\n )\n }\n )\n\n\nclass LikedIDsAPIView(APIView):\n \"\"\"\n User liked ids:\n API View to return liked objects ids for a given user.\n \"\"\"\n permission_classes = (AllowAny, )\n\n def get(self, request, *args, **kwargs):\n serializer = LikeContentTypeSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n post_serializer = PostSerializer\n return Response(\n data={\n 'ids': get_liked_object_ids(\n user=self.request.user,\n content_type=serializer.validated_data.get(\n 'type'\n )\n ),\n # 'posts': Post.objects.filter(likes__user=self.request.user)\n # 'posts': get_user_likes(user=self.request.user, 
content_type=post_serializer.validated_data.get('type'))\n }\n )\n #\n\n\nclass PostLikedByList(ListAPIView):\n # queryset = Post.objects.all()\n # serializer_class = PostSerializer\n\n\n # permission_classes = (IsAuthenticated,)\n # lookup_field = 'id'\n # def perform_create(self, serializer):\n # serializer.save(publisher=self.request.user)\n\n # def get_queryset(self):\n\n def get(self, request, id):\n \"\"\"\n /posts/:id/likes/\n \"\"\"\n\n post_id = Post.objects.get(id=id).likes.all()\n serializer = LikeListSerializer(post_id, many=True, context={'request': request})\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass LikeListAPIView(ListAPIView):\n \"\"\"\n List API View to return all likes for authenticated user.\n Possible payload:\\n\n {\n \"type\": \"app_label.model\", // object's content type's natural key joined string\n \"id\": 1 // object's primary key\n }\n \"\"\"\n pagination_class = get_pagination_class()\n permission_classes = (IsAuthenticated, )\n serializer_class = LikeListSerializer\n queryset = Like.objects.all()\n filter_backends = (filters.SearchFilter, )\n search_fields = (\n 'content_type__model',\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n user=self.request.user\n )\n .select_related('user')\n .distinct()\n )\n","sub_path":"likes/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"614374893","text":"from django.contrib import admin\nfrom .models import Quote, SchoolRelatedInformation, Subscriber\n\n\nclass QuoteAdmin(admin.ModelAdmin):\n list_display = ('quote', 'author', 'update_date')\n list_filter = ('update_date',)\n search_fields = ('author', 'quote')\n\n\nclass SubscriberAdmin(admin.ModelAdmin):\n list_display = ('email', 'date')\n list_filter = ('date',)\n\n\nadmin.site.register(Quote, QuoteAdmin)\nadmin.site.register(Subscriber, SubscriberAdmin)\nadmin.site.register(SchoolRelatedInformation)\n","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"321255453","text":"# Copyright 2020 Ericsson TEI, Fabio Ubaldi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport logging\nimport os\nimport re\nfrom typing import Dict, Tuple\nfrom urllib.parse import urlencode\n\nimport requests\nimport urllib3\nfrom urllib3.exceptions import InsecureRequestWarning\n\nfrom adaptation_layer.error_handler import ResourceNotFound, NsNotFound, \\\n BadRequest, ServerError, NsOpNotFound, NsdNotFound\nfrom .interface import Driver, Headers, BodyList, Body\n\nurllib3.disable_warnings(InsecureRequestWarning)\nTESTING = os.environ.get(\"TESTING\", False)\nPRISM_ALIAS = os.environ.get(\"PRISM_ALIAS\", \"prism-ever\")\n\nlogger = logging.getLogger('app.driver.ever')\n\nclass EVER(Driver):\n\n def __init__(self, rano_cred):\n self._ranoId = rano_cred[\"rano_id\"]\n self._host = rano_cred[\"host\"]\n self._port = rano_cred[\"port\"] if \"port\" in rano_cred else 8080\n self._headers = {\"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"}\n\n if TESTING is False:\n self._base_path = 'http://{0}:{1}'.format(self._host, self._port)\n else:\n self._base_path = 'http://{0}:{1}'.format(PRISM_ALIAS, 9999)\n\n def _exec_delete(self, url=None, params=None, headers=None):\n logger.debug('#############execute delete######')\n logger.debug('url= ' + url)\n try:\n resp = requests.delete(url, params=params, verify=False, headers=headers)\n except Exception as e:\n raise ServerError(str(e))\n\n if 
resp.status_code in (200, 201, 202, 206):\n if 'application/json' in resp.headers['content-type']:\n return resp.json(), resp.headers\n else:\n return resp.text, resp.headers\n elif resp.status_code == 204:\n return None, resp.headers\n elif resp.status_code == 400:\n raise BadRequest()\n elif resp.status_code == 404:\n raise ResourceNotFound()\n else:\n error = resp.json()\n logger.debug('############')\n logger.debug('error: ' + error)\n logger.debug('###########')\n raise ServerError(error)\n\n def _exec_post(self, url=None, data=None, json=None, headers=None):\n logger.debug('#############execute post######')\n logger.debug('url= ' + url)\n try:\n resp = requests.post(url, data=data, json=json, verify=False, headers=headers)\n except Exception as e:\n raise ServerError(str(e))\n\n if resp.status_code in (200, 201, 202, 206):\n try:\n ctype = resp.headers['content-type']\n except KeyError:\n # success but no content\n return None, resp.headers\n if 'application/json' in ctype:\n return resp.json(), resp.headers\n else:\n return resp.text, resp.headers\n elif resp.status_code == 204:\n return None, resp.headers\n elif resp.status_code == 400:\n raise BadRequest()\n elif resp.status_code == 404:\n raise ResourceNotFound()\n else:\n if 'application/json' in resp.headers['content-type']:\n error = resp.json()\n else:\n error = resp.text\n logger.debug('############')\n logger.debug('error: ' + error)\n logger.debug('###########')\n raise ServerError(error)\n\n def _exec_get(self, url=None, params=None, headers=None):\n logger.debug('#############execute get######')\n logger.debug('url= ' + url)\n try:\n resp = requests.get(url, params=params, verify=False, headers=headers)\n except Exception as e:\n raise ServerError(str(e))\n\n if resp.status_code in (200, 201, 202, 206):\n if 'application/json' in resp.headers['content-type']:\n return resp.json(), resp.headers\n else:\n return resp.text, resp.headers\n elif resp.status_code == 204:\n return None, resp.headers\n 
elif resp.status_code == 400:\n raise BadRequest()\n elif resp.status_code == 404:\n raise ResourceNotFound()\n else:\n error = resp.json()\n logger.debug('############')\n logger.debug('error: ' + error)\n logger.debug('###########')\n raise ServerError(error)\n\n # all methods\n\n def get_ns_list(self, args=None) -> Tuple[BodyList, Headers]:\n _url = '{0}/instances'.format(self._base_path)\n _url = self._build_url_query(_url, args)\n ns_list, resp_headers = self._exec_get(_url, headers=self._headers)\n headers = self._build_headers(resp_headers)\n return ns_list, headers\n\n def create_ns(self, args=None) -> Tuple[Body, Headers]:\n _url = '{0}/create'.format(self._base_path)\n _url = self._build_url_query(_url, args)\n try:\n created_ns, resp_headers = self._exec_post(\n _url, json=args['payload'], headers=self._headers)\n except ResourceNotFound:\n nsd_Id = args['payload']['nsdId']\n raise NsdNotFound(nsd_id=nsd_Id)\n headers = self._build_headers(resp_headers)\n return created_ns, headers\n\n def get_ns(self, nsId: str, args=None, skip_sol=False) -> Tuple[Body, Headers]:\n _url = '{0}/instances/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n try:\n ns_instance, resp_headers = self._exec_get(_url, headers=self._headers)\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return ns_instance, headers\n\n def delete_ns(self, nsId: str, args: Dict = None) -> Tuple[None, Headers]:\n _url = '{0}/delete/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n try:\n empty_body, resp_headers = self._exec_delete(\n _url, params=None, headers={})\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return None, headers\n\n def instantiate_ns(self, nsId: str, args=None) -> Tuple[None, Headers]:\n _url = '{0}/instantiate/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n 
instantiate_payload = {}\n try:\n instantiate_payload['SapData'] = args['payload']['SapData']\n except (TypeError, KeyError):\n logger.info('no SapData')\n try:\n empty_body, resp_headers = self._exec_post(\n _url, json=instantiate_payload, headers={})\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return None, headers\n\n def terminate_ns(self, nsId: str, args=None) -> Tuple[None, Headers]:\n _url = '{0}/terminate/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n req_headers = copy.deepcopy(self._headers)\n try:\n del req_headers[\"Content-Type\"]\n except KeyError:\n pass\n try:\n del req_headers[\"Accept\"]\n except KeyError:\n pass\n try:\n emtpy_body, resp_headers = self._exec_post(_url, headers=req_headers)\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return None, headers\n\n def scale_ns(self, nsId: str, args=None) -> Tuple[None, Headers]:\n pass\n\n def get_op_list(self, args: Dict = None) -> Tuple[BodyList, Headers]:\n _url = \"{0}/ns_lcm_op_occs\".format(self._base_path)\n _url = self._build_url_query(_url, args)\n op_list, resp_headers = self._exec_get(_url, headers=self._headers)\n headers = self._build_headers(resp_headers)\n return op_list, headers\n\n def get_op(self, nsLcmOpId, args: Dict = None) -> Tuple[Body, Headers]:\n _url = '{0}/ns_lcm_op_occs/{1}'.format(self._base_path, nsLcmOpId)\n _url = self._build_url_query(_url, args)\n try:\n lcm_op, resp_headers = self._exec_get(_url, headers=self._headers)\n except ResourceNotFound:\n raise NsOpNotFound(ns_op_id=nsLcmOpId)\n headers = self._build_headers(resp_headers)\n return lcm_op, headers\n\n @staticmethod\n def _build_url_query(base, args):\n if args and args['args']:\n url_query = urlencode(args['args'])\n return \"{0}?{1}\".format(base, url_query)\n return base\n\n def _build_headers(self, resp_headers):\n headers = {}\n if 'location' in 
resp_headers:\n re_res = re.findall(\n r\"/(instances|ns_lcm_op_occs)/([A-Za-z0-9\\-]+)\", resp_headers['location'])\n if len(re_res):\n headers['location'] = '/rano/{0}/ns_lcm_op_occs/{1}'.format(self._ranoId, re_res[0][1])\n return headers\n","sub_path":"adaptation_layer/driver/ever.py","file_name":"ever.py","file_ext":"py","file_size_in_byte":9580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"457340374","text":"import sys\n\ndef ask_for_type():\n decided = False\n while decided == False:\n enc_or_dec = input(\"Do you want to (e)ncode or (d)ecode a word?\\n\")\n enc_or_dec = enc_or_dec.lower()\n\n if enc_or_dec == 'e':\n encode = True\n decided = True\n elif enc_or_dec == 'd':\n encode = False\n decided = True\n else:\n print(\"Invalid input, try again...\")\n return encode\n\ndef ask_for_word():\n word = input(\"Which word shall I process?\\n\")\n return word\n\ndef ask_for_key():\n key = input(\"With which key shall I process the word?\\n\")\n return int(key)\n\ndef encode(word, key, enc):\n if enc == False:\n key = -key\n new_word = []\n for c in word:\n if c.isupper() == True:\n ascii_shift = 65\n else: # lowercase char\n ascii_shift = 97\n number = ord(c) - ascii_shift\n key_transformed = (number + key) % 26\n final_character = chr(key_transformed + ascii_shift)\n new_word.append(final_character)\n return ''.join(new_word)\n\n\n# main\n\nprint(\"Welcome to caesar cypher!\")\nenc = ask_for_type()\nword = ask_for_word()\nkey = ask_for_key()\noutput = encode(word, key, enc)\nprint(output)\n","sub_path":"easy/003/caesarcypher.py","file_name":"caesarcypher.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"204263610","text":"import numpy as np\nimport matplotlib.pylab as plt\nimport decision_tree_library\n\n# Gradient boosting: gather an ensemble of weak learners (trees).\n# Each new learner learns what all previous learners missed\n# to learn: each new learner uses residual (= initial target - best\n# previous prediction) as target.\n\ndef generate_data():\n # Generate 1D dataset.\n x = np.arange(0, 50)\n a, b = 30, 20\n y = a*x + b\n sigma = 100.\n y = np.random.normal(y, sigma, size=y.shape)\n\n # Shape data.\n x = x.reshape(-1, 1)\n y = y.reshape(-1, 1)\n\n return x, y\n\ndef gradient_boosting_training(x_train, y_train):\n # Gradient boosting: training (skeleton algorithm).\n grad_boost = []\n y_pred_cumulated = np.zeros(y_train.shape)\n residual = y_train # Initialize residuals to y_train.\n max_depth, learning_rate, n_estimators = 10, 0.1, 50\n for i in range(n_estimators):\n # Gradient boosting: i-th step.\n train_set = np.concatenate((x_train, residual), axis=1) # Use residual as target.\n if i == 0: # First tree is a leaf (max_depth = 1).\n lr = 1. # Learning rate is always 1. 
for the first tree (= leaf).\n tree = decision_tree_library.build_tree(train_set, 1, 2, False)\n else:\n lr = learning_rate\n tree = decision_tree_library.build_tree(train_set, max_depth, 2, False)\n grad_boost.append((lr, tree))\n y_pred = np.zeros(y_train.shape)\n for idx, row in enumerate(train_set):\n pred = decision_tree_library.predict_tree(tree, row)\n y_pred[idx] = pred\n y_pred_cumulated = y_pred_cumulated + lr*y_pred\n residual = y_train - y_pred_cumulated # Use residual as target.\n\n # Plotting results: i-th step.\n if i%10 == 0 or i == n_estimators-1:\n _, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))\n ax1.scatter(x_train, y_train, c='k', label='data')\n ax1.set_title('Data')\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.legend()\n ax2.scatter(x_train, y_train, c='k', label='data')\n ax2.scatter(x_train, y_pred_cumulated, c='b', label='predict')\n ax2.set_title(f'Prediction (Iteration {i+1})')\n ax2.set_xlabel('x')\n ax2.set_ylabel('y')\n ax2.legend()\n ax3.scatter(x_train, residual, c='r', label='residual')\n ax3.set_title(f'Residuals vs. 
x (Iteration {i+1})')\n ax3.set_xlabel('x')\n ax3.set_ylabel('Residuals')\n ax3.legend()\n plt.suptitle('Gradient Boosting - Training')\n plt.show()\n\n return grad_boost\n\ndef gradient_boosting_testing(grad_boost, x_test, y_test):\n # Gradient boosting: testing.\n test_set = np.concatenate((x_test, y_test), axis=1)\n y_pred = np.zeros(y_test.shape)\n for idx, row in enumerate(test_set):\n for gb in grad_boost:\n lr, tree = gb\n pred = decision_tree_library.predict_tree(tree, row)\n y_pred[idx] = y_pred[idx] + lr*pred\n\n # Plotting.\n _, ax = plt.subplots(1, 1, figsize=(5, 5))\n ax.scatter(x_test, y_test, c='k', label='data')\n ax.scatter(x_test, y_pred, c='b', label='predict')\n ax.set_title('Prediction')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.legend()\n plt.suptitle('Gradient Boosting - Testing')\n plt.show()\n\ndef main():\n # Gradient boosting: training.\n x_train, y_train = generate_data()\n grad_boost = gradient_boosting_training(x_train, y_train)\n\n # Gradient boosting: testing.\n x_test, y_test = generate_data()\n gradient_boosting_testing(grad_boost, x_test, y_test)\n\nif __name__ == '__main__':\n main()\n","sub_path":"1.supervised/3.decision_tree/gradient_boosting_regression.py","file_name":"gradient_boosting_regression.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"342655260","text":"import datetime as dt\nimport pandas as pd\nimport ctypes\nimport yfinance as yf\nimport openpyxl\nimport concurrent.futures\n\n\nclass Options:\n def __init__(self, stock_tickers=None, initial_data=None, quote_data=None, rate=None):\n self.stock_tickers = stock_tickers\n self.initial_data = initial_data\n self.quote_data = quote_data\n self.rate = rate\n self.option_value = {}\n for ticker in stock_tickers:\n self.option_value[ticker] = []\n\n def thread_marshaller(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.stock_tickers)) as executor:\n for stock in self.stock_tickers:\n executor.submit(self.options, stock)\n\n return self.option_value\n\n def options(self, stock):\n today = dt.date.today()\n url = f\"../ALGO/Daily Stock Analysis/Options/{stock} Options Data {today}.xlsx\"\n\n handle = ctypes.cdll.LoadLibrary(r\"C:\\Users\\fabio\\source\\repos\\CallPricingDll\\CallPricingDll\\x64\\Rel\"\n r\"ease\\CallPricingDll.dll\")\n\n handle.CallPricing.argtypes = [ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,\n ctypes.c_float]\n handle.CallPricing.restype = ctypes.c_double\n handle.PutPricing.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,\n ctypes.c_double]\n handle.PutPricing.restype = ctypes.c_double\n\n wb = openpyxl.Workbook()\n wb.save(url)\n book = openpyxl.load_workbook(url)\n writer = pd.ExcelWriter(url, engine='openpyxl')\n writer.book = book\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n try:\n i = 0\n yfticker = yf.Ticker(stock)\n expiration_dates = yfticker.options\n dividend = self.initial_data[stock][0]\n spot = self.quote_data[stock][0]\n\n for expiry in expiration_dates:\n exp = dt.datetime.strptime(expiry, '%Y-%m-%d').date()\n days_expiration = exp - today\n time_to_expiry = int(days_expiration.days)\n\n bond_yield = float(self.rate[0])\n if 30 <= time_to_expiry <= 60:\n bond_yield = float(self.rate[1])\n elif 60 < 
time_to_expiry <= 91:\n bond_yield = float(self.rate[2])\n elif 91 < time_to_expiry <= 182:\n bond_yield = float(self.rate[3])\n elif time_to_expiry > 182:\n continue\n\n options_chain = yfticker.option_chain(expiry)\n call_table = options_chain.calls\n put_table = options_chain.puts\n call_table['option_value'] = 0.00\n put_table['option_value'] = 0.00\n\n self.option_value[stock].append({expiry: {'overvalued_call_options': 0, 'undervalued_call_options': 0,\n 'overvalued_put_options': 0, 'undervalued_put_options': 0}})\n # calls_well_priced = 0\n # total_calls = 0\n # puts_well_priced = 0\n # total_puts = 0\n\n bond_yield -= dividend # dividend should be factored in\n bond_yield -= 0.02 # nominal inflation rate\n\n for index, row in call_table.iterrows():\n sigma = row['impliedVolatility']\n if sigma < 0.0001 or row['bid'] < 0.05 or row['volume'] < 10 or row['openInterest'] < 10:\n continue\n\n strike = row['strike']\n option_price = handle.CallPricing(spot, strike, bond_yield, time_to_expiry, sigma)\n\n call_table.at[index, 'option_value'] = option_price\n spread = (row['bid'] + row['ask']) / 2\n call_table.at[index, 'lastPrice'] = spread\n\n # error = ((option_price - spread) / spread)\n # if -0.05 < error < 0.05:\n # calls_well_priced += 1\n # total_calls += 1\n\n if option_price > spread:\n self.option_value[stock][i][expiry]['undervalued_call_options'] += 1\n if option_price < spread:\n self.option_value[stock][i][expiry]['overvalued_call_options'] += 1\n\n for index, row in put_table.iterrows():\n sigma = row['impliedVolatility']\n if sigma == 0.00 or row['bid'] < 0.05 or row['volume'] < 10 or row['openInterest'] < 10:\n continue\n strike = row['strike']\n\n option_price = handle.PutPricing(spot, strike, bond_yield, time_to_expiry, sigma)\n\n put_table.at[index, 'option_value'] = float(option_price)\n spread = (row['bid'] + row['ask']) / 2\n put_table.at[index, 'lastPrice'] = spread\n\n # error = ((option_price - spread) / spread)\n # if -0.05 < error < 
0.05:\n # puts_well_priced += 1\n # total_puts += 1\n\n if option_price > spread:\n self.option_value[stock][i][expiry]['undervalued_put_options'] += 1\n if option_price < spread:\n self.option_value[stock][i][expiry]['overvalued_put_options'] += 1\n\n # pct_well_priced = (calls_well_priced / total_calls) * 100\n # pct_well_priced_2 = (puts_well_priced / total_puts) * 100\n # print(f\"{round(pct_well_priced, 2)}% of calls well priced (within 5% of the bid/ask spread) \"\n # f\"for {stock} options expiring {expiry}\")\n # print(f\"{round(pct_well_priced_2, 2)}% of puts well priced (within 5% of the bid/ask spread) \"\n # f\"for {stock} options expiring {expiry}\")\n i += 1\n call_table.to_excel(writer, sheet_name=f'{stock} Calls {expiry}')\n put_table.to_excel(writer, sheet_name=f'{stock} Puts {expiry}')\n except Exception as e:\n print(e)\n finally:\n try:\n sheet = book['Sheet']\n book.remove(sheet)\n except KeyError:\n pass\n book.save(url)\n","sub_path":"Algorithmic Trader Project/module/options_pricing.py","file_name":"options_pricing.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"20151649","text":"from debug_toolbar.panels import DebugPanel\nfrom django.contrib.auth.models import User, Group\n#from django.template.context import get_standard_processors\nfrom django.template.loader import render_to_string\n\ndef get_debug_users():\n \"\"\"\n Returns a list if context switchable users based on the criteria outlined\n in settings\n \"\"\"\n users = User.objects.all()\n\n try:\n from settings import DEBUG_TOOLBAR_CONFIG\n if 'USER_EXCLUDE' in DEBUG_TOOLBAR_CONFIG:\n users = users.exclude(**DEBUG_TOOLBAR_CONFIG['USER_EXCLUDE'])\n if 'USER_INCLUDE' in DEBUG_TOOLBAR_CONFIG:\n users = users.filter(**DEBUG_TOOLBAR_CONFIG['USER_INCLUDE'])\n except:\n pass\n \n return users\n\nclass UserDebugPanel(DebugPanel):\n \"\"\"\n A panel to show info about the current user and allow to switch user\n access.\n \"\"\"\n\n name = 'User'\n has_content = True\n\n def title(self):\n return 'Users'\n\n def url(self):\n return ''\n\n def process_request(self, request):\n self.request = request\n\n def content(self):\n\n\n\n groups = Group.objects.all()\n context = {\n 'groups': groups,\n 'active_user': self.request.user,\n 'all_users': get_debug_users(),\n }\n return render_to_string('debug_toolbar/panels/users.html', context)\n\n def get_custom_permissions(self):\n\n from django.contrib.contenttypes.models import ContentType\n# from django.contrib.auth.models import Permission\n from django.db.models import get_models\n\n permissions = []\n\n for klass in get_models():\n if klass._meta.permissions:\n ctype = ContentType.objects.get_for_model(klass)\n permissions.append((ctype, klass._meta.permissions ))\n\n return permissions\n#from django.conf import settings\nfrom settings import DEBUG\n\nclass UserDebugPanelAuthentication:\n\n \"\"\"\n This authentication module will accept any login so long as the\n settings.DEBUG variable is set to True.\n \"\"\"\n def authenticate(self, user_id=None):\n try:\n from settings import DEBUG_TOOLBAR_PANELS\n except:\n 
return None\n\n if not DEBUG \\\n or not 'debug_toolbar.panels.user.UserDebugPanel' \\\n in DEBUG_TOOLBAR_PANELS:\n assert False\n return None\n\n return User.objects.get(pk=user_id)\n\n def get_user(self, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n\n","sub_path":"debug_toolbar/panels/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"107079045","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.corpus import cess_esp as cess\nfrom nltk import UnigramTagger as ut\n\nimport pandas as pd\nfrom scipy import spatial\nimport numpy as np\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial.distance import cosine\nimport nltk\n\ndef getPOSFromResponse(string):\n # li = list(string.split(\" \"))\n string = str(string).replace(\"\", \"\").replace(\"
\", \"\")\n tokens = nltk.word_tokenize(string)\n # str2 = uni_tag.tag(tokens)\n arr = nltk.pos_tag(tokens)\n str2=\"\"\n for(pos,tag) in arr:\n str2 = str2 + tag + \" \"\n newStr = str(str2).replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\").replace(\",\", \" \").strip()\n return newStr\n\ndef getData(fpInputYear,fpOutputYear):\n my_csv = pd.read_csv(fpInputYear)\n # filtered = my_csv.Score.str.match(\"I-\",na=False)\n # my_csv3 = my_csv2[my_csv2.Score != \"I-UR\"]\n columnResponse = my_csv.Response\n columnScore = my_csv.Score\n columnTestid = my_csv.Testid\n columnTopic = my_csv.Topic\n columnUserName=my_csv.Username\n columnPOS = my_csv.POS\n listSeparateTopic=my_csv.Topic.unique()\n dictTopicResponse = {}\n dictTopicString = {}\n\n strMF = 'I-MF'\n strMM = 'I-MM'\n strSE = 'I-SE'\n strNE = 'I-NE'\n dictScoreResponse = {strMF: [], strMM: [], strSE: [], strNE: []}\n print(listSeparateTopic)\n print(str(len(listSeparateTopic)))\n for item in listSeparateTopic:\n dictTopicResponse[str(item)]=[]\n\n print(str(len(columnResponse)),'\\t',str(len(columnScore)))\n i=-1\n listIndexInExcel=[]\n for item in columnResponse:\n # print(columnScore[i])\n i=i+1\n strScore = str(columnScore[i])\n strScore.strip()\n\n if not (strScore.startswith('I-') and strScore != 'I-UR'):\n continue\n listIndexInExcel.append(i)\n # columnResponse[i]=columnResponse[i].replace(\"\", \"\").replace(\"
\", \"\")\n # strPOS=getPOSFromResponse(columnResponse[i])\n strResponse = str(columnResponse[i]).replace(\"\", \"\").replace(\"
\", \"\").replace(\"
\", \"\")\n # print(strResponse)\n strTopic=str(columnTopic[i])\n\n # if strScore == strMF:\n # dictScoreResponse[strMF].append(strResponse)\n # # dictScoreResponse['I-MF'].append(' ')\n # # dictScoreResponse['I-MF'].append(strPOS)\n # elif strScore == strMM:\n # dictScoreResponse[strMM].append(strResponse)\n # # dictScoreResponse['I-MM'].append(' ')\n # # dictScoreResponse['I-MM'].append(strPOS)\n # elif strScore == strSE:\n # dictScoreResponse[strSE].append(strResponse)\n # # dictScoreResponse['I-SE'].append(' ')\n # # dictScoreResponse['I-SE'].append(strPOS)\n # elif strScore == strNE:\n # dictScoreResponse[strNE].append(strResponse)\n # # dictScoreResponse['I-NE'].append(' ')\n # # dictScoreResponse['I-NE'].append(strPOS)\n dictTopicResponse[strTopic].append(strResponse)\n\n\n strTotalIMF = ' '.join(dictScoreResponse[strMF])\n strTotalIMM = ' '.join(dictScoreResponse[strMM])\n strTotalISE = ' '.join(dictScoreResponse[strSE])\n strTotalINE = ' '.join(dictScoreResponse[strNE])\n for item in listSeparateTopic:\n strContentEachTopic = ' '.join(dictTopicResponse[item]).strip()\n if strContentEachTopic:\n dictTopicString[item]=strContentEachTopic\n\n numOfLabelTopic=0\n\n # print(strTotalIMF)\n # corpus = [str(strTotalIMF), str(strTotalIMM), str(strTotalISE), str(strTotalINE)]\n corpus=[]\n for item in dictTopicString:\n corpus.append(str(dictTopicString[item]))\n numOfLabelTopic=numOfLabelTopic+1\n\n for i in range(len(columnResponse)):\n strScore = str(columnScore[i])\n strScore.strip()\n if not (strScore.startswith('I-') and strScore != 'I-UR'):\n continue\n strResponse = str(columnResponse[i]).replace(\"\", \"\").replace(\"
\", \"\").replace(\"
\", \"\")\n corpus.append(strResponse)\n\n vectorizer = TfidfVectorizer(ngram_range=(1, 4))\n X = vectorizer.fit_transform(corpus)\n arrFeatureNames = vectorizer.get_feature_names()\n print('names: ' + str(len(arrFeatureNames)) + ' ' + str(arrFeatureNames))\n dictTopicVectors = {}\n indexNumTopic=0\n columnTitleRow = \"no,username,testId,topic,\"\n for item in dictTopicString:\n dictTopicVectors[item] = X[indexNumTopic].todense()\n indexNumTopic=indexNumTopic+1\n columnTitleRow=''.join([columnTitleRow,item,\",\"])\n columnTitleRow = ''.join([columnTitleRow, \"\\n\"])\n csv = open(fpOutputYear, 'w')\n\n\n\n csv.write(columnTitleRow)\n\n print(str(len(corpus)))\n for i in range(numOfLabelTopic, len(corpus)):\n vectori = X[i].todense()\n rowList=\"\"\n for item in dictTopicString:\n distItem = cosine_similarity(vectori, dictTopicVectors[item])[0][0]\n rowList = ''.join([rowList, str(distItem), \",\"])\n\n indexCorpus=listIndexInExcel[i-numOfLabelTopic]\n expectedResult = columnTopic[indexCorpus]\n strUsername=columnUserName[indexCorpus]\n strTestId=str(columnTestid[indexCorpus])\n strTopic=str(columnTopic[indexCorpus])\n # strResponse = str(columnResponse[indexCorpus])\n # strPOS = str(columnPOS[indexCorpus])\n row = str(i - numOfLabelTopic+1)+\",\"+strUsername+\",\"+strTestId+\",\"+strTopic+\",\";\n row = ''.join([row,rowList, \"\\n\"])\n print(str(len(corpus))+\" index \"+str(indexCorpus))\n csv.write(row)\n # if(indexCorpus>50):\n # break\n print(listSeparateTopic)\n print(str(len(listSeparateTopic)))\n # indexItemLabel=0\n # for item in dictTopicString:\n # row = ''.join([row, str(lst[indexItemLabel]), \",\"])\n # indexItemLabel=indexItemLabel+1\n # + ',' + str(strTestId)+ ',' + str(strTopic) + ',' + str(distIMF) + ',' + str(distIMM) + ',' + str(distISE) + ',' + str(\n # distINE) + ',' + str(\n # maxDist) + ',' + str(classResult) + ',' + str(expectedResult) + '\\n'\n\n #\n # if distIMF == maxDist:\n # classResult = strMF\n # elif distIMM == maxDist:\n # 
classResult = strMM\n # elif distISE == maxDist:\n # classResult = strSE\n # else:\n # classResult = strNE\n\n # print(\n # str(i) + '\\t' + str(distIMF) + '\\t' + str(distIMM) + '\\t' + str(distISE) + '\\t' + str(distINE) + '\\t' + str(\n # maxDist) + '\\t' + str(classResult))\n # lst.append(distItem)\n\n # distIMM = cosine_similarity(vectori, dictTopicVectors[strMM])[0][0]\n # distISE = cosine_similarity(vectori, dictTopicVectors[strSE])[0][0]\n # distINE = cosine_similarity(vectori, dictTopicVectors[strNE])[0][0]\n\n # distIMF = cosine_similarity(vectori, vectori)[0][0]\n # distIMM = cosine_similarity(vectori, vectori)[0][0]\n # distISE = cosine_similarity(vectori, vectori)[0][0]\n # distINE = cosine_similarity(vectori, vectori)[0][0]\n\n # maxDist = max(lst)\n\n # classResult = strNE\n\ndef main():\n fpInputCombine = 'rpf_combine_2018_2019.csv'\n fpOutputCombine = 'vector_combine.csv'\n fpInputNewYear2019 = 'ratingPositionFilter_newData_2019.csv'\n fpOutputNewYear2019 = 'vector_topic_newData_2019.csv'\n getData(fpInputCombine,fpOutputCombine)\n # getData(fpInputNewYear2019, fpOutputNewYear2019)\n\nmain()\n","sub_path":"replicationPackages/code/spring2020/topicClassification/responseTopic.py","file_name":"responseTopic.py","file_ext":"py","file_size_in_byte":7444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"72969740","text":"import requests\r\nfrom you_tango import settings\r\n\r\n\r\ndef run_query(search_terms, size=10):\r\n webhose_api_key = settings.WEBHOSE_API_KEY\r\n root_url = settings.WEBHOSE_ROOT\r\n\r\n try:\r\n res = requests.get(root_url, {\r\n 'token': webhose_api_key,\r\n 'format': 'json',\r\n 'sort': 'relevancy',\r\n 'size': size,\r\n 'q': '{} language:chinese'.format(search_terms),\r\n })\r\n results = res.json()['posts']\r\n\r\n return [{'title': item['title'], 'link': item['url'], 'summary': item['text'][:200]} for item in results]\r\n except Exception as e:\r\n print('run_query error: {}'.format(e))\r\n return []\r\n\r\n\r\nif __name__ == '__main__':\r\n ss = run_query('oppo r11s')\r\n pass\r\n","sub_path":"rango/webhose.py","file_name":"webhose.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"646589971","text":"from data_importers.management.commands import BaseHalaroseCsvImporter\n\n\nclass Command(BaseHalaroseCsvImporter):\n council_id = \"MEN\"\n addresses_name = (\n \"2021-03-22T10:35:38.602035/Mendip polling_station_export-2021-03-20.csv\"\n )\n stations_name = (\n \"2021-03-22T10:35:38.602035/Mendip polling_station_export-2021-03-20.csv\"\n )\n elections = [\"2021-05-06\"]\n csv_delimiter = \",\"\n\n def address_record_to_dict(self, record):\n uprn = record.uprn.strip().lstrip(\"0\")\n\n if uprn in [\n \"250002342\", # 15 UNDERHILL CLOSE, STREET\n \"250002337\", # 191B HIGH STREET, STREET\n \"250045108\", # ORCHARD BYRE, POLSHAM, WELLS\n \"250070118\", # NEW MANOR FARM, POLSHAM, WELLS\n \"250011489\", # HONEYSUCKLE COTTAGE, HAVYATT, GLASTONBURY\n \"250044905\", # SUGAR LOAF BARN, KEWARD, WELLS\n \"250054828\", # THE HUNTERS, TADHILL, LEIGH UPON MENDIP, RADSTOCK\n \"250062887\", # THE ANNEXE WITHAM HALL FARM WITHAM HALL FARM TO BUNNS LANE, WITHAM FRIARY, FROME\n \"250030360\", # LILLEYS CIDER, ROEWOOD FARM ESTATE, BUNNS LANE, WEST WOODLANDS, FROME\n \"250060445\", # RIVERSIDE, BUNNS LANE, WEST WOODLANDS, FROME\n \"250072443\", # FROME MEDICAL CENTRE, ENOS WAY, FROME\n \"250040297\", # LITTLE ORCHARD, RUDGE ROAD, STANDERWICK, FROME\n \"250040299\", # MERRION, RUDGE ROAD, STANDERWICK, FROME\n \"250038119\", # MILL COTTAGE, IRON MILL LANE, OLDFORD, FROME\n \"250043259\", # 5 RED HOUSE HOLIDAY HOMES WHITE POST TO CHARLTON ROAD, STRATTON ON THE FOSSE, SHEPTON MALLET\n \"250040953\", # MOUNT PLEASANT, CHILCOMPTON, RADSTOCK\n ]:\n return None\n\n if record.housepostcode in [\n \"BA5 1RJ\",\n \"BA5 3QR\",\n \"BA6 9DH\",\n \"BA6 8DA\",\n \"BA6 8AP\",\n \"BA4 4BT\",\n \"BA4 4DP\",\n \"BA4 5HB\",\n \"BA3 4DN\",\n \"BA16 0BG\",\n \"BA16 0BD\",\n \"BA16 0JL\",\n \"BA16 0NU\",\n \"BA5 2FF\",\n \"BA11 2ED\",\n \"BA11 2AU\",\n \"BA11 2XG\",\n \"BA11 5EP\",\n \"BA11 2TQ\",\n \"BA11 4AJ\",\n \"BA11 4FJ\",\n \"BA11 5HA\",\n \"BA11 5BT\",\n \"BA3 5QE\",\n 
\"BA11 4NY\",\n \"BA16 0GJ\",\n \"BA6 8PE\",\n \"BA11 5FE\",\n \"BA5 3DS\",\n ]:\n return None\n\n return super().address_record_to_dict(record)\n","sub_path":"polling_stations/apps/data_importers/management/commands/import_mendip.py","file_name":"import_mendip.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"553273468","text":"import json\nimport pandas as pd\nimport spacy\nfrom tqdm import tqdm\nimport os\nimport pdb\n\n\n\n# filename_train = 'data/cleaned_posts.csv'\n\ndef get_freq(word, vocab):\n if word in vocab.keys():\n return vocab[word]\n return 0\ndef eval():\n\n if not os.path.exists('results/eval_raw/'):\n os.makedirs('results/eval_raw/')\n\n lst_file = os.listdir('data/test')\n cpt_file = 0\n\n f = open(\"data/raw/vocabulary.json\", \"r\", encoding='utf-8')\n vocab_all = json.load(f)\n\n f = open(\"data/raw/vocab0.json\", \"r\", encoding='utf-8')\n vocab_0 = json.load(f)\n\n f = open(\"data/raw/vocab1.json\", \"r\", encoding='utf-8')\n vocab_1 = json.load(f)\n\n f = open(\"data/raw/vocab2.json\", \"r\", encoding='utf-8')\n vocab_2 = json.load(f)\n\n df_eval = pd.DataFrame(columns=['text', 'label'])\n\n total_count = 0\n for key in vocab_all.keys():\n total_count += vocab_all[key]\n print('total word count:', total_count)\n # df_txt = pd.read_csv(filename_train)\n\n cpt = 0\n for filename_test in lst_file:\n desc = 'valid_'+str(cpt)\n cpt += 1\n df_txt = pd.read_csv(os.path.join('data/test/', filename_test), header=None)\n\n df_txt.columns = ['text', 'label']\n print('columns', df_txt.columns)\n\n for cpt_row in tqdm(range(len(df_txt)), desc=desc):\n\n tokens = tokenizer(df_txt['text'][cpt_row])\n label = df_txt['label'][cpt_row]\n word_count = 0\n freq = 0\n pred = [0, 0, 0]\n for token in tokens:\n token = token.text\n if token in vocab_all.keys():\n word_count = vocab_all[token]\n freq = 1 - word_count/total_count\n\n tmp = get_freq(token, vocab_0)\n pred[0] += tmp/word_count * freq\n\n tmp = get_freq(token, vocab_1)\n pred[1] += tmp/word_count * freq\n\n tmp = get_freq(token, vocab_2)\n pred[2] += tmp/word_count * freq\n\n\n prediction = pred.index(max(pred))\n\n # df_eval.loc[cpt_row] = [df_txt['text'][cpt_row], df_txt['label'][cpt_row]]\n df_eval.loc[cpt_row] = [df_txt['text'][cpt_row], prediction]\n\n 
df_eval.to_csv(os.path.join('results/eval_raw/', 'evaled_' + filename_test), index=False, header=False)\n\n\nif __name__ == '__main__':\n\n nlp = spacy.load('en_core_web_sm')\n tokenizer = nlp.Defaults.create_tokenizer(nlp)\n eval()","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"273535015","text":"import requests\nfrom bs4 import BeautifulSoup\nimport books_server as bs\nimport books as b\n\n# b.add_column_to_table('books','description')\nbook_ids = bs.get_book_ids('description') #book_ids where there is no description\nprint(book_ids)\nfor book_reference_number in book_ids:\n page = requests.get('https://www.goodreads.com/book/show/' +str(book_reference_number[0]))\n#\n soup = BeautifulSoup(page.content, 'html.parser')\n try:\n book_description = (soup.find(id=\"descriptionContainer\").find(style=\"display:none\").get_text())\n print(book_description)\n b.updateTable(book_reference_number[0],'description', book_description)\n except(Exception, AttributeError) as error:\n print(error)","sub_path":"backend/Scrapers/BookDescriptionScraper.py","file_name":"BookDescriptionScraper.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"219097088","text":"import matplotlib\nimport matplotlib.pyplot as plt\nfrom random import randint\nfrom random import randint, shuffle\nfrom io import BytesIO\nimport base64\nimport requests\nimport json\n\nclass Q_type():\n multichoice = \"multichoice\"\n singlechoice = \"singlechoice\"\n photo = \"photo\"\n shortreply = \"shortreply\"\n\ndef find_x_angel_in_qualdrilateral():\n # setting matplotlib\n font = {'family': 'normal',\n 'weight': 'bold',\n 'size': 16}\n\n matplotlib.rc('font', **font)\n matplotlib.rcParams.update({'text.color': \"white\",\n 'axes.labelcolor': \"blue\"})\n # creating the plot\n x = [1, 0.75, 2.5, 3.5]\n y = [0.5, 1.25, 2, 1]\n fig = plt.figure(figsize=(8, 8))\n plt.axis('equal')\n plt.axis('off')\n\n # generating the question\n question = \"The diagram shows a quadrilateral. Find the value of X (NOT TO SCALE)\"\n a = []\n while not(200 < sum(a) < 340):\n a = [randint(30, 150), randint(30, 150), randint(30, 150)]\n\n answer = 360-sum(a)\n wrongs = set()\n while len(wrongs) < 4:\n a_wrong = randint(30, 150)\n if a_wrong != answer:\n wrongs.add(a_wrong)\n\n options = list(wrongs)+[answer]\n text = \"{q}---{a}\\n\\n\".format(q=question, a=answer)\n for i, o in enumerate(options):\n text += \"{i}) {o}\\n\".format(i=chr(i+65), o=o)\n\n # adding the texts\n plt.fill(x, y)\n plt.text(x[0], y[0]+0.1, a[0])\n plt.text(x[1]+0.1, y[1]-0.1, a[1])\n plt.text(x[2]-0.15, y[2]-0.2, a[2])\n plt.text(x[3]-0.3, y[3]+0.07, 'x')\n\n ax = fig.add_subplot(111)\n ax.text(0.5, 2, text, fontsize=13, color='black')\n\n # this is to convert the graph to the text format to send\n image = BytesIO()\n plt.savefig(image, format='png')\n image.seek(0)\n string_image1 = base64.encodebytes(image.getvalue()).decode()\n\n return {\"photo\": string_image1,\n \"q_type\": Q_type.singlechoice,\n \"hint\": [],\n \"solution\": 
[]}\n\n","sub_path":"findX.py","file_name":"findX.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"354486241","text":"import pprint\nimport re\nimport subprocess\n\nfrom PyQt5.QtCore import Qt # , QAbstractTableModel, QVariant\n# from PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout # QGridLayout\nfrom PyQt5.QtWidgets import QDialog, QDialogButtonBox\nfrom PyQt5.QtWidgets import QAbstractItemView\nfrom PyQt5.QtWidgets import QPushButton, QLineEdit # QLabel, QMessageBox\n\nimport listofformatswidget\n\n\nclass AddDlg(QDialog):\n \"\"\"AddDlg.\"\"\"\n def __init__(self, model, url, parent=None):\n super(AddDlg, self).__init__(parent)\n self.parent = parent\n self.model = model\n self.formats = []\n\n self.urlEdit = QLineEdit()\n self.btnGetInfo = QPushButton(\"&Info\")\n self.btnGetInfo.clicked.connect(self.btnGetInfoClick)\n\n urlBox = QHBoxLayout()\n urlBox.addWidget(self.urlEdit)\n urlBox.addWidget(self.btnGetInfo)\n\n self.listOfFormatsWidget = listofformatswidget.ListOfFormatsWidget()\n self.listOfFormatsWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.listOfFormatsWidget.itemClicked.connect(self.listOfFormatsWidget.clicked)\n self.listOfFormatsWidget.currentItemChanged.connect(self.listOfFormatsWidget.itemChanged)\n\n addButton = QPushButton(\"&Add\")\n addButton.setDefault(True)\n\n buttonBox = QDialogButtonBox(QDialogButtonBox.Cancel)\n buttonBox.addButton(addButton, QDialogButtonBox.AcceptRole)\n buttonBox.accepted.connect(self.addButtonClick)\n buttonBox.rejected.connect(self.reject)\n\n grid = QVBoxLayout()\n grid.addLayout(urlBox)\n grid.addWidget(self.listOfFormatsWidget)\n grid.addWidget(buttonBox)\n\n self.setLayout(grid)\n self.setWindowTitle(\"Add movie\")\n self.setWindowModality(Qt.ApplicationModal)\n self.urlEdit.setText(url)\n self.resize(600, 400)\n # self.exec_()\n\n def addButtonClick(self):\n # TODO(me): add getting of real props instead of 'Кухня 88'\n audio = ''\n video = ''\n print(self.urlEdit.text())\n rows = self.listOfFormatsWidget.selectionModel().selectedRows()\n 
for row in rows:\n # print(row.data(), \"\\n\")\n matchLine = re.match(r'^(\\d+)\\s', row.data(), re.M | re.I)\n if matchLine:\n # print(\" @@@>>> \", matchLine.group(1))\n matchVideo = re.match(r'^(\\d+)\\s.*DASH video', row.data(), re.M | re.I)\n if matchVideo:\n print(\" @@@ Video >>> \", matchVideo.group(1))\n video = matchLine.group(1)\n matchAudio = re.match(r'^(\\d+)\\s.*DASH audio', row.data(), re.M | re.I)\n if matchAudio:\n print(\" @@@ Audio >>> \", matchAudio.group(1))\n audio = matchLine.group(1)\n\n pp = pprint.PrettyPrinter(indent=4, width=1024)\n # pp.pprint(self.formats)\n \n for f in self.formats:\n print(f)\n\n self.model.addrow(['Кухня 88', video, audio, '0%', self.urlEdit.text(), self.formats, ])\n self.model.layoutChanged.emit()\n\n self.accept()\n\n def btnGetInfoClick(self):\n # Load movie's info\n # https://www.youtube.com/watch?v=G6bSu02Fmxo - Кухня\n # https://www.youtube.com/watch?v=1IEfCoGnTow - 100 Years of Flight Attendant Uniforms\n # default format: -o '%(title)s-%(id)s.%(ext)s'\n # youtube-dl -j --flat-playlist\n\n videoUrl = self.urlEdit.text()\n process_output = subprocess.check_output([\"youtube-dl\", \"-F\", videoUrl], universal_newlines=True)\n # print(process_output + \"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\\n\")\n lines = process_output.split(\"\\n\")\n for line in lines:\n matchLine = re.match(r'^(\\d+)\\s', line, re.M | re.I)\n if matchLine:\n # print (matchLine.group(1), \" -> \", line)\n self.listOfFormatsWidget.addItem(matchLine.group(1) + \" -> \" + line)\n self.formats.append(line)\n\n fw = 2 * self.listOfFormatsWidget.frameWidth()\n self.listOfFormatsWidget.setFixedSize(self.listOfFormatsWidget.sizeHintForColumn(0) + fw,\n self.listOfFormatsWidget.sizeHintForRow(0) * self.listOfFormatsWidget.count() + fw)\n # QMessageBox.information(self, \"btnGetInfoClick\", \"You clicked: 
\")\n","sub_path":"AddDlg.py","file_name":"AddDlg.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"232004226","text":"\"\"\"Module for entity related stuff\"\"\"\n#Python imports\nfrom time import time\n\n#Game imports\nfrom vectors import Vector\nfrom utilities import class_name\nfrom graphics import COLOR_WHITE\nfrom utilities import class_name, encode_word, decode_word, encode_long, decode_long, encode_string, decode_string, get_logger\n\nclass EntityManager(object):\n \"\"\"Stores all entities and controls the pool, also propagates the update and draw functions\"\"\"\n def __init__(self, world):\n self.__log = get_logger(\"entity_manager\")\n self.__world = world\n\n def start(self):\n \"\"\"Initializes the entity storages and uids\"\"\"\n self.__entities = {} #All entities existing, non deleted and deleted, categorized by their uid\n self.__reusable = {} #Entities that are with Delete flag, each class name has a list of reusable entities, same as __entities organization\n self.__uid = 0 #Global uid for entities\n self.__entity_classes = {}\n for entity_cls in ENTITY_CLASSES:\n name = class_name(entity_cls)\n self.__entity_classes[name] = entity_cls\n\n def close(self):\n \"\"\"Called when the entity manager is closed\"\"\"\n self.__entities = None\n self.__reusable = None\n self.__uid = None\n self.__entity_classes = None\n\n def __get_uid(self):\n \"\"\"Generates a unique id for a entity\"\"\"\n uid = self.__uid\n self.__uid = uid + 1\n return uid\n\n def use_entity(self, entity_class, force_uid = None):\n \"\"\"Creates or reused a entity of same class\"\"\"\n name = class_name(entity_class)\n entity = None\n reused = False #True if the entity is reused\n if name in self.__reusable.keys():\n reusable = self.__reusable[name] #Get all entities that are reusable with the same classname\n if len(reusable) != 0: #Get an reusable entity from the list\n entity = reusable.pop()\n reused = True\n else: #If there is no an list for the classname, make one\n self.__reusable[name] = []\n\n if entity == None: #There was no luck getting a reusable entity, 
lets create a new one\n uid = self.__get_uid() if force_uid == None else force_uid\n entity = entity_class(uid, self.__world)\n\n if reused:\n text = \"Reused\"\n else:\n text = \"Created\"\n self.__log.debug(\"%s entity '%s' in '%s' with uid '%i'\" % (text, name, hex(id(entity)), entity.get_uid()))\n\n self.__entities[entity.get_uid()] = entity\n\n return entity, reused\n\n def deserialize_entity(self, uid, data):\n \"\"\"deserializes a entity\"\"\"\n index = 0\n name, index = decode_string(data, index)\n data = data[index:] #Remove the UID and class name from data\n if uid in self.__entities.keys(): #UID exists already\n entity = self.__entities[uid]\n else:\n cls = self.__entity_classes[name]\n entity = self.use_entity(cls, uid)[0]\n\n if uid != entity.get_uid():\n raise ValueError(\"Entity %s has uid %s but serialized data was for %s\" % (entity, entity.get_uid(), uid))\n if name != class_name(entity):\n raise ValueError(\"Entity %s has cls %s but serialized data was for %s\" % (entity, class_name(entity), name))\n\n def serialize_entity(self, uid):\n \"\"\"serializes a entity\"\"\"\n entity = self.__entities[uid]\n #Start with the class name\n data = encode_string(class_name(entity))\n #Sum the entity serialization data\n data = data + entity.serialize()\n return data\n\n def update(self):\n \"\"\"Calls update for all active entities and moves to self.__reusable if they are with delete flag\"\"\"\n for entity in self.__entities.values():\n name = class_name(entity)\n if entity not in self.__reusable[name]: #Check if is not in reusable dict\n if entity.delete: #Deleted and not in reusable dict? 
append it\n self.__reusable[name].append(entity)\n else:\n entity.update()\n\n def draw(self):\n \"\"\"Calls draw for all active entities\"\"\"\n for entity in self.__entities.values():\n if not entity.delete: #Render if is not deleted\n entity.draw()\n\n def get_entities(self):\n \"\"\"Get entity list\"\"\"\n return self.__entities\n\n def get_entity(self, uid):\n \"\"\"Returns the asociated entity instance by uid\"\"\"\n return self.__entities[uid]\n\nclass Entity(object): # pylint: disable=R0902\n \"\"\"The entity class which all entities in the world are subclassed, the only authorized to say \"I'm your father\" quote to everybody\"\"\"\n def __init__(self, uid, world):\n self.__uid = uid\n self.__world = world\n self.__load()\n self.init()\n\n def __load(self):\n \"\"\"Loads data when entity is created, doesn't run when entity is reused, ideal for image loading for example\"\"\"\n pass\n\n def init(self):\n \"\"\"Values that are used for managing the entity in update(), also they are reset for reusing the object instance\"\"\"\n self.health = 100 #Health of entity\n self.max_health = 100 #Maximum health level\n self.nodamage = False #Makes the entity indestructible (sets the health to maximum every update and ignores health < 0)\n self.team = None #Team of the entity belongs\n self.delete = False #Deletion flag, when its True, means that it can be reused, also with this true, update and draw is not called\n\n #Private or immutable values,\n self.__pos = Vector(0, 0) #The actual position of the unit\n self.__prev_pos = Vector(0, 0) #The previous position before a net update, used for interpolation\n self.__heading = 0 #Heading of the unit\n self.__prev_heading = 0 #Previous heading of unit, used for interpolation\n\n #Values that are not send during serialization\n self.__image = None #Contains Surface of actual frame\n self.__net_contact = time() #The last network update, used for interpolation\n\n def get_uid(self):\n \"\"\"Returns the entity's unique 
id\"\"\"\n return self.__uid\n\n @property\n def pos(self):\n \"\"\"Get the position\"\"\"\n return self.__pos\n\n @pos.setter\n def pos(self, pos, update_prev = True):\n \"\"\"Set the position\"\"\"\n if update_prev:\n self.__prev_pos = pos\n self.__pos = Vector(pos)\n\n @property\n def heading(self):\n \"\"\"Get the heading\"\"\"\n return self.__heading\n\n @heading.setter\n def heading(self, heading, update_prev = True):\n \"\"\"Set the heading\"\"\"\n if update_prev:\n self.__prev_heading = heading\n self.__heading = heading\n\n def update(self):\n \"\"\"Called when the entity is updated\"\"\"\n self.health_update()\n\n def health_update(self):\n \"\"\"Checks health\"\"\"\n if self.nodamage:\n self.health = self.max_health\n elif (self.health <= 0):\n self.delete = True\n\n def draw(self):\n \"\"\"Called when the entity needs to be drawed\"\"\"\n graphics = self.__world.graphics\n graphics.draw_circle(COLOR_WHITE, self.__pos.round(), 5)\n\n def serialize(self):\n \"\"\"Called when needs to serialize this entity\"\"\"\n #Add the positions and heading\n x, y = self.pos.round()\n data = encode_word(x)\n data = data + encode_word(y)\n data = data + encode_word(self.heading)\n return data\n\n def deserialize(self, data):\n \"\"\"Called when needs to deserialize information on this entity\"\"\"\n #Get the x y coordinates and heading in first 3 words bytes\n index = 0\n self.x = decode_word(data[index:index+2])\n index = index + 2\n self.y = decode_word(data[index:index+2])\n index = index + 2\n self.heading = decode_word(data[index:index+2])\n\n def net_update(self):\n \"\"\"Update the network contact time\"\"\"\n self.__net_contact = time()\n\nclass Unit(Entity):\n \"\"\"A class for generic tank\"\"\"\n def __load(self):\n pass\n\nclass Tank(Unit):\n \"\"\"A class for generic tank\"\"\"\n def __load(self):\n pass\n\nENTITY_CLASSES = (\n Entity,\n Unit,\n Tank,\n)\n\nif __name__ == '__main__': from main import init_main; 
init_main()\n","sub_path":"src/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"37939944","text":"from rest_framework import serializers\n\nfrom .models import Author\n\n\nclass AuthorSerializer(serializers.ModelSerializer):\n picture = serializers.SerializerMethodField(source=\"picture\", method_name=\"get_picture\")\n class Meta:\n model = Author\n fields = [\"id\", \"name\", \"picture\"]\n read_only_fields = [\"id\", \"picture\"]\n\n def get_picture(self, obj):\n if not obj.picture:\n return None\n\n return self.context[\"request\"].build_absolute_uri(obj.picture.url)\n\n\nclass PictureAuthorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Author\n fields = [\"picture\"]\n","sub_path":"app/author/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"453220338","text":"from JChaves.Tools.CMSTools import *\nimport JChaves.Tools.Samples as Samples\nimport os,glob,sys\nimport FWCore.ParameterSet.Config as cms\n\nttbar = 'ttbar' in sys.argv\ndyjets = 'dyjets' in sys.argv\nsubmit = 'submit' in sys.argv\n\nprocess = cms.Process(\"ANA\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames=cms.untracked.vstring('root://cmsxrootd-site.fnal.gov//store/mc/Summer12/WRToNuLeptonToLLJJ_MW-3500_MNu-1750_TuneZ2star_8TeV-pythia6-tauola/GEN-SIM/START50_V13-v2/0000/04317A88-1FA2-E111-8D7A-485B39897242.root'),\n )\n# This only works interactively\nmass = '_'\nif not ttbar and not dyjets:\n for m in sys.argv:\n if '00' in m:\n mass = '_'+m\ndset = ''\nfor s in Samples.signal_samples:\n if mass in s.name:\n dset = s.dataset\n \n#process.source.fileNames = file_list(dset+'step4_PAT*.root',True)\n#process.source.fileNames = file_list('/DYJetsToLL_M-50_13TeV-madgraph-pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM',False)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_EE_2000_reco/EXO-Phys14DR-00009_*.root',True)\nprocess.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_2000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_3000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_4000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_5000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_6000_reco/EXO-Phys14DR-00009_*.root',True)\noutfile = 'resolution_EXO_MUMU.root'\n\nif ttbar:\n 
process.source.fileNames = file_list(Samples.ttbar_samples[0].dataset,False)\n outfile = 'resolution_ttbar.root'\n process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )\n\nif dyjets:\n process.source.fileNames = file_list(Samples.dyjets_samples[0].dataset,False)\n outfile = 'resolution_dyjets.root'\n process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )\n\nprocess.TFileService = cms.Service('TFileService', fileName = cms.string(outfile))\n\nprocess.load('JChaves.WR_Analyzer.resolution_cfi')\n\nprocess.p = cms.Path(process.ana)\n\n\n###################################################################################################\n###################################################################################################\n# Use my CRABSubmitter for now:\n# crab2_submit(name,nevents,name modifiers)\n# name = nstep -> MakeSamples\n# name = ttbar\n# name = dyjets\nif __name__ == '__main__' and hasattr(sys, 'argv') and submit:\n from JChaves.Tools.CRABSubmitter import *\n \n if ttbar:\n #crab2_submit('ttbar',-1,'all')\n crab3_submit('ttbar',-1,'all')\n if dyjets:\n #for x in ['HT-100To200','HT-200To400','HT-400To600','HT-600ToInf']:\n for x in ['M-200To400','M-400To800','M-800To1400','M-1400To2300','M-3500To4500','M-4500To6000','M-6000To7500','M-7500To8500',]:\n #crab2_submit('dyjets',-1,x)\n crab3_submit('dyjets',-1,x)\n","sub_path":"WR_Analyzer/test/resolution.py","file_name":"resolution.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"407218396","text":"from rest_framework import generics, views, status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\n\nfrom .models import User\nfrom .permissions import AdminPermission\nfrom .serializers import (\n UserSerializer,\n RestrictedUserSerializer,\n UserRegisterSerializer,\n UserConfirmSerializer,\n)\n\n\nclass UserList(generics.ListCreateAPIView):\n\n serializer_class = UserSerializer\n permission_classes = [AdminPermission, ]\n queryset = User.objects.all()\n\n\nclass UserDetail(generics.RetrieveUpdateDestroyAPIView):\n\n serializer_class = UserSerializer\n permission_classes = [AdminPermission, ]\n queryset = User.objects.all()\n\n\nclass RestrictedUserDetail(generics.RetrieveUpdateAPIView):\n\n serializer_class = RestrictedUserSerializer\n\n def get_object(self):\n return self.request.user\n\n\nclass UserRegisterView(views.APIView):\n\n permission_classes = [AllowAny,]\n serializer_class = UserRegisterSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)\n serializer.save()\n\n return Response(serializer.data, status.HTTP_200_OK)\n\n\nclass UserConfirmView(views.APIView):\n\n permission_classes = [AllowAny,]\n serializer_class = UserConfirmSerializer\n\n def put(self, request):\n serializer = self.serializer_class(data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)\n email = serializer.validated_data.get('email')\n token = serializer.validated_data.get('app_id')\n user = User.objects.filter(email=email, app_id=token).first()\n if not user:\n body = {'deteils': 'INCORRECT_TOKEN'}\n return Response(body, status.HTTP_400_BAD_REQUEST)\n user.is_confirmed = True\n user.save()\n return Response(serializer.data, 
status.HTTP_200_OK)\n","sub_path":"test_app/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"593709688","text":"# -*- coding: utf8 -*-\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\n\nTEST_SCALE = 100\n\n# Note that OriginalCategoryName is hardcoded to electronik.\n\n\nclass TestberichteBlogger_deSpider(AlaSpider):\n name = 'testberichte-blogger_de'\n allowed_domains = ['testberichte-blogger.de']\n start_urls = ['http://www.testberichte-blogger.de/elektronik']\n\n def parse(self, response):\n sub_category_urls = self.extract_list(response.xpath(\n \"//div[@class='randrund']/p/a/@href|//div[\"\n \"@class='content']/p/a/@href\"))\n\n if sub_category_urls:\n for sub_category_url in sub_category_urls:\n yield response.follow(sub_category_url,\n callback=self.parse_category)\n\n def parse_category(self, response):\n date_xpath = self.extract(response.xpath(\n \"substring-after(//div/h1, '(')\"))\n date = date_xpath.split(')')\n date = date[0]\n category_item_xpath = \"//td/a/@href\"\n category_item_urls = self.extract_list(response.xpath(\n category_item_xpath))\n\n for category_item_url in category_item_urls:\n yield response.follow(category_item_url,\n callback=self.parse_review_product,\n meta={'date': date})\n\n def get_product_name(self, response):\n name_xpath = self.extract(response.xpath(\"//h1/text()\"))\n name = name_xpath.split(u'–')\n if name[0]:\n productname = name[0].replace('Test', '')\n else:\n productname = name_xpath.replace('Test', '')\n\n return productname\n\n def parse_review_product(self, response):\n product_xpaths = {\n 'PicURL': '(//div/a/img)[1]/@src',\n 'source_internal_id': \"substring-after(//link[@rel='shortlink']\"\n \"/@href, '=')\"\n }\n\n product = self.init_item_by_xpaths(response, 'product', product_xpaths)\n\n product[\"ProductName\"] = self.get_product_name(response)\n\n product[\"OriginalCategoryName\"] = \"elektronik\"\n\n yield product\n\n review_xpaths = {\n 'TestTitle': '//h1/text()',\n 'source_internal_id': \"substring-after(//link[@rel='shortlink']\"\n \"/@href, '=')\",\n 
\"SourceTestRating\": \"substring-before(\"\n \"//td/strong/strong/text(),'%')\",\n 'TestSummary': '//p/i/text()',\n 'TestPros': \"//div/span[contains(text(),'+')]/text()|\"\n \"//div/span/font/font[contains(text(),'+')]/text()\",\n 'TestCons': \"//div/span[contains(text(),' - ')]/text()|\"\n \"//div/span/font/font[contains(text(),' - ')]/text()|\"\n u\"//div/span[contains(text(),'– ')]/text()\"\n }\n\n review = self.init_item_by_xpaths(response, 'review', review_xpaths)\n\n date = response.meta['date']\n\n if date:\n review[\"TestDateText\"] = date\n\n if review[\"SourceTestRating\"]:\n review[\"SourceTestScale\"] = TEST_SCALE\n\n if review[\"TestCons\"]:\n review[\"TestCons\"] = review[\"TestCons\"].replace('-', ''\n ).replace(u'–', '')\n\n if review[\"TestPros\"]:\n review[\"TestPros\"] = review[\"TestPros\"].replace('+', '')\n\n review[\"ProductName\"] = self.get_product_name(response)\n\n review['DBaseCategoryName'] = 'PRO'\n\n yield review\n","sub_path":"alascrapy/spiders/testberichte-blogger_de.py","file_name":"testberichte-blogger_de.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"18902621","text":"from bs4 import BeautifulSoup\r\nfrom imutils import paths\r\nimport os\r\n\r\n# initialize the base path for the logos dataset\r\nBASE_PATH = \"./\"\r\n\r\nannot_path = \"SIMS_VOC_Annotations2/all_annot_xml/\"\r\n\r\n# build the path to the annotations and input images\r\ntrain_images_path = \"SIMS_Dataset/train_images/\"\r\nval_images_path = \"SIMS_Dataset/validation_images/\"\r\ntest_images_path = \"SIMS_Dataset/test_images/\"\r\n\r\n# build the path to the output training and test .csv files\r\ntrain_csv = os.path.sep.join([BASE_PATH, 'train.csv'])\r\nval_csv = os.path.sep.join([BASE_PATH, 'val.csv'])\r\ntest_csv = os.path.sep.join([BASE_PATH, 'test.csv'])\r\n\r\n# build the path to the output classes CSV files\r\nclasses_csv = os.path.sep.join([BASE_PATH, 'classes.csv'])\r\n\r\n# build the path to the output predictions dir\r\noutput_dir = os.path.sep.join([BASE_PATH, 'predictions'])\r\n\r\ntrain_image_paths = list(paths.list_files(train_images_path))\r\nval_image_paths = list(paths.list_files(val_images_path))\r\ntest_image_paths = list(paths.list_files(test_images_path))\r\n# print(val_image_paths)\r\n\r\n# create the list of datasets to build\r\ndataset = [ (\"train\", train_image_paths, train_csv),\r\n (\"val\", val_image_paths, val_csv),\r\n (\"test\", test_image_paths, test_csv)]\r\n# initialize the set of classes we have\r\nCLASSES = set()\r\n\r\nfor (dType, imagePaths, outputCSV) in dataset:\r\n # load the contents\r\n print(\"[INFO] creating '{}' set...\".format(dType))\r\n print(\"[INFO] {} total images in '{}' set\".format(len(imagePaths), dType))\r\n\r\n # open the output CSV file\r\n csv = open(outputCSV, \"w\")\r\n\r\n # loop over the image paths\r\n for imagePath in imagePaths:\r\n # build the corresponding annotation path\r\n fname = imagePath.split(os.path.sep)[-1]\r\n fname = \"{}.xml\".format(fname[:fname.rfind(\".\")])\r\n annotPath = os.path.join(annot_path, os.path.basename(fname))\r\n\r\n # load the contents 
of the annotation file and buid the soup\r\n contents = open(annotPath).read()\r\n soup = BeautifulSoup(contents, \"html.parser\")\r\n\r\n # extract the image dimensions\r\n w = int(soup.find(\"width\").string)\r\n h = int(soup.find(\"height\").string)\r\n\r\n for o in soup.find_all(\"object\"):\r\n #extract the label and bounding box coordinates\r\n label = o.find(\"name\").string\r\n xMin = int(float(o.find(\"xmin\").string))\r\n yMin = int(float(o.find(\"ymin\").string))\r\n xMax = int(float(o.find(\"xmax\").string))\r\n yMax = int(float(o.find(\"ymax\").string))\r\n\r\n # truncate any bounding box coordinates that fall outside\r\n # the boundaries of the image\r\n xMin = max(0, xMin)\r\n yMin = max(0, yMin)\r\n xMax = min(w, xMax)\r\n yMax = min(h, yMax)\r\n\r\n # ignore the bounding boxes where the minimum values are larger\r\n # than the maximum values and vice-versa due to annotation errors\r\n if xMin >= xMax or yMin >= yMax:\r\n continue\r\n elif xMax <= xMin or yMax <= yMin:\r\n continue\r\n\r\n # write the image path, bb coordinates, label to the output CSV\r\n row = [os.path.abspath(imagePath),str(xMin), str(yMin), str(xMax),\r\n str(yMax), str(label)]\r\n csv.write(\"{}\\n\".format(\",\".join(row)))\r\n\r\n # update the set of unique class labels\r\n CLASSES.add(label)\r\n\r\n # close the CSV file\r\n csv.close()\r\n\r\n print(\"[INFO] writing classes...\")\r\n csv = open(classes_csv, \"w\")\r\n rows = [\",\".join([c, str(i)]) for (i, c) in enumerate(CLASSES)]\r\n csv.write(\"\\n\".join(rows))\r\n csv.close()","sub_path":"Faster RCNN/voc_2_csv.py","file_name":"voc_2_csv.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"169647869","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport argparse\nimport numpy as np\nimport spacy_udpipe\nimport pandas as pd\nimport copy\n\nfrom MWEPreProcessor import MWEPreProcessor\nfrom WordEmbedding import set_fastText_word_embeddings\nfrom Operations import load_pickle, get_logger, dump_pickle\nfrom MWEIdentifier import MWEIdentifier\n\nclass ERMIEvaluate:\n def __init__(self, root_path):\n self.lang = 'TR'\n self.tag = 'gappy-crossy'\n self.embedding_type = 'headend'\n self.root_path = root_path\n self.input_path = self.root_path\n self.output_path = os.path.join(self.root_path, 'output', self.lang)\n gensim_name = \"gensim_\" + self.lang.lower()\n self.gensim_we_path = os.path.join(self.root_path, 'TR_model/Embeddings', gensim_name)\n\n self.mwe_write_path = os.path.join(self.root_path, \"output\")\n\n if not os.path.exists(self.mwe_write_path):\n os.makedirs(self.mwe_write_path)\n\n self.mwe_train_path = os.path.join(self.root_path, 'TR_model', 'train.pkl')\n\n self.logger = get_logger(os.path.join(self.root_path, 'TR_model'))\n\n self.params = { 'TR': {'n_units': 20, 'dropout': [0.1, 0.1], 'batch_size': 32, 'epochs': 20},\n }\n\n self.mwe = load_pickle(self.mwe_train_path)\n \n self.mwe_identifier = MWEIdentifier(self.lang, self.embedding_type, self.mwe, self.logger, self.mwe_write_path)\n self.mwe_identifier.set_params(self.params[self.lang])\n self.mwe_identifier.set_train()\n self.mwe_identifier.build_model()\n \n def evaluate(self, sentence):\n \n nlp = spacy_udpipe.load_from_path(lang=\"tr\",\n path=\"./turkish-imst-ud-2.4-190531.udpipe\",\n meta={\"description\": \"Custom 'tr' model\"})\n text = sentence\n\n doc = nlp(text)\n udpiped_sentence = [(token.i + 1, token.text, token.lemma_, token.pos_, \"_\", \"_\", str(token.head), token.dep_.lower(), \"_\", \"_\", \"_\") for token in doc]\n self.mwe.test_sentences = [udpiped_sentence]\n new_corpus = pd.DataFrame(udpiped_sentence, columns=['ID', 'FORM', 'LEMMA', 'UPOS', 'XPOS', 
'FEATS', 'HEAD', 'DEPREL',\n 'DEPS', 'MISC', 'PARSEME:MWE'])\n new_corpus['BIO'] = copy.deepcopy(new_corpus['PARSEME:MWE'])\n new_corpus[new_corpus['BIO'].isnull()] = 'space'\n new_corpus['BIO'] = copy.deepcopy(new_corpus['BIO'].apply(lambda x: x.strip()))\n space_row = {'ID':'space', 'FORM':'space', \"LEMMA\":'space', \"UPOS\":'space', \"XPOS\":'space', \"FEATS\":'space', \"HEAD\":'space', \"DEPREL\":'space', \"DEPS\":'space', \"MISC\":'space', \"PARSEME:MWE\":'space', \"BIO\":'space'}\n test_corpus = new_corpus.append(space_row, ignore_index=True)\n self.mwe._test_corpus = test_corpus\n \n self.mwe_identifier.mwe = self.mwe\n self.mwe_identifier.set_test()\n reload_path = os.path.join(self.root_path, 'TR_model', 'teacher-weights-last.hdf5')\n lines = self.mwe_identifier.predict_test_custom_model(reload_path)\n \n return lines\n","sub_path":"ERMIEvaluate.py","file_name":"ERMIEvaluate.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"585729984","text":"# Copyright (2015-2017) Hewlett Packard Enterprise Development LP\n# Copyright (2015-2017) Universidade Federal de Campina Grande\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport retrying\n\nfrom oslo_log import log as logging\n\nfrom ironic_oneviewd.conf import CONF\nfrom ironic_oneviewd.node_manager.manage import NodeManager\n\nLOG = logging.getLogger(__name__)\n\n\ndef do_manage_ironic_nodes():\n \"\"\"Show a list of OneView servers to be created as nodes in Ironic.\"\"\"\n node_manager = NodeManager()\n retry_interval_in_ms = CONF.DEFAULT.retry_interval * 1000\n\n @retrying.retry(wait_fixed=retry_interval_in_ms)\n def execute():\n try:\n node_manager.pull_ironic_nodes()\n except Exception as ex:\n LOG.error(ex)\n raise Exception(\"Continue trying...\")\n execute()\n","sub_path":"ironic-oneviewd/ironic_oneviewd/node_manager/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"558701672","text":"import logging\nimport requests\nimport json\nfrom datetime import date\n\nclass EstadoPingAPI:\n def __init__(self, env, catalog):\n self._state = 'INITIAL'\n self._status = 200\n self._text = ''\n self._env = env\n if (env == 'pro'):\n self._gw = 'apicp-gateway-vf.internal.vodafone.com'\n else:\n self._gw = 'apict-gateway-vf.internal.vodafone.com'\n self._catalog = catalog\n\n def ping(self):\n resul = ''\n try:\n response = requests.get(\"https://\" + self._gw + \"/vodafone-spain/\"\n + self._catalog + \"/ping/ping\", verify=False)\n if (response.status_code != self._status):\n resul = '[' + self._env + '][' + self._catalog + '][' + self._state + '] Status changed from ' + str(self._status) + ' to ' + str(response.status_code)\n self._status = response.status_code\n self._text = ''\n else:\n self._status = response.status_code\n if (response.status_code == 200):\n txt = json.dumps(response.json())\n state = self._state\n self.evalFecha(response.json()[\"fecha\"])\n if (state != self._state or self._text != txt):\n self._text = txt\n resul = '[' + self._env + '][' + self._catalog + '][' + self._state + '] ' + txt\n except:\n logging.exception('Exception calling API')\n resul: 'Error de conexión'\n finally:\n return resul\n \n def evalFecha(self, fecha):\n if (len(fecha) and fecha[6:8] == date.today().strftime('%d')):\n self._state = 'OK'\n else:\n self._state = 'KO'\n","sub_path":"apiConnect/EstadoPingAPI.py","file_name":"EstadoPingAPI.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"186109733","text":"import sys\n\nfrom generators import Generator\nfrom plugins import load_plugins\n\n\ndef find_parser(url, args):\n plugins = load_plugins()\n for plugin in plugins:\n if plugin.can_handle(url):\n return plugin.get_parser(url, args)\n raise Exception(\"No plugin for URL: %s\" % url)\n\n\ndef parse_args():\n args_len = len(sys.argv)\n if args_len > 1:\n url = sys.argv[1]\n else:\n raise Exception(\"First argument: URL\")\n args = args_len > 2 and sys.argv[2:]\n return url, args\n\n\nif __name__ == '__main__':\n url, args = parse_args()\n parser = find_parser(url, args)\n generator = Generator(parser)\n generator.write_xml(sys.stdout)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"491709676","text":"import hashlib\nimport itertools\nimport os\nimport re\nfrom pathlib import Path\n\nimport frontmatter\nimport genanki\nimport markdown\nimport typer\nfrom bs4 import BeautifulSoup, Comment\nfrom bs4.element import Tag\nfrom genanki.deck import Deck\nfrom genanki.model import Model\n\nfrom markdown_anki_decks.sync import sync_deck, sync_model\nfrom markdown_anki_decks.utils import print_success\n\napp = typer.Typer()\n\n\ndef version_callback(value: bool):\n from . import __version__\n\n if value:\n typer.echo(f\"Markdown Anki Decks: {__version__}\")\n raise typer.Exit()\n\n\n@app.command(\"convert\")\ndef convertMarkdown(\n input_dir: Path = typer.Argument(\n ...,\n help=\"The input directory. Contains markdown files which will be converted to anki decks.\",\n ),\n output_dir: Path = typer.Argument(\n ..., help=\"The output directory. Anki .apkg files will be written here.\"\n ),\n sync: bool = typer.Option(\n False,\n \"--sync\",\n help=\"Whether or not to synchronize the output with anki using anki connect.\",\n ),\n deck_title_prefix: str = typer.Option(\n \"\",\n \"--prefix\",\n help=\"Can be used to make your markdown decks part of a single subdeck. Anki uses `::` to indicate sub decks. `markdown-decks::` could be used to make all generated decks part of a single root deck `markdown-decks`\",\n ),\n delete_cards: bool = typer.Option(\n False,\n \"--delete\",\n help=\"Whether to delete cards from anki during sync. 
If sync is false this has no effect.\",\n ),\n cloze: bool = typer.Option(\n False,\n \"--cloze\",\n help=\"Whether to support cloze syntax\",\n ),\n version: bool = typer.Option(\n False, \"--version\", callback=version_callback, help=\"Show version information\"\n ),\n):\n\n # iterate over the source directory\n for root, _, files in os.walk(input_dir):\n for file in files:\n if is_markdown_file(file):\n deck = parse_markdown(\n os.path.join(root, file), deck_title_prefix, cloze\n )\n package = genanki.Package(deck)\n # add all image files to the package\n package.media_files = image_files(input_dir)\n path_to_pkg_file = os.path.join(output_dir, f\"{Path(file).stem}.apkg\")\n package.write_to_file(path_to_pkg_file)\n print_success(f\"Created apkg for deck {deck.name}\")\n if sync:\n sync_deck(deck, Path(path_to_pkg_file), delete_cards)\n for model in deck.models.values():\n sync_model(model)\n\n\nANKI_CLOZE_REGEXP = re.compile(r\"{{c\\d+::[\\s\\S]+?}}\")\n\n\ndef has_clozes(text):\n \"\"\"Checks whether text actually has cloze deletions.\"\"\"\n return bool(ANKI_CLOZE_REGEXP.search(text))\n\n\n# check if a tag is a question\ndef is_question_tag(tag: Tag):\n return tag.name == \"h2\" or (isinstance(tag, Tag) and tag.has_attr(\"data-question\"))\n\n\ndef parse_markdown(\n file: str, deck_title_prefix: str, generate_cloze_model: bool\n) -> Deck:\n metadata, markdown_string = frontmatter.parse(read_file(file))\n html = markdown.markdown(\n markdown_string,\n extensions=[\"fenced_code\", \"sane_lists\", \"tables\", \"codehilite\", \"md_in_html\"],\n )\n\n soup = BeautifulSoup(html, \"html.parser\")\n\n # strip all comments from the html\n comments = soup.findAll(text=lambda text: isinstance(text, Comment))\n for comment in comments:\n comment.extract()\n\n # get the deck title\n deck_title = Path(file).stem\n h1 = soup.h1\n if h1 is not None and h1.text:\n deck_title = h1.text\n deck_title = deck_title_prefix + deck_title\n\n # model for an anki deck\n model = 
genanki.Model(\n model_id=integer_hash(f\"{deck_title} model\"),\n name=f\"{deck_title} model\",\n fields=[{\"name\": \"Question\"}, {\"name\": \"Answer\"}, {\"name\": \"Guid\"}],\n templates=[\n {\n \"name\": \"Card 1\",\n \"qfmt\": '',\n \"afmt\": '',\n },\n ],\n css=read_css(file, metadata),\n model_type=Model.FRONT_BACK,\n )\n\n # model for an anki deck\n cloze_model = genanki.Model(\n model_id=integer_hash(f\"{deck_title} cloze model\"),\n name=f\"{deck_title} cloze model\",\n fields=[{\"name\": \"Question\"}, {\"name\": \"Answer\"}, {\"name\": \"Guid\"}],\n templates=[\n {\n \"name\": \"Card 1\",\n \"qfmt\": '',\n \"afmt\": '{{cloze:Question}}
{{Answer}}
',\n },\n ],\n css=read_css(file, metadata),\n model_type=Model.CLOZE,\n )\n\n # create the deck\n deck_id = integer_hash(deck_title)\n deck = genanki.Deck(deck_id=deck_id, name=deck_title)\n\n # add model to deeck\n deck.add_model(model)\n if generate_cloze_model:\n deck.add_model(cloze_model)\n\n # get the notes\n note_headers = soup.find_all(is_question_tag, recursive=False)\n for header in note_headers:\n # the question is the header\n question = header\n\n # the contents are everything until the next header\n contents = list(\n itertools.takewhile(\n lambda el: not is_question_tag(el), header.next_siblings\n )\n )\n\n # wrap the contents in a section tag. the section is the answer.\n answer = soup.new_tag(\"section\")\n if len(contents) > 0:\n contents[0].wrap(answer)\n for content in contents[1:]:\n answer.append(content)\n\n # create the note using the simple model\n note = FrontIdentifierNote(\n deck_id,\n model=(\n cloze_model\n if generate_cloze_model\n and has_clozes(soup_to_plaintext_string(question))\n else model\n ),\n fields=[soup_to_html_string(question), soup_to_html_string(answer)],\n )\n deck.add_note(note)\n\n return deck\n\n\n# genanki Note which has a unique id based on the deck and the question\n# also has a field for the guid so the guid can be accessed in queries\nclass FrontIdentifierNote(genanki.Note):\n def __init__(self, deck_id, model=None, fields=None, sort_field=None, tags=None):\n guid = genanki.guid_for(fields[0], deck_id)\n if fields is not None:\n fields.append(guid)\n super().__init__(\n model=model, fields=fields, sort_field=sort_field, tags=tags, guid=guid\n )\n\n\n# convert beautiful soup object to a string\ndef soup_to_html_string(soup):\n return soup.prettify(formatter=\"html5\")\n\n\ndef soup_to_plaintext_string(soup):\n return soup.get_text()\n\n\n# convert a file to a string\ndef read_file(file):\n with open(file, \"r\", encoding=\"utf-8\") as f:\n markdown_string = f.read()\n return markdown_string\n\n\n# check if a 
file is a markdown file\ndef is_markdown_file(file):\n # TODO(lukemurray): parameterize markdown extensions?\n return file.endswith(\".md\")\n\n\n# convert a string into a random integer from 0 to 1<<31 exclusive\n# used to create model and deck ids\n# from https://stackoverflow.com/a/42089311/11499360\ndef integer_hash(s: str):\n return int(hashlib.sha256(s.encode(\"utf-8\")).hexdigest(), 16) % (1 << 31)\n\n\n# get all the image files in a directory\ndef image_files(source: Path):\n return list(\n str(p)\n for p in itertools.chain(\n source.rglob(\"*.jpg\"),\n source.rglob(\"*.jpeg\"),\n source.rglob(\"*.png\"),\n source.rglob(\"*.gif\"),\n )\n )\n\n\ndef read_css(file: str, metadata: dict) -> str:\n # merge the css files\n markdown_css = Path(__file__).parent / \"./styles/markdown.css\"\n pygments_css = Path(__file__).parent / \"./styles/pygments.css\"\n pygments_dark_css = Path(__file__).parent / \"./styles/pygments-dark.css\"\n custom_css_contents = []\n if \"css\" in metadata:\n custom_css_paths = metadata[\"css\"]\n if not isinstance(custom_css_paths, list):\n custom_css_paths = [custom_css_paths]\n for custom_css_path in custom_css_paths:\n custom_css_contents.append(\n (Path(file).parent / custom_css_path).read_text(\"utf-8\")\n )\n\n custom_css = \"\\n\".join(custom_css_contents)\n\n return f'{markdown_css.read_text(\"utf-8\")}\\n{pygments_css.read_text(\"utf-8\")}\\n{pygments_dark_css.read_text(\"utf-8\")}\\n{custom_css}'\n\n\ndef main():\n app()\n","sub_path":"markdown_anki_decks/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":8762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"563817072","text":"import fnmatch\nimport os\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom models.tasksModel import TasksModel\nfrom models.studentsModel import StudentsModel\n\nfrom views.mainWindow_ui import Ui_MainWindow\n\nclass MainView(QMainWindow):\n def __init__(self, model, main_controller):\n super().__init__()\n\n self._model = model\n self._main_controller = main_controller\n self._ui = Ui_MainWindow()\n self._ui.setupUi(self)\n self.initUI()\n self.con()\n\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n\n openFile = QAction(QIcon('open.png'), 'Открыть', self)\n openFile.setShortcut('Ctrl+O')\n openFile.setStatusTip('Открыть директорию')\n openFile.triggered.connect(self.showDialog)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('Файл')\n fileMenu.addAction(openFile)\n\n def con(self):\n self._ui.themeComboBox.currentIndexChanged.connect(self.fun)\n\n def showDialog(self):\n dir = str(QFileDialog.getExistingDirectory(self, 'Выберите папкy', '/home/valerie/University/Diploma/Math-packages-course-automatization/mp2017-collect'))\n if dir:\n self._model.setDirectory(dir)\n self.statusBar().showMessage(dir)\n listOfTasks = sorted(getListOfFiles(dir, \"*.m\"))\n i = 0\n while(i < len(listOfTasks)):\n self._ui.themeComboBox.addItem(listOfTasks[i].split(dir)[1].split('/')[1])\n i = i + 1\n\n self.tasksModel = TasksModel(listOfTasks, dir)\n self._ui.tasksTableView.setModel(self.tasksModel)\n self._ui.tasksTableView.horizontalHeader().hide()\n self._ui.tasksTableView.horizontalHeader().setStretchLastSection(True)\n\n problem = '/octave-1/'\n self.showStudents(dir, problem)\n\n def showStudents(self, dir, problem):\n dirName = dir + problem\n listOfStudentsDir = getListOfFiles(dirName, \"*.m\")\n\n studentsList = list()\n for i in range(len(listOfStudentsDir)):\n files = os.listdir(listOfStudentsDir[i])\n for entry in files:\n if 
fnmatch.fnmatch(entry, '*.txt'):\n studentsList.insert(i, entry.split(\"STUDENT - \")[1].split(\".txt\")[0])\n print(studentsList)\n\n self.studentsModel = StudentsModel(studentsList)\n self._ui.studentsTableView.setModel(self.studentsModel)\n self._ui.studentsTableView.horizontalHeader().hide()\n self._ui.studentsTableView.horizontalHeader().setStretchLastSection(True)\n\n def fun(self, i):\n print(i)\n\n #@pyqtSlot(str)\n #def on_directory_changed(self, value):\n # print(\"smth\")\n # self._ui.programPlainTextEdit.setPlainText(value)\n\n'''\n # connect widgets to controller\n self._ui.spinBox_amount.valueChanged.connect(self._main_controller.change_amount)\n self._ui.pushButton_reset.clicked.connect(lambda: self._main_controller.change_amount(0))\n\n # listen for model event signals\n self._model.amount_changed.connect(self.on_amount_changed)\n self._model.even_odd_changed.connect(self.on_even_odd_changed)\n self._model.enable_reset_changed.connect(self.on_enable_reset_changed)\n\n # set a default value\n self._main_controller.change_amount(42)\n\n @pyqtSlot(int)\n def on_amount_changed(self, value):\n self._ui.spinBox_amount.setValue(value)\n\n @pyqtSlot(str)\n def on_even_odd_changed(self, value):\n self._ui.label_even_odd.setText(value)\n\n @pyqtSlot(bool)\n def on_enable_reset_changed(self, value):\n self._ui.pushButton_reset.setEnabled(value)\n'''\n\ndef getListOfFiles(dirName, pattern):\n # create a list of file and sub directories\n # names in the given directory\n listOfFile = os.listdir(dirName)\n allFiles = list()\n\n # Iterate over all the entries\n for entry in listOfFile:\n # Create full path\n fullPath = os.path.join(dirName, entry)\n # If entry is a directory then get the list of files in this directory\n if os.path.isdir(fullPath) & fnmatch.fnmatch(entry, pattern):\n allFiles = allFiles + getListOfFiles(fullPath)\n else:\n allFiles.append(fullPath)\n return 
allFiles\n","sub_path":"views/main_view.py","file_name":"main_view.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"485950884","text":"import subprocess\nimport re\n\nfrom goopy_sched import GoopyTask\n\n\nclass WlStat(GoopyTask):\n\n def __init__(self, model, ifname, interval=5, delay=0):\n self._ifname = ifname\n self._model = model\n self._sp = re.compile('Signal level=([0-9]+)/100')\n\n self._update_model(False, 0)\n\n super().__init__(interval=interval, delay=delay)\n\n def fire(self):\n\n up = False\n signal = 0\n\n try:\n out = subprocess.check_output(['/sbin/iwconfig', self._ifname],\n universal_newlines=True,\n stderr=subprocess.DEVNULL)\n\n up = True\n for line in out.splitlines():\n if line.find('unassociated') > -1:\n up = False\n break\n else:\n match = self._sp.search(line)\n if match:\n signal = match.group(1)\n\n except subprocess.CalledProcessError:\n pass\n\n self._update_model(up, signal)\n\n def _update_model(self, up, signal):\n\n self._model.wl_up = up\n self._model.wl_signal = signal\n\n\nclass ConnStat(GoopyTask):\n\n def __init__(self, model, hostname, interval=30, delay=0):\n self._model = model\n self._hostname = hostname\n self._update_model(False)\n\n super().__init__(interval=interval, delay=delay)\n\n def fire(self):\n\n ok = False\n code = subprocess.call(['/bin/ping', '-q', '-c1', self._hostname],\n stderr=subprocess.DEVNULL,\n stdout=subprocess.DEVNULL)\n if code == 0:\n ok = True\n\n self._update_model(ok)\n\n def _get_interval(self):\n return super()._get_interval() if self._model.conn_ok else 5\n\n def _update_model(self, ok):\n\n self._model.conn_ok = ok\n","sub_path":"goopy_net.py","file_name":"goopy_net.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"515614461","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Remarks / TODO:\n# -there are a few food items in MISC. TRANS\n# - add the nutrients present under different names and ids, such as: carbohydrates or carbohydrates by difference\n\nimport re\nimport os\nimport nltk\nimport time\nimport pickle\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nsys.path.insert(1, './utilities/')\nfrom health_functions import *\n\n# # Import Data\ndef import_data(path1, path2):\n dfList = {}\n for r, d, f in os.walk(path1):\n for file in f:\n if '.csv' in file:\n print(file)\n dfList[file] = pd.read_csv(os.path.join(r, file))\n\n products_df = dfList['product.csv']\n transaction_data_df = dfList['transaction_data.csv']\n\n dfList = {}\n for r, d, f in os.walk(path2):\n for file in f:\n if '.csv' in file:\n # print(file)\n dfList[file] = pd.read_csv(os.path.join(r, file))\n\n # link the nutrient id with its name\n nutrient_df = dfList['nutrient.csv']\n # contains the food articles name and their id test commit\n food_df = dfList['food.csv']\n # contains the nutrients for each food article\n food_nutrients_df = dfList['food_nutrient.csv']\n # linke the food articles ids to their category\n food_category_df = dfList['food_category.csv']\n return products_df,transaction_data_df, nutrient_df, food_df, food_nutrients_df, food_category_df\n\n# ### Most sold items\ndef compute_most_sold_items(products_df,transaction_data_df,selected_categories):\n #select all the items sold at least 1000 times\n sales_qte_df = transaction_data_df[['PRODUCT_ID','QUANTITY']] .groupby(['PRODUCT_ID']).sum().sort_values(by=['QUANTITY'],ascending=False)\n sales_qte_df = sales_qte_df[sales_qte_df['QUANTITY'] > 1000]\n sales_qte_df.head(5)\n\n #select only the categories which are food related and sort them\n products_sales_df = products_df.loc[(products_df['DEPARTMENT'].isin(selected_categories))].join(sales_qte_df, on='PRODUCT_ID', how='inner')\n 
products_sales_df.sort_values(by='QUANTITY',ascending=False,inplace=True)\n\n #we put all the description in a ingredients column\n products_sales_df['ingredients'] = products_sales_df.COMMODITY_DESC + \" \" + products_sales_df.SUB_COMMODITY_DESC\n products_sales_df.drop([\"DEPARTMENT\",\"BRAND\",\"COMMODITY_DESC\",\"SUB_COMMODITY_DESC\"],axis = 1, inplace = True)\n products_sales_df.ingredients = products_sales_df.ingredients.apply(parse_words)\n\n return products_sales_df\n\ndef clean_dfs(food_nutrients_df,nutrient_df,food_category_df,food_df,list_relevant_nutrients):\n #drop unnecessary columns and rename to be more understandable\n food_nutrients_df.drop([\"data_points\",\"min\",\"max\",\"median\",\"footnote\",\"min_year_acquired\",\"derivation_id\"],axis=1,inplace=True)\n nutrient_df.drop([\"nutrient_nbr\",\"rank\"],axis=1,inplace=True)\n food_category_df.drop([\"code\"],axis=1,inplace=True)\n food_df.drop([\"publication_date\"],axis=1,inplace=True)\n\n food_category_df.rename(columns={'id':'food_category_id','description':'category'},inplace= True)\n #filter out only the necessary food nutrients\n nutrient_df = nutrient_df[nutrient_df.name.isin(list_relevant_nutrients)]\n\n #simplyfy and normalize the nutrient names\n simplified_names = nutrient_df.name.apply(trim_nutrient_name)\n nutrient_df.loc[:,\"name\"] = simplified_names\n # add the names of the nutrients contained in the food\n\n return food_nutrients_df, nutrient_df, food_category_df, food_df\n\n\ndef complete_food_dfs(food_nutrients_df, food_df):\n food_nutrients_df = food_nutrients_df.join(nutrient_df.set_index('id'), on='nutrient_id', how='inner')\n\n #takes a long time to run\n #food_nutrients_df.amount = food_nutrients_df[[\"amount\",\"unit_name\"]].apply(get_amount, axis=1)\n #food_nutrients_df.drop(\"unit_name\",axis=1,inplace=True))\n\n #energy is duplicated because we have both kcal and kj, we take only kcal\n food_nutrients_df = food_nutrients_df.pivot_table(index='fdc_id', columns='name', 
values='amount',aggfunc='first')\n food_nutrients_df.fillna(value=0, inplace=True)\n\n #add categories to the food df\n food_df = food_df.join(food_category_df.set_index(\"food_category_id\"),on=\"food_category_id\",how=\"left\")\n food_df.drop([\"food_category_id\"],axis=1,inplace=True)\n food_df.description = food_df.description.apply(parse_words)\n\n return food_nutrients_df, food_df\n\nif __name__ == \"__main__\":\n DUNNHUMBY_PATH = '../data/dunnhumby - The Complete Journey CSV/'\n HEALTH_PATH = '../data/health'\n products_df, transaction_data_df, nutrient_df, food_df, food_nutrients_df, food_category_df = import_data(DUNNHUMBY_PATH,HEALTH_PATH)\n\n food_related_categories = np.array(\n ['NUTRITION', 'GROCERY', 'PASTRY', 'MEAT-PCKGD', 'SEAFOOD-PCKGD', 'PRODUCE', 'DELI', 'MEAT', 'SALAD BAR',\n 'GRO BAKERY', 'FROZEN GROCERY', 'SPIRITS', 'RESTAURANT'])\n list_relevant_nutrients = [\"Protein\", \"Total Carbohydrate\", \"Total lipid (fat)\", \"Sucrose\", \"Glucose (dextrose)\",\n \"Sugars, total including NLEA\", \"Fatty acids, total monounsaturated\",\n \"Fatty acids, total polyunsaturated\", \"Fatty acids, total trans\",\n \"Fatty acids, total saturated\", \"Cholesterol\", \"Vitamin E, added\",\n \"Vitamin K (phylloquinone)\", \"Vitamin B-12\", \"Vitamin B-6\", \"Vitamin D\",\n \"Vitamin A, RAE\", \"Sodium, Na\", \"Total fat (NLEA)\", \"Fiber, total dietary\", \"Energy\",\n \"Carbohydrate, by summation\", \"Fructose\"]\n\n products_sales_df = compute_most_sold_items(products_df, transaction_data_df, food_related_categories)\n food_nutrients_df, nutrient_df, food_category_df, food_df = clean_dfs(food_nutrients_df, nutrient_df, food_category_df, food_df, list_relevant_nutrients)\n food_nutrients_df, food_df = complete_food_dfs(food_nutrients_df, food_df)\n\n all_information_df = food_df.join(food_nutrients_df, on='fdc_id', how='inner')\n all_information_df.drop([\"data_type\", \"description\", \"category\"], axis=1, inplace=True)\n\n #Compute word importance for 
algo\n # all words present in the nutrition dataset\n all_words_nutrition = get_allwords(food_df.description)\n # all words present in the product dataset\n all_words_supermarket = get_allwords(products_sales_df.ingredients)\n\n # #### Inner merge between the 2 sets of words:\n common_words = pd.merge(all_words_supermarket, all_words_nutrition, left_on='name', right_on='name',\n suffixes=('_supermarket', '_nutrition'))\n DIC_SCORE = construct_dic_score(common_words)\n\n #matching the two datasets\n #there is an error in the code, so for now we only using the top 10 items\n temp_df = products_sales_df.head(10).copy()\n find_food1 = lambda list_words: find_food(list_words, food_df, DIC_SCORE)\n temp_df[\"ref_fdc_id\"] = temp_df.ingredients.apply(find_food1).fdc_id\n\n #create our final df with the nutrient information of the supermarket items\n all_df = temp_df.merge(all_information_df, how=\"left\", left_on=\"ref_fdc_id\", right_on=\"fdc_id\")\n all_df.drop([\"MANUFACTURER\", \"ref_fdc_id\", \"fdc_id\"], axis=1, inplace=True)\n all_df.set_index(\"PRODUCT_ID\", inplace=True)\n\n # saves results of this lengthy computation\n all_df.to_pickle(\"../data/results/products_with_link_to_nutrients_df.pickle\")\n\n print(\"done\")\n","sub_path":"src/Health/.ipynb_checkpoints/Health2-checkpoint.py","file_name":"Health2-checkpoint.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"615188238","text":"# Реализовать функцию my_func(), которая принимает три позиционных аргумента,\n# и возвращает сумму наибольших двух аргументов.\n\n\ndef my_func(first_number, second_number, third_number):\n \"\"\"\n функция считает сумму наибольших аргументов\n :param first_number: первое число\n :param second_number: второе число\n :param third_number: третье число\n :return: ничего не возвращает и сразу выводит на экран\n # переводим в список чтобы воспользоваться функцией max\n # списох чтобы сохранить наибольшие числа, чтобы потом использовать функцию sum\n # ищем 1ое максимальное число\n # записываем в список для подсчета суммы\n # удаляем из списка аргументов 1е максимально число чтобы найти 2е максимальное\n # добавляем в список для подсчета суммы и потом выводим сумму чисел\n # находим второе максимальное число\n \"\"\"\n my_list = [first_number, second_number, third_number]\n my_sum = []\n max_number = max(my_list)\n my_sum.append(max_number)\n my_list.remove(max_number)\n max_number = max(my_list)\n my_sum.append(max_number)\n print(sum(my_sum))\n\n\nmy_func(-1, -2, 4)\n","sub_path":"HW_3/HW_3.3.py","file_name":"HW_3.3.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"48101626","text":"#!/usr/bin/python\n#\n# Copyright 2018, International Business Machines Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n############################################################################\n\"\"\"AIX NIM viosupgrade: tool to upgrade VIOSes in NIM environment\"\"\"\n\nimport os\nimport re\nimport subprocess\nimport threading\nimport logging\nimport time\nimport distutils.util\n\n# Ansible module 'boilerplate'\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: nim_upgradeios\nauthors: Vianney Robin, Alain Poncet, Pascal Oliva\nshort_description: Perform upgrade operation on a list of targets\nusing viosupgrade (perl) tool\n\"\"\"\n\n\n# -----------------------------------------------------------------------------\ndef exec_cmd(cmd, module, exit_on_error=False, debug_data=True, shell=False):\n\n \"\"\"\n Execute the given command\n\n Note: If executed in thread, fail_json does not exit the parent\n\n args:\n - cmd array of the command parameters\n - module the module variable\n - exit_on_error use fail_json if true and cmd return !0\n - debug_data prints some trace in DEBUG_DATA if set\n - shell execute cmd through the shell if set (vulnerable to shell\n injection when cmd is from user inputs). If cmd is a string\n string, the string specifies the command to execute through\n the shell. 
If cmd is a list, the first item specifies the\n command, and other items are arguments to the shell itself.\n return\n - ret return code of the command\n - output output and stderr of the command\n - errout command stderr\n \"\"\"\n\n global DEBUG_DATA\n global CHANGED\n global OUTPUT\n\n ret = 0\n output = ''\n errout = ''\n\n th_id = threading.current_thread().ident\n stderr_file = '/tmp/ansible_upgradeios_cmd_stderr_{}'.format(th_id)\n\n logging.debug('command:{}'.format(cmd))\n if debug_data is True:\n DEBUG_DATA.append('exec_cmd:{}'.format(cmd))\n try:\n myfile = open(stderr_file, 'w')\n output = subprocess.check_output(cmd, stderr=myfile, shell=shell)\n myfile.close()\n s = re.search(r'rc=([-\\d]+)$', output)\n if s:\n ret = int(s.group(1))\n output = re.sub(r'rc=[-\\d]+\\n$', '', output) # remove the rc of c_rsh with echo $?\n\n except subprocess.CalledProcessError as exc:\n myfile.close()\n errout = re.sub(r'rc=[-\\d]+\\n$', '', exc.output) # remove the rc of c_rsh with echo $?\n ret = exc.returncode\n\n except OSError as exc:\n myfile.close()\n errout = re.sub(r'rc=[-\\d]+\\n$', '', exc.args[1]) # remove the rc of c_rsh with echo $?\n ret = exc.args[0]\n\n except IOError as exc:\n # generic exception\n myfile.close()\n msg = 'Command: {} Exception: {}'.format(cmd, exc)\n ret = 1\n module.fail_json(changed=CHANGED, msg=msg, output=OUTPUT)\n\n # check for error message\n if os.path.getsize(stderr_file) > 0:\n myfile = open(stderr_file, 'r')\n errout += ''.join(myfile)\n myfile.close()\n os.remove(stderr_file)\n\n if debug_data is True:\n DEBUG_DATA.append('exec_cmd rc:{}, output:{} errout:{}'\n .format(ret, output, errout))\n logging.debug('retrun rc:{}, output:{} errout:{}'\n .format(ret, output, errout))\n\n if ret != 0 and exit_on_error is True:\n msg = 'Command: {} RetCode:{} ... 
stdout:{} stderr:{}'\\\n .format(cmd, ret, output, errout)\n module.fail_json(changed=CHANGED, msg=msg, output=OUTPUT)\n\n return (ret, output, errout)\n\n\n# ----------------------------------------------------------------\ndef get_ios_mksysb(module):\n\n \"\"\"\n Get all resources of type ios_mksysb and the associated\n spot resources and ioslevel\n defined on the nim master.\n Arguments:\n module: {}\n Return: info_hash = {}\n info_hash[ios_mksysb_name]['spot'] = (String)spot_name\n info_hash[ios_mksysb_name]['ioslevel'] = (String)ioslevel\n \"\"\"\n global CHANGED\n global OUTPUT\n info_hash = {}\n cmd = 'LC_ALL=C lsnim -t ios_mksysb -l'\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Cannot list NIM ios_mksysb objects: {}'.format(std_err)\n logging.error(msg)\n module.fail_json(changed=CHANGED, msg=msg, meta=OUTPUT)\n # mksysb_name name and associated spot\n ios_mksysb_name = \"\"\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+):\", line)\n if match_key:\n ios_mksysb_name = match_key.group(1)\n info_hash[ios_mksysb_name] = {}\n continue\n\n match_key = re.match(r\"^ioslevel\\s+=\\s+(.*)$\", line)\n if match_key:\n ioslevel = match_key.group(1)\n info_hash[ios_mksysb_name]['ioslevel'] = ioslevel\n continue\n match_key = re.match(r\"^extracted_spot\\s+=\\s+(.*)$\", line)\n if match_key:\n spot = match_key.group(1)\n info_hash[ios_mksysb_name]['spot'] = spot\n continue\n\n logging.debug('ios_mksysb={}'.format(info_hash))\n return info_hash\n\n\n# ----------------------------------------------------------------\ndef get_nim_user_res(module):\n\n \"\"\"\n Get the list of resources of type resolv_conf, script,\n fb_script, file_res, image_data, and log\n defined on the nim master.\n Arguments:\n module: {}\n\n Return: Dictionary of reources key=name valu=type\n type=resolv_conf|script|fb_script|file_res|image_data|and log\n \"\"\"\n global CHANGED\n global OUTPUT\n std_out = ''\n 
nim_user_res = {}\n\n cmd = 'LC_ALL=C lsnim -t resolv_conf; lsnim -t script; lsnim -t fb_script; '\\\n 'lsnim -t file_res; lsnim -t image_data; lsnim -t log'\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Cannot list NIM resource: {}'.format(std_err)\n logging.error(msg)\n module.fail_json(changed=CHANGED, msg=msg, meta=OUTPUT)\n\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+)\\s+\\S+\\s+(\\S+)$\", line)\n if match_key:\n nim_user_res[match_key.group(1)] = match_key.group(2)\n continue\n\n return nim_user_res\n\n\n# ----------------------------------------------------------------\ndef get_nim_clients_info(module):\n \"\"\"\n Get the list of vios defined on the nim master, and get their\n associated cstate and hostname.\n Arguments:\n module: {}\n Return: info_hash = {}\n info_hash[vios_name]['cstate'] = (String) vios cstate\n info_hash[vios_name]['host_name'] = (String) hostname to access vios\n \"\"\"\n global CHANGED\n global OUTPUT\n std_out = ''\n info_hash = {}\n\n cmd = 'LC_ALL=C lsnim -t vios -l'\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Cannot list NIM vios objects: {}'.format(std_err)\n logging.error(msg)\n module.fail_json(changed=CHANGED, msg=msg, meta=OUTPUT)\n\n vios_name = \"\"\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+):\", line)\n if match_key:\n vios_name = match_key.group(1)\n info_hash[vios_name] = {}\n continue\n\n match_cstate = re.match(r\"^Cstate\\s+=\\s+(.*)$\", line)\n if match_cstate:\n cstate = match_cstate.group(1)\n info_hash[vios_name]['cstate'] = cstate\n continue\n\n # Get VIOS interface info in case we need c_rsh\n match_if = re.match(r\"^if1\\s+=\\s+\\S+\\s+(\\S+)\\s+.*$\", line)\n if match_if:\n info_hash[vios_name]['vios_ip'] = match_if.group(1)\n continue\n\n return info_hash\n\n\n# ----------------------------------------------------------------\ndef 
get_cluster_status(module, vios):\n\n \"\"\"\n get the status of the vios node in the cluster of vios\n Arguments:\n module: {}\n vios: {}\n Return: integer 0 or 1\n \"\"\"\n\n rc = 1\n if not vios[\"cluster_id\"]:\n return 0\n # get cluster status\n cmd = ['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', vios[\"host_name\"],\n 'LC_ALL=C /usr/ios/cli/ioscli cluster -status -fmt :']\n # 'LC_ALL=C /usr/ios/cli/ioscli cluster -status -field '\\\n # 'node_name node_state pool_state node_upgrade_status -verbose\"']\n (ret, std_out, std_err) = exec_cmd(cmd, module)\n # parse std_out\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+):(\\S+):(\\S+):(\\S+):(\\d+):(\\S+):(.*)\", line)\n if match_key:\n if match_key.group(6) == \"DOWN\" or match_key.group(7) == \"DOWN\":\n return 1\n else:\n rc = 0\n return rc\n\n\n# ----------------------------------------------------------------\ndef get_viosupgrade_status(module, vios):\n\n \"\"\"\n Run lsnim command to get the vios status during upgrade process\n set vios[\"status\"] = DONE | RUNNING | ERROR\n Arguments:\n module: {}\n vios: {}\n Return: String status = SUCCESS-UPGRADE | RUNNING | FAILURE-UPGRADE\n \"\"\"\n global ERROR\n global RUNNING\n global DONE\n status = RUNNING\n std_out = \"\"\n cmd = 'LC_ALL=C /usr/sbin/lsnim -l {}'.format(vios[\"name\"])\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Viosupgrade error on vios: {} :{}'\\\n .format(vios[\"name\"], std_err)\n logging.error(msg)\n status = ERROR\n vios[\"status\"] = ERROR\n else:\n # parse std_out\n # wait for strings:\n # Cstate = ready for a NIM operation\n # Mstate = currently running\n # Cstate_result = success\n # info = m..e..s..s..a..g..e..\n # err_info = m..e..s..s..a..g..e..\n Mstate = \"\"\n Cstate = \"\"\n Cstate_result = \"\"\n info = \"\"\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+)\\s+=\\s+(.*)\", line)\n if match_key:\n if 
match_key.group(1) == \"Mstate\":\n Mstate = match_key.group(2)\n\n if match_key.group(1) == \"Cstate\":\n Cstate = match_key.group(2)\n\n if match_key.group(1) == \"Cstate_result\":\n Cstate_result = match_key.group(2)\n\n if match_key.group(1) == \"info\":\n info = match_key.group(2)\n continue\n if match_key.group(1) == \"err_info\":\n status = ERROR\n vios[\"status\"] = ERROR\n messages = std_out.split(\"\\n\", 1)\n OUTPUT.append(\"NIM error info {})\".format(messages[0]))\n for line in messages[1].split(\"\\n\"):\n OUTPUT.append(line)\n return status\n\n if not info and Cstate == \"ready for a NIM operation\"\\\n and (Cstate_result == \"success\" or Cstate_result == \"reset\")\\\n and (Mstate == \"currently running\" or Mstate == \"ready for use\"):\n status = DONE\n else:\n status = RUNNING\n vios[\"status\"] = status\n return status\n\n\n# ----------------------------------------------------------------\ndef build_viosupgrade_cmd(vios, validate):\n\n \"\"\"\n Build the viosupgrade command for a specific vios\n with apropriate parameters in a restricted use.\n\n viosupgrade -t bosinst -n hostname -m mksysbname -p spotname\n {-a RootVGCloneDisk: ... | -s} [-c] [-e Resources: ...] [-v]\n\n viosupgrade -t altdisk -n hostname -m mksysb_name -a RootVGCloneDisk\n [-c] [-e Resources: ...] [-v]\n\n Used Flags:\n -t Specifies the type of install. Supported types are: bosinst, altdisk.\n -n Specifies the target VIOS hostname or IP address to perform VIOS upgrade operation.\n -m Specifies the MKSYSB resource name.\n -p Specifies the SPOT resource name.\n -a Specifies the alternate physical volume. 
if install type is 'bosinst' then\n the disk(s) will be used to take backup of current rootvg.\n For 'altdisk' type installation disk(s) will be used to install the provided image.\n -s Specify to skip cloning of the current rootvg disk(s) to alternate disk(s).\n -c Specify if VIOS is part of the cluster.\n -e Specifies configuration resource(s) to apply as part of the installation.\n supported resources are resolv_conf, script, fb_script, file_res, image_data, log.\n -v Validates the input data for the given VIO Server(s).\n\n Not Used Flags:\n -b Specifies VIOS configuration backup file resource name.\n -r Specifies the new rootvg physical volume to install the provided image.\n -f Specifies file name which contains the list of nodes.\n -q Check the status of triggered upgrade operation.\n\n Arguments:\n vios: {} Dictionary of attributes for the vios object\n validate: boolean\n\n return: string command with flags and parameters\n \"\"\"\n cmd = '/usr/sbin/viosupgrade -t {} -n {} -m {} '\\\n .format(vios[\"action\"], vios[\"name\"], vios[\"ios_mksysb\"])\n if vios[\"action\"] == \"bosinst\":\n cmd = cmd + \" -p \" + vios[\"spot\"]\n if len(vios[\"user_res\"]) != 0:\n cmd = cmd + \" -e \"\n for res in vios[\"user_res\"]:\n cmd = cmd + res + \":\"\n if vios[\"cluster_id\"] != \"\":\n cmd = cmd + \" -c\"\n\n if vios[\"alt_disk\"] != \"\":\n cmd = cmd + \" -a \" + re.sub(' +', ':', vios[\"alt_disk\"])\n elif vios[\"skip\"] is True:\n cmd = cmd + \" -s\"\n if validate:\n cmd = cmd + \" -v\"\n return cmd\n\n\n# ----------------------------------------------------------------\ndef validate_vios(module, vios):\n \"\"\"\n Validate the execution of the viosupgrade command\n Arguments:\n module: {} dictionary\n vios: {} dictionary of attributes of vios object\n Return: integeger 0 --> OK !=0 --> NOK\n \"\"\"\n global ERROR\n global READY\n\n rc = 0\n cmd = build_viosupgrade_cmd(vios, validate=True)\n (rc, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if rc != 0:\n 
msg = 'Viosupgrade error on vios: {} :{}:{}'\\\n .format(vios[\"name\"], std_out, std_err)\n logging.error(msg)\n vios[\"status\"] = ERROR\n else:\n vios[\"status\"] = READY\n return rc\n\n\n# ----------------------------------------------------------------\ndef validate_tuple(module, tuple, tuple_key, upgrade_status):\n \"\"\"\n validate the execution of viosupgrade command for a list of vios\n beloonging to the same cluster or deserving the same lpar\n and set the result in upgrade_status dict\n\n Arguments:\n module:\n tuple: {} dictionary ov vioses\n tuple_key: String: \"vioses with space separator\"\n upgrade_status: {} key: tuple_key, value:tuple status\n Return: Integer 0 --> OK\n \"\"\"\n rc = 0\n for vios in tuple.values():\n rc = validate_vios(module, vios)\n if rc != 0:\n upgrade_status[tuple_key] = ERROR\n return rc\n upgrade_status[tuple_key] = READY\n return rc\n\n\n# ----------------------------------------------------------------\ndef viosupgrade(module, tuples, upgrade_status):\n \"\"\"\n Execute the viosupgrade command on all vios of all targets selected\n in the tuples dictionary\n In parallel all targets but sequentialy each vios of one target\n set the status of tuples in upgrade_status dict\n Arguments:\n module: {}\n tuples: {}\n upgrade_status: {}\n Return: Integer: number of error\n \"\"\"\n global CHANGED\n global ERROR\n global READY\n global RUNNING\n global DONE\n TIMEOUT = 5400 # 1 H 30 Min\n LOOP_TIME = 90 # 1 Min 30 Sec\n nb_error = 0\n CONTINUE = True\n while CONTINUE: # continue while at least one tuple is not done even not in error\n loop_start = int(time.time())\n CONTINUE = False\n for tuple_key in tuples.keys():\n tuple = tuples[tuple_key]\n if upgrade_status[tuple_key] == ERROR or upgrade_status[tuple_key] == DONE:\n continue # go to next tuple\n elif upgrade_status[tuple_key] == READY:\n validate_tuple(module, tuple, tuple_key, upgrade_status)\n if upgrade_status[tuple_key] == ERROR:\n continue # go to next tuple\n CONTINUE = 
True\n vioses = tuple_key.split()\n nb_vioses = len(vioses)\n for index, vios_name in enumerate(vioses):\n vios = tuple[vios_name]\n previous_vios = {}\n if index != 0:\n previous_vios = tuple[vioses[index - 1]]\n # if vios is ready ant it is the first or the previous is done\n # then start migration\n if vios[\"status\"] == READY:\n if index == 0 or previous_vios[\"status\"] == DONE:\n # now run the upgrade command.\n cmd = build_viosupgrade_cmd(vios, False)\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Viosupgrade error on vios: {} :{}'\\\n .format(vios_name, std_err)\n logging.error(msg)\n upgrade_status[tuple_key] = ERROR\n vios[\"status\"] = ERROR\n nb_error += 1\n break # break vioses loop and go to next tuple\n upgrade_status[tuple_key] = RUNNING\n vios[\"status\"] = RUNNING\n start_time = int(time.time())\n vios[\"loop_time\"] = start_time\n vios[\"start_time\"] = start_time\n break # break vioses loop and go to next tuple\n\n # if vios is running then test real state\n if vios[\"status\"] == RUNNING:\n actual_time = int(time.time())\n # wait until LOOP_TIME in sec since last test\n if actual_time < vios[\"loop_time\"] + LOOP_TIME:\n # sleep until loop_time + LOOP_TIME in sec\n time.sleep(vios[\"loop_time\"] + LOOP_TIME - loop_start)\n actual_time = vios[\"loop_time\"] + LOOP_TIME\n # test real status an change the status if reqiured then continue\n # Query to get the status of the upgrade for each target\n status = get_viosupgrade_status(module, vios)\n if status == ERROR:\n vios[\"status\"] = ERROR\n upgrade_status[tuple_key] = ERROR\n nb_error += 1\n break\n if status == DONE:\n if get_cluster_status(module, vios) != 0:\n status = RUNNING\n vios[\"loop_time\"] = actual_time\n vios[\"status\"] = status\n # set tuple in error if TimeOut\n if status == RUNNING and (actual_time > vios[\"start_time\"] + TIMEOUT):\n vios[\"status\"] = ERROR\n upgrade_status[tuple_key] = ERROR\n break # break vioses loop and go to next 
tuple\n\n # if vios is migrated --> next vios\n if vios[\"status\"] == DONE:\n CHANGED = True\n # if last vios is migrated set tuple to migrated --> next tuple\n if index == (len(vioses) - 1):\n upgrade_status[tuple_key] = DONE\n # End of vios loop, go to the next tuple\n # else:\n # continue # loop on the next vios\n return nb_error\n\n\n###################################################################################\n\nif __name__ == '__main__':\n DEBUG_DATA = []\n OUTPUT = []\n NIM_NODE = {}\n CHANGED = False\n VARS = {}\n ERROR = \"UPGRADE-FAILURE\"\n READY = \"READY-FOR-UPGRADE\"\n RUNNING = \"RUNNING\"\n DONE = \"UPGRADE-SUCCESS\"\n REJECTED = \"UPGRADE-REJECTED\"\n nb_error = 0\n\n MODULE = AnsibleModule(\n argument_spec=dict(\n description=dict(required=False, type='str'),\n\n # IBM automation generic attributes\n targets=dict(required=True, type='list'),\n actions=dict(required=True, type='dict'),\n vars=dict(required=False, type='dict'),\n vios_status=dict(required=False, type='dict'),\n nim_node=dict(required=False, type='dict'),\n\n # following attributes are dictionaries with\n # key: 'all_vios' or hostname and value: a string or boolean\n # example:\n # ios_mksysb={\"target1\": \"mksysb_name_1\", \"target2\": \"mksysb_name_2\"}\n # ios_mksysb={\"all_vios\": \"mksysb_name\", \"target2\": \"mksysb_name_2\"}\n ios_mksysb=dict(required=True, type='dict'),\n # force={\"all_vios\": False, \"target_x\": True}\n force=dict(required=False, type='dict'),\n alt_disk=dict(required=False, type='dict'),\n # Resources (-e option) The valid resource type are:\n # resolv_conf, script, fb_script, file_res, image_data, and log\n # Dictionary with key: 'all_vios' or hostname and value: string\n # exemple: user_res={\"all_vios\": \"resolv_conf_name\", \"vios_name\": \"file_res_name\"}\n # in that exemple the viosupgrade will be called with -e resolv_conf_name:file_res_name\n user_res=dict(required=False, type='dict'),\n ),\n )\n\n # 
=========================================================================\n # Get Module params\n # =========================================================================\n\n msg = \"\"\n user_res = {}\n alt_disk = {}\n VERBOSITY = MODULE._verbosity\n\n targets = MODULE.params['targets']\n actions = MODULE.params['actions']\n ios_mksysb = MODULE.params['ios_mksysb']\n force = MODULE.params['force']\n nim_user_res = []\n REQUIRED_IOSLEVEL = \"2.2.6.30\"\n # Handle playbook variables\n LOGNAME = '/tmp/ansible_upgradeios_debug.log'\n if MODULE.params['vars']:\n VARS = MODULE.params['vars']\n if'log_file' in VARS.keys():\n LOGNAME = VARS['log_file']\n if MODULE.params['vios_status']:\n tuples_status = MODULE.params['vios_status']\n else:\n vios_status = None\n # Open log file\n OUTPUT.append('Log file: {}'.format(LOGNAME))\n LOGFRMT = '[%(asctime)s] %(levelname)s: [%(funcName)s:%(thread)d] %(message)s'\n LEVEL = logging.DEBUG\n \n logging.basicConfig(filename='{}'.format(LOGNAME), format=LOGFRMT, level=LEVEL)\n\n logging.debug('*** START NIM VIOSUPGRADE OPERATION ***')\n all_targets = list(set(targets)) # remove duplicates tuples\n all_targets = [elem.replace(',', ' ').replace(':', ' ') for elem in all_targets]\n all_targets = [re.sub(' +', ' ', elem) for elem in all_targets]\n logging.debug('VIOSUpgrade operation for tagets:{}'.format(targets))\n logging.info('VERBOSITY is set to {}'.format(VERBOSITY))\n OUTPUT.append('VIOSUpgrade operation for {}'.format(all_targets))\n # build mksysb - spot table. 
spot is needed (if action = bosinst)\n mksysb_htab = get_ios_mksysb(MODULE)\n # build NIM node info (if needed)\n if MODULE.params['nim_node']:\n NIM_NODE = MODULE.params['nim_node']\n else:\n NIM_NODE['nim_vios'] = get_nim_clients_info(MODULE)\n logging.debug('NIM VIOS: {}'.format(NIM_NODE['nim_vios']))\n if MODULE.params['user_res']:\n user_res = MODULE.params['user_res']\n # get all existing user_res from nim server\n # The valid types are: resolv_conf, script, fb_script, file_res, image_data, and log.\n nim_user_res = get_nim_user_res(MODULE)\n if MODULE.params['alt_disk']:\n alt_disk = MODULE.params['alt_disk']\n\n # if health check status is known remove tuple with wrong status\n # build the list of target matching nim client list\n # remove duplicates vios\n # check vios connectivity and get ClusterID\n # get altinst_rootvg disk\n # remove tuples without c_rsh connectivity\n # exclude tuples with different clusterID\n # remove tuple having the same clusterID than an other tuple\n # remove tuple having unsuficient ioslevel\n logging.debug(\"ALL_TARGETS = {}\".format(all_targets))\n new_target_list = []\n all_vioses = []\n all_cluster_ids = []\n\n # build here the targets tuple structure\n tuples = {}\n # tuples = {} # Dict: key = tuple ex: \"vios1 vios2\"\n # tuples[tuple] = {} # Dict: key = vios_name ex: \"vios1\" or \"vios2\"\n # tuples[tuple][vios_name] = {} # Dict: keys are \"name\", \"cluster_id\", \"ios_mksysb\"...\n # tuples[tuple][vios_name][\"name\"] = \"\" # String: \n # tuples[tuple][vios_name][\"host_name\"] = \"\" # String: get from nim object\n # tuples[tuple][vios_name][\"ip\"] = \"\" # String: ip adress coresponding to host_name\n # tuples[tuple][vios_name][\"interface\"] = \"\" # String: interface configured wit ip\n # tuples[tuple][vios_name][\"cluster_id\"] = \"\" # String: \n # tuples[tuple][vios_name][\"altinst_rootvg\"] = \"\" # String: \n # tuples[tuple][vios_name][\"rootvg\"] = \"\" # String: \n # tuples[tuple][vios_name][\"level\"] = 
\"\" # String: \n # tuples[tuple][vios_name][\"free_pv\"] = {} # Dict: key = disk value = size\n # tuples[tuple][vios_name][\"skip\"] = Boolean: skip the alt disk copy operation\n # tuples[tuple][vios_name][\"action\"] = \"\" # String: \n # tuples[tuple][vios_name][\"ios_mksysb\"] = \"\" # String: \n # tuples[tuple][vios_name][\"spot\"] = \"\" # String: \n # tuples[tuple][vios_name][\"alt_disk\"] = \"\" # String: \n # tuples[tuple][vios_name][\"user_res\"] = [] # Liste of resource name\n # tuples[tuple][vios_name][\"status\"] = \"\" # String: status to follow installation steps\n # tuples[tuple][vios_name][\"start_time\"] = 0 # Integer: viosupgrade start time from epoch\n # tuples[tuple][vios_name][\"loop_time\"] = 0 # Integer: viosupgrade start time from epoch\n\n upgrade_status = {} # the key is the tuple string ex: \"vios1 vios2\"\n for tuple_key in all_targets:\n tuple = {}\n vioses = tuple_key.split()\n msg = \"\"\n cluster_id = \"\"\n\n if not (vios_status is None):\n if len(vioses) == 1 and vioses[0] in vios_status\\\n and vios_status[vioses[0]] != 'SUCCESS-HC'\\\n and ios_status[vioses[0]] != 'SUCCESS-ALTDC':\n OUTPUT.append(\" {} vios skiped ({})\"\n .format(vioses[0], vios_status[vioses[0]]))\n logging.warn(\"{} vios skiped ({})\"\n .format(vioses[0], vios_status[vioses[0]]))\n upgrade_status[tuple_key] = vios_status[vioses[0]]\n continue\n if len(vioses) == 2:\n key1 = vioses[0] + \"-\" + vioses[1]\n key2 = vioses[1] + \"-\" + vioses[0]\n if key1 in vios_status.keys()\\\n and vios_status[key1] != 'SUCCESS-HC'\\\n and vios_status[key1] != 'SUCCESS-ALTDC':\n OUTPUT.append(\" {} vioses skiped ({})\"\n .format(tuple_key, vios_status[key1]))\n logging.warn(\"{} vioses skiped ({})\"\n .format(tuple_key, vios_status[key1]))\n upgrade_status[tuple_key] = vios_status[key1]\n continue\n if key2 in vios_status.keys()\\\n and vios_status[key2] != 'SUCCESS-HC'\\\n and vios_status[key2] != 'SUCCESS-ALTDC':\n OUTPUT.append(\" {} vioses skiped ({})\"\n 
.format(tuple_key, vios_status[key2]))\n logging.warn(\"{} vioses skiped ({})\"\n .format(tuple_key, vios_status[key2]))\n vios_status[key1] = vios_status[key2]\n upgrade_status[tuple_key] = vios_status[key2]\n continue\n else:\n OUTPUT.append(\" {} vioses skiped (no previous status found)\"\n .format(key1))\n logging.warn(\"{} vioses skiped (no previous status found)\"\n .format(key1))\n upgrade_status[tuple_key] = \"FAILURE-NO-PREV-STATUS\"\n\n for vios_name in vioses:\n msg = \"\"\n vios = {}\n vios[\"name\"] = vios_name\n vios[\"status\"] = READY\n vios[\"altinst_rootvg\"] = \"\"\n vios[\"rootvg\"] = \"\"\n vios[\"alt_disk\"] = \"\"\n vios[\"cluster_id\"] = \"\"\n vios[\"host_name\"] = \"\"\n vios[\"ip\"] = \"\"\n vios[\"interface\"] = \"\"\n vios[\"interface_type\"] = \"\"\n vios[\"cluster_status\"] = \"\"\n vios[\"skip\"] = False\n vios[\"level\"] = \"\"\n vios[\"start_time\"] = 0\n vios[\"loop_time\"] = 0\n vios[\"free_pv\"] = {}\n tuple[vios_name] = vios\n\n if vios_name not in NIM_NODE['nim_vios']:\n msg = \"vios: {} is not a nim client.\".format(vios_name)\n upgrade_status[tuple_key] = \"UNKNOWN-NIM-CLIENT\"\n if vios_name in all_vioses:\n msg = \"vios: {} is already in the list of targets.\"\\\n .format(vios_name)\n upgrade_status[tuple_key] = \"DUPLICATE-VIOS\"\n if msg:\n vios[\"status\"] = upgrade_status[tuple_key]\n break # vios loop\n\n cluster_id = \"\"\n vios[\"host_name\"] = NIM_NODE['nim_vios'][vios_name][\"vios_ip\"]\n # get dominized host_name and ip @ of the vios\n cmd = 'LC_ALL=C /bin/host {}'.format(vios[\"host_name\"])\n (ret, std_out, std_err) = exec_cmd(cmd, MODULE, False, True, True)\n if ret != 0:\n msg = 'skip target: {}, cannot get {} ip address.'\\\n .format(tuple_key, vios_name)\n break # vios loop\n else:\n # parse stdout\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+)\\s+\\S+\\s+(\\d+.\\d+.\\d+.\\d+)$\", line)\n if match_key:\n vios[\"ip\"] = match_key.group(2)\n rootvg_size = 0\n cmd = 
['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', vios[\"host_name\"],\n '\"LC_ALL=C /etc/lsattr -El vioscluster0; /usr/bin/netstat -in;'\n ' /usr/sbin/lsdev -c adapter -t sea -s pseudo -F name:description;'\n ' /usr/ios/cli/ioscli lspv; /usr/ios/cli/ioscli ioslevel;'\n ' /usr/ios/cli/ioscli lspv -free;'\n ' /usr/ios/cli/ioscli cluster -status -field cluster_state\"']\n (ret, std_out, std_err) = exec_cmd(cmd, MODULE)\n # check vios connectivity\n if ret != 0:\n msg = 'skip target: {}, cannot reach {} with c_rsh.'\\\n .format(tuple_key, vios_name)\n break # vios loop\n else:\n # parse std_out and get clusterID, altinst_rootvg,\n # vios version, free pv, rootvg size\n for line in std_out.split('\\n'):\n line = line.strip()\n\n # search cluster_id\n if vios[\"cluster_id\"] == \"\":\n match_key = re.match(r\"^cluster_id\\s+(\\S+).*\", line)\n if match_key:\n cluster_id = match_key.group(1)\n vios[\"cluster_id\"] = cluster_id\n if cluster_id in all_cluster_ids:\n msg = '{}: an other node is allready belonginng'\\\n 'to the cluster with ID: {}.'.format(vios_name, cluster_id)\n break # parse std_out loop\n if len(vioses) > 1\\\n and vios[\"cluster_id\"] != tuple[vioses[0]][\"cluster_id\"]:\n msg = '{}: vioses belong to different cluster\"'.format(tuple_key)\n break # parse std_out loop\n continue # next line\n\n # search vios hsot interface\n if vios[\"interface\"] == \"\":\n match_key = re.match(r\"^(\\S+)\\s+\\d+\\s+\\S+\\s+(\\d+.\\d+.\\d+.\\d+)\\s+.*\", line)\n if match_key and match_key.group(2) == vios[\"ip\"]:\n interface = match_key.group(1)\n vios[\"interface\"] = interface.replace(\"en\", \"ent\")\n continue # next line\n # search SEA adapter\n if vios[\"interface_type\"] == \"\":\n match_key = re.match(r\"^(\\S+):Shared Ethernet Adapter\", line)\n if match_key and match_key.group(1) == vios[\"interface\"]:\n vios[\"interface_type\"] = \"SEA\"\n continue # next line\n\n # search altinst_rootvg and rootvg disk name\n if vios[\"altinst_rootvg\"] == \"\" or vios[\"rootvg\"] 
== \"\":\n match_key = re.match(r\"^(\\S+)\\s+(\\S+)\\s+(\\S+).*\", line)\n if match_key and match_key.group(3) == \"altinst_rootvg\":\n vios[\"altinst_rootvg\"] = match_key.group(1)\n vios[\"skip\"] = True\n continue # next line\n elif match_key and match_key.group(3) == \"rootvg\":\n if vios[\"interface_type\"] == \"\":\n vios[\"interface_type\"] = \"OTHER\" # end of search SEA section\n vios[\"rootvg\"] = match_key.group(1)\n continue # next line\n\n # search vios level\n if vios[\"level\"] == \"\":\n match_key = re.match(r\"^(\\d+.\\d+.\\d+.\\d+)$\", line)\n if match_key:\n if match_key.group(1) >= REQUIRED_IOSLEVEL:\n vios[\"level\"] = match_key.group(1)\n else:\n msg = '{} ioslevel is {}, '\\\n 'the minimum required is {}'\\\n .format(vios_name, match_key.group(1), REQUIRED_IOSLEVEL)\n break # parse std_out loop\n continue # next line\n\n # search free pv\n match_key = re.match(r\"^(\\S+)\\s+(\\S+)\\s+(\\d+)$\", line)\n if match_key:\n vios[\"free_pv\"][match_key.group(1)] = int(match_key.group(3), 10)\n continue # next line\n\n # get cluster status\n match_key = re.match(r\"^Cluster\\s+State:\\s+(\\S+)$\", line)\n if match_key:\n vios[\"cluster_status\"] = match_key.group(1)\n continue # next line\n elif line == \"Cluster does not exist.\":\n vios[\"cluster_status\"] = \"UNKOWN\"\n continue # next line\n # end annalysis of command output\n\n if vios[\"cluster_id\"] and vios[\"cluster_status\"] != \"OK\":\n msg = '{}, the cluster is not in the correct state to be upgraded.'\\\n .format(tuple_key)\n if msg:\n break # vios loop\n\n cmd = ['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', vios[\"host_name\"],\n '\"LC_ALL=C /usr/sbin/lqueryvg -p {} -At\"'.format(vios[\"rootvg\"])]\n (ret, std_out, std_err) = exec_cmd(cmd, MODULE)\n # check vios connectivity\n if ret != 0:\n msg = 'skip target: {}, cannot reach {} with c_rsh.'\\\n .format(tuple_key, vios_name)\n break # vios loop\n else:\n total_pps = 0\n free_pps = 0\n pp_size = 0\n for line in std_out.split('\\n'):\n 
line = line.strip()\n # search rootvg size\n match_key = re.match(r\"^PP Size:\\s+(\\d+).*\", line)\n if match_key:\n pp_size = int(match_key.group(1))\n match_key = re.match(r\"^Total PPs:\\s+(\\S+).*\", line)\n if match_key:\n total_pps = int(match_key.group(1))\n match_key = re.match(r\"^Free PPs:\\s+(\\S+).*\", line)\n if match_key:\n free_pps = int(match_key.group(1))\n if pp_size == 0 or total_pps == 0:\n msg = \"Program Error\"\n else:\n # root vg size in Megabytes\n rootvg_size = (total_pps - free_pps) * (2 ** (pp_size - 20)) # in Megabytes\n\n if msg:\n vios[\"status\"] = REJECTED\n break # vios loop\n\n force_install = False # default value\n disks = \"\"\n action = \"\"\n mksysb = \"\"\n vios[\"user_res\"] = []\n if vios_name in force.keys():\n force_install = force[vios_name]\n elif \"all_vios\" in force.keys():\n force_install = force[\"all_vios\"]\n\n if vios_name in ios_mksysb.keys():\n mksysb = ios_mksysb[vios_name]\n elif \"all_vios\" in ios_mksysb.keys():\n mksysb = ios_mksysb[\"all_vios\"]\n else:\n msg = '{}: no ios_mksysb property specified.'\\\n .format(vios_name)\n break # vios loop\n vios[\"ios_mksysb\"] = mksysb\n if mksysb not in mksysb_htab.keys():\n msg = '{}: The specified ios_mksysb: {} resource does not exist'\\\n .format(vios_name, mksysb)\n break # vios loop\n elif mksysb_htab[mksysb][\"ioslevel\"] < \"3.1.0.0\":\n msg = '{}: the ios_mksysb level: {} {}, is insufficient.'\\\n ' The minimum level is 3.1.0.0'\\\n .format(vios_name, mksysb, mksysb_htab[mksysb][\"ioslevel\"])\n break # vios loop\n elif mksysb_htab[mksysb][\"ioslevel\"] < vios[\"level\"] and not force_install:\n msg = '{}: the ios_mksysb level {} {} should be greater than vios level {}.'\\\n .format(vios_name, mksysb, mksysb_htab[mksysb][\"ioslevel\"], vios[\"level\"])\n break # vios loop\n elif mksysb_htab[mksysb][\"ioslevel\"] == vios[\"level\"] and not force_install:\n msg = '{}: the ios_mksysb level {} {} should be greater than vios level {}.'\\\n .format(vios_name, 
mksysb, mksysb_htab[mksysb][\"ioslevel\"], vios[\"level\"])\n break # vios loop\n if vios_name in actions.keys():\n action = actions[vios_name]\n elif \"all_vios\" in actions.keys():\n action = actions[\"all_vios\"]\n else:\n msg = '{}: atcion property must be specified.'\\\n .format(vios_name)\n break # vios loop\n vios[\"action\"] = action\n if action != \"bosinst\" and action != \"altdisk\":\n msg = '{}: action type should be bosinst or altdisk.'\\\n .format(vios_name)\n break # vios loop\n\n # a bosinst installation type needs a spot resource.\n if action == 'bosinst':\n if \"spot\" in mksysb_htab[mksysb].keys():\n vios[\"spot\"] = mksysb_htab[mksysb][\"spot\"]\n else:\n msg = '{}: There is no defined spot for ios_mksysb '\\\n 'resource: {}, the bosinst installation required one.'\\\n .format(vios_name, mksysb)\n break # vios loop\n # an aldisk installation requires that the ip interface is configured on a non SEA\n elif vios[\"interface_type\"] == \"SEA\":\n msg = '{}: altdisk method is not supported on a VIOS defined with SEA interface.'\\\n .format(vios_name)\n break # vios loop\n res_list = []\n if vios_name in user_res.keys():\n res_list = user_res[vios_name].replace(':', ' ').replace(',', ' ').split()\n if \"all_vios\" in user_res.keys():\n res_list.extend(user_res[\"all_vios\"].replace(':', ' ').replace(',', ' ').split())\n res_list = list(set(res_list))\n vios[\"user_res\"] = res_list\n for res in res_list:\n if res not in nim_user_res.keys():\n msg = '{}: the resource {} does not exist or is not '\\\n 'an authorized nim resource.'\\\n .format(vios_name, res)\n break\n if action == 'altdisk' and nim_user_res[res] == \"file_res\":\n msg = '{}: the resource {} of type file_res is not '\\\n 'supported for altdisk type installation.'\\\n .format(vios_name, res)\n logging.warning(msg)\n if msg:\n break # vios loop\n\n if vios_name in alt_disk.keys():\n disks = alt_disk[vios_name].strip()\n elif \"all_vios\" in alt_disk.keys():\n disks = 
alt_disk[\"all_vios\"].strip()\n if disks:\n disks = disks.replace(':', ' ').replace(',', ' ').strip()\n vios[\"alt_disk\"] = disks\n if not disks and action == 'altdisk':\n msg = '{}: No alt_disk property is specified.'\\\n .format(vios_name)\n break # vios loop\n elif not disks:\n if not vios[\"altinst_rootvg\"]:\n msg = '{}: The bosinst operation requires an altinst_rootvg.'\\\n 'Create one or add the alt_disk property for this node.'\\\n .format(vios_name)\n break # vios loop\n else:\n vios[\"skip\"] = True\n\n # Reject vios and tuple if altinst_rootvg already exists\n # and alt_disk property is specified\n elif disks and vios[\"altinst_rootvg\"]:\n msg = '{}: altinst_rootvg already exist, rename it.'.format(vios_name)\n if action == 'bosinst':\n msg += ' Or remove the alt_disk property.'\n\n # test if alt_disks are free.\n # test the total size of alt_disks is enhougth for installation or clonne rootvg\n elif disks:\n d_lsit = disks.split()\n total_size = 0\n for disk in d_lsit:\n if disk in vios[\"free_pv\"].keys():\n total_size += vios[\"free_pv\"][disk]\n else:\n msg = '{}: the specified disk {} is not free'\\\n .format(vios_name, disk)\n break # test disk loop\n if msg:\n break # vios loop\n if total_size < 30720 and action == \"altdisk\":\n msg = '{}: The total size of alternate disk(s) {}: {} '\\\n 'is less than 30G. 
Choose disk(s) with adequate size.'\\\n .format(vios_name, disks, total_size)\n elif action == 'bosinst' and total_size < rootvg_size:\n msg = '{}: The total size of alternate disk(s) {}: {} '\\\n 'is less than the actual rootvg size {}.'\\\n 'Choose disk(s) with adequate size.'\\\n .format(vios_name, disks, total_size, rootvg_size)\n if msg:\n break # vios loop\n vios[\"skip\"] = False\n # end management disk size\n # end vios loop\n if msg:\n logging.warning(msg)\n OUTPUT.append(msg)\n msg = \"Then the \\\"{}\\\" target will not be selected for upgrade operation\"\\\n .format(tuple_key)\n logging.warning(msg)\n OUTPUT.append(msg)\n logging.debug('Rejected vios tuple: {}: {}'.format(tuple_key, tuple))\n vios[\"status\"] = REJECTED\n upgrade_status[tuple_key] = REJECTED\n else:\n all_vioses.extend(vioses)\n upgrade_status[tuple_key] = DONE\n for vios_name in vioses:\n if tuple[vios_name][\"status\"] == READY:\n upgrade_status[tuple_key] = READY\n tuples[tuple_key] = tuple\n if cluster_id:\n all_cluster_ids.append(cluster_id)\n break\n # end tuple loop\n\n logging.debug('Remaining TARGETS={}'.format(tuples))\n\n MODULE.targets = all_targets\n OUTPUT.append('Remaining Targets list:{}'.format(tuples.keys()))\n\n if len(tuples.keys()) == 0:\n msg = 'All targets have been rejected. 
It remains no thing to do!'\n OUTPUT.append(msg)\n if VERBOSITY == 3:\n MODULE.exit_json(\n changed=False,\n msg=msg,\n nim_node=NIM_NODE,\n targets=MODULE.targets,\n debug_output=DEBUG_DATA,\n output=OUTPUT,\n status=upgrade_status)\n else:\n MODULE.exit_json(\n changed=False,\n msg=msg,\n output=OUTPUT,\n status=upgrade_status)\n\n nb_error = viosupgrade(MODULE, tuples, upgrade_status)\n\n # Prints vios status for each targets\n for tuple_key in upgrade_status:\n status = upgrade_status[tuple_key]\n msg = 'VIOSUpgrade operation on target: \"{}\" end with status: {}.'\\\n .format(tuple_key, status)\n OUTPUT.append(msg)\n logging.info(msg)\n if status == DONE or status == ERROR:\n for vios_name in tuple_key.split():\n msg = 'VIOSUpgrade {} operation status on \"{}\": {}.'\\\n .format(tuples[tuple_key][vios_name][\"action\"], vios_name,\n tuples[tuple_key][vios_name][\"status\"])\n logging.info(msg)\n OUTPUT.append(msg)\n\n # Prints a global result statement\n if nb_error == 0:\n msg = 'NIM VIOSUpgrade operation completed successfully'\n OUTPUT.append(msg)\n logging.info(msg)\n else:\n msg = 'VIOSUpgrade operation failed: {} errors'.format(nb_error)\n OUTPUT.append(msg)\n logging.error(msg)\n\n # # =========================================================================\n # # Exit\n # # =========================================================================\n if nb_error == 0:\n if VERBOSITY == 3:\n MODULE.exit_json(\n changed=CHANGED,\n msg=msg,\n nim_node=NIM_NODE,\n targets=MODULE.targets,\n debug_output=DEBUG_DATA,\n output=OUTPUT,\n status=upgrade_status)\n else:\n MODULE.exit_json(\n changed=CHANGED,\n msg=msg,\n targets=MODULE.targets,\n output=OUTPUT,\n status=upgrade_status)\n else:\n MODULE.fail_json(\n changed=CHANGED,\n msg=msg,\n targets=MODULE.targets,\n debug_output=DEBUG_DATA,\n output=OUTPUT,\n 
status=upgrade_status)\n","sub_path":"library/aix_nim_viosupgrade.py","file_name":"aix_nim_viosupgrade.py","file_ext":"py","file_size_in_byte":49206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"395290888","text":"import pymongo\n\nimport time\nfrom jianwei.pipelines import MongoPipeline\n\nmonitor = MongoPipeline('mongodb://localhost:27017', 'jianwei')\nmonitor_db = monitor.db\nnewest_name = max(monitor_db.list_collection_names())\nmongo_clt = monitor.db[newest_name]\nstart_time = time.time()\nwhile True:\n used_time = time.time() - start_time\n\n print('Time: %6.2f min, Documents stacked: %6d in collection %s'\n % (used_time/60, mongo_clt.count_documents({}), mongo_clt.name))\n time.sleep(5)\n\n '''After Main Procedure starts, then start it, for waiting reading the MongoDB'''\n\n","sub_path":"files/monitor_spider.py","file_name":"monitor_spider.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"9439433","text":"from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.models import CMSPlugin\nfrom cms.plugin_pool import plugin_pool\nfrom cms.plugin_base import CMSPluginBase\n\nfrom sections.cms_plugins import WrapPluginForm, WrapPlugin\nfrom sections.models import Wrap\nfrom baseplugin.utils import get_indicator_hidden\n\nfrom . import conf\nfrom .models import Column\n\n\nclass ColumnPluginForm(forms.ModelForm):\n class Meta:\n model = Column\n fields = conf.COLUMN_FIELDS\n widgets = {\n 'bg_color': forms.Select(\n choices=conf.COLUMN_BACKGROUND_COLORS\n ),\n 'css_class': forms.Select(\n choices=conf.COLUMN_CSS_CLASSES\n ),\n 'height': forms.Select(\n choices=conf.COLUMN_HEIGHTS\n ),\n 'width': forms.RadioSelect(\n choices=conf.COLUMN_WIDTHS\n ),\n }\n\n\nclass ColumnPlugin(CMSPluginBase):\n allow_children = True\n child_classes = conf.COLUMN_PLUGINS\n exclude = conf.COLUMN_EXCLUDE\n fieldsets = conf.COLUMN_FIELDSETS\n form = ColumnPluginForm\n model = Column\n module = _('layout')\n name = _('column')\n render_template = 'cms/plugins/columns_column.html'\n\n def render(self, context, instance, placeholder):\n request = context['request']\n context.update({\n 'object': instance,\n 'placeholder':placeholder,\n 'indicator_hidden': get_indicator_hidden(request, instance),\n })\n return context\n\nplugin_pool.register_plugin(ColumnPlugin)\n","sub_path":"columns/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"645687645","text":"# AprilTags Example\n#\n# This example shows the power of the OpenMV Cam to detect April Tags\n# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.\n\n\nimport machine, gc,utime\nfrom pyb import LED\nimport sensor, image, time, math\nimport LPF2Class\n\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...\nsensor.skip_frames(time = 2000)\nsensor.set_auto_gain(False) # must turn this off to prevent image washout...\nsensor.set_auto_whitebal(False) # must turn this off to prevent image washout...\n\nprint(\"made it here\")\n\n# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.\n\n# The apriltag code supports up to 6 tag families which can be processed at the same time.\n# Returned tag objects will have their tag family and id within the tag family.\n\ntag_families = 0\ntag_families |= image.TAG16H5 # comment out to disable this family\ntag_families |= image.TAG25H7 # comment out to disable this family\ntag_families |= image.TAG25H9 # comment out to disable this family\ntag_families |= image.TAG36H10 # comment out to disable this family\ntag_families |= image.TAG36H11 # comment out to disable this family (default family)\ntag_families |= image.ARTOOLKIT # comment out to disable this family\n\n# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively\n# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which\n# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve\n# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. 
So, unless you have a\n# reason to use the other tags families just use TAG36H11 which is the default family.\n\ndef family_name(tag):\n if(tag.family() == image.TAG16H5):\n return \"TAG16H5\"\n if(tag.family() == image.TAG25H7):\n return \"TAG25H7\"\n if(tag.family() == image.TAG25H9):\n return \"TAG25H9\"\n if(tag.family() == image.TAG36H10):\n return \"TAG36H10\"\n if(tag.family() == image.TAG36H11):\n return \"TAG36H11\"\n if(tag.family() == image.ARTOOLKIT):\n return \"ARTOOLKIT\"\n\n\nprint(\"hi\")\nred_led=LED(1)\nred_led.on()\nlpf2 = LPF2Class.LPF2(3, 'P4', 'P5') # OpenMV\nprint(\"ugh\")\nlpf2.initialize()\n\nprint(\"initialized\")\n\nwhile True:\n if not lpf2.connected:\n red_led.on()\n utime.sleep(1)\n lpf2.initialize()\n\n else:\n red_led.off()\n while lpf2.connected:\n gc.collect()\n img = sensor.snapshot()\n for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without \"families\".\n img.draw_rectangle(tag.rect(), color = (255, 0, 0))\n img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))\n print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)\n print(\"APRIL TAG \" + str(tag.id()))\n lpf2.send_value(int(tag.id()))\n utime.sleep(0.1)\n","sub_path":"SPIKE Prime Backpacks/OpenMVCamera/AprilTagDetection/PythonIDE/AprilTags.py","file_name":"AprilTags.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"98689984","text":"import sys\nimport logging\nfrom mpi4py import MPI\n\ndef err_out_screen(err_msg):\n \"\"\"\n Generic routine to exit the program gracefully. This specific error function does not log\n error messages to the log file, but simply prints them out to the screen. This is function\n is designed specifically for early in the program execution where a log file hasn't been\n established yet.\n Logan Karsten - National Center for Atmospheric Research, karsten@ucar.edu\n \"\"\"\n\n err_msg_out = 'ERROR: ' + err_msg\n print(err_msg_out)\n sys.exit(1)\n\ndef err_out_screen_para(err_msg,MpiConfig):\n \"\"\"\n Generic function for printing an error message to the screen and aborting MPI.\n This should only be called if logging cannot occur and an abrupt end the program\n is neded.\n :param err_msg:\n :param MpiConfig:\n :return:\n \"\"\"\n err_msg_out = 'ERROR: RANK - ' + str(MpiConfig.rank) + ' : ' + err_msg\n print(err_msg_out)\n MpiConfig.comm.Abort()\n sys.exit(1)\n\ndef check_program_status(ConfigOptions,MpiConfig):\n \"\"\"\n Generic function to check the err statuses for each processor in the program.\n If any flags come back, gracefully exit the program.\n :param ConfigOptions:\n :param MpiConfig:\n :return:\n \"\"\"\n # Sync up processors to ensure everyone is on the same page.\n MpiConfig.comm.barrier()\n\n # Collect values from each processor.\n data = MpiConfig.comm.gather(ConfigOptions.errFlag, root=0)\n if MpiConfig.rank == 0:\n for i in range(MpiConfig.size):\n if data[i] != 0:\n MpiConfig.comm.Abort()\n sys.exit(1)\n else:\n assert data is None\n\n # Sync up processors.\n MpiConfig.comm.barrier()\n\ndef init_log(ConfigOptions,MpiConfig):\n \"\"\"\n Function for initializing log file for individual forecast cycles. 
Each\n log file is unique to the instant the program was initialized.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n ConfigOptions.errMsg = \"Unable to create logging object \" \\\n \"for: \" + ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n formatter = logging.Formatter('[%(asctime)s]: %(levelname)s '\n '- %(message)s', '%m/%d %H:%M:%S')\n except:\n ConfigOptions.errMsg = \"Unable to establish formatting for logger.\"\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n ConfigOptions.logHandle = logging.FileHandler(ConfigOptions.logFile,mode='a')\n except:\n ConfigOptions.errMsg = \"Unable to create log file handle for: \" + \\\n ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n ConfigOptions.logHandle.setFormatter(formatter)\n except:\n ConfigOptions.errMsg = \"Unable to set formatting for: \" + \\\n ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n logObj.addHandler(ConfigOptions.logHandle)\n except:\n ConfigOptions.errMsg = \"ERROR: Unable to add log handler for: \" + \\\n ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n\ndef err_out(ConfigOptions):\n \"\"\"\n Function to error out after an error message has been logged for a\n forecast cycle. 
We will exit with a non-zero exit status.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n ConfigOptions.errMsg = \"Unable to obtain a logger object for: \" + \\\n ConfigOptions.logFile\n raise Exception()\n try:\n logObj.setLevel(logging.ERROR)\n except:\n ConfigOptions.errMsg = \"Unable to set ERROR logger level for: \" + \\\n ConfigOptions.logFile\n raise Exception()\n try:\n logObj.error(ConfigOptions.errMsg)\n except:\n ConfigOptions.errMsg = \"Unable to write error message to: \" + \\\n ConfigOptions.logFile\n raise Exception()\n MPI.Finalize()\n sys.exit(1)\n\ndef log_error(ConfigOptions,MpiConfig):\n \"\"\"\n Function to log an error message to the log file.\n :param ConfigOptions:\n :param MpiConfig:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.ERROR)\n except:\n err_out_screen_para(('Unable to set ERROR logger level on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.error(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.errMsg)\n except:\n err_out_screen_para(('Unable to write ERROR message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n ConfigOptions.errFlag = 1\n\ndef log_critical(ConfigOptions,MpiConfig):\n \"\"\"\n Function for logging an error message without exiting without a\n non-zero exit status.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.CRITICAL)\n except:\n err_out_screen_para(('Unable to set CRITICAL logger level on RANK: ' + 
str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.critical(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.errMsg)\n except:\n err_out_screen_para(('Unable to write CRITICAL message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n ConfigOptions.errFlag = 1\n\ndef log_warning(ConfigOptions,MpiConfig):\n \"\"\"\n Function to log warning messages to the log file.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.WARNING)\n except:\n err_out_screen_para(('Unable to set WARNING logger level on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.warning(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.statusMsg)\n except:\n err_out_screen_para(('Unable to write WARNING message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n\ndef log_msg(ConfigOptions,MpiConfig):\n \"\"\"\n Function to log INFO messages to a specified log file.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.INFO)\n except:\n err_out_screen_para(('Unable to set INFO logger level on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.info(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.statusMsg)\n except:\n err_out_screen_para(('Unable to write INFO message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n\ndef 
close_log(ConfigOptions,MpiConfig):\n \"\"\"\n Function for closing a log file.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile), MpiConfig)\n try:\n logObj.removeHandler(ConfigOptions.logHandle)\n except:\n err_out_screen_para(('Unable to remove logging file handle on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile), MpiConfig)\n try:\n ConfigOptions.logHandle.close()\n except:\n err_out_screen_para(('Unable to close looging file: ' + ConfigOptions.logFile +\n ' on RANK: ' + str(MpiConfig.rank)),MpiConfig)\n ConfigOptions.logHandle = None","sub_path":"core/errMod.py","file_name":"errMod.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"29962750","text":"\nimport os\nfrom PIL import Image\nimport shutil\nimport sys\nimage_size=144\n#改变之后的图片尺寸\n\nsource_path=os.getcwd()+\"/image/\"#等待转换的图片存放地址\ntypes='png' #转换后的图片格式\ntarget_path=os.getcwd()+\"/changepng/\"#转换过格式的图片存放地址\nfinal_path=os.getcwd()+\"/final/\"# 转换过格式和尺寸的图片存放地址\n\n#如果没有转换后的图片存放文件夹,就创建对应的文件夹\nif not os.path.exists(target_path):\n os.makedirs(target_path)\nif not os.path.exists(final_path):\n os.makedirs(final_path)\n\ndef changepng(source_path,types):\n files = []\n image_list=os.listdir(source_path)\n #print(image_list)\n files = [os.path.join(source_path,_) for _ in image_list]\n for index,jpg in enumerate(files):\n if index > 1000:\n break\n try:\n sys.stdout.write('\\r>>Converting image %d/100000 ' % (index))\n sys.stdout.flush()\n im = Image.open(jpg)\n png = os.path.splitext(jpg)[0] + \".\" + types\n im.save(png)\n shutil.move(png,target_path)\n except IOError as e:\n print('could not read:',jpg)\n print('error:',e)\n print('skip it\\n')\n sys.stdout.write('Convert Over!\\n')\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n path = r\"F:\\untitled7\\get_html\\imgs\"\n changepng(path, \"png\")\n","sub_path":"test1/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"253184342","text":"import openpyxl\n\nclass Write_excel_xlsx_append(object):\n def __init__(self, path, value, sheet_name):\n self.path = path\n self.value = value\n self.sheet_name = sheet_name\n\n def write_excel_xlsx_append(self):\n \"\"\"向xls文件内写入(追加)内容\"\"\"\n\n # 写入内容格式[[], [],[]..]\n # 写��内容行数\n index = len(self.value)\n # 读模式打开文件\n work_book = openpyxl.load_workbook(self.path)\n # 定位sheet页\n work_sheet = work_book[self.sheet_name]\n # sheet页数据行数\n rows_old = work_sheet.max_row\n rows = lambda x: x-1 if x else 0\n row_old_real = rows(rows_old)\n print('原sheet页数据行数为: %s' % (row_old_real))\n # 追加内容\n for i in range(1, index+1):\n for j in range(1, len(self.value[i-1])+1):\n work_sheet.cell(rows_old+i-1, j).value = self.value[i-1][j-1]\n print('写入完成,%s页数据行数为: %s' % (self.sheet_name, rows_old + index))\n # 保存文件\n work_book.save(self.path)\n print('新数据保存成功')\n\n","sub_path":"read_write_apend_xlsx/write_xlsx.py","file_name":"write_xlsx.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"547294470","text":"import cv2\n\nimport numpy as np\n\n\ndef Find(path):\n # 창 이름 설정\n\n cv2.namedWindow('image')\n\n # 이미지 파일 읽기\n\n img = cv2.imread(path, cv2.IMREAD_COLOR)\n\n # 이미지 사이즈 조정\n\n img = Resize(img)\n\n # 이미지 색 바꾸기\n\n # img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # img_ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n # 잡음 제거\n\n img_hsv = cv2.fastNlMeansDenoisingColored(img_hsv, None, 10, 10, 7, 21)\n\n lower = np.array([0, 48, 80], dtype=\"uint8\")\n\n upper = np.array([20, 255, 255], dtype=\"uint8\")\n\n img_hand = cv2.inRange(img_hsv, lower, upper)\n\n # 경계선 찾음\n\n contours, hierarchy = cv2.findContours(img_hand, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n # 가장 큰 영역 찾기\n\n max = 0\n\n maxcnt = None\n\n for cnt in contours:\n\n area = cv2.contourArea(cnt)\n\n if (max < area):\n max = area\n\n maxcnt = cnt\n\n # maxcontours의 각 꼭지점 다각선 만들기\n\n hull = cv2.convexHull(maxcnt)\n\n # img 다 0으로 만들기?\n\n mask = np.zeros(img.shape).astype(img.dtype)\n\n color = [255, 255, 255]\n\n # 경계선 내부 255로 채우기\n\n cv2.fillPoly(mask, [maxcnt], color)\n\n img_hand = cv2.bitwise_and(img, mask)\n\n cv2.drawContours(img_hand, [maxcnt], 0, (255, 0, 0), 3)\n\n cv2.drawContours(img_hand, [hull], 0, (0, 255, 0), 3)\n\n # 이미지 보여주기\n\n cv2.imshow('image', img_hand)\n\n # 창 esc 끄기\n\n while True:\n\n if cv2.waitKey(0) == 27:\n cv2.destroyWindow('image')\n\n break;\n\n return\n\n\ndef Resize(img):\n print(img.shape)\n\n width = 500\n\n ratio = width / img.shape[1] # width * 사진 너비 = 비율\n\n height = int(ratio * img.shape[0]) # 비율 * 사진 높이\n\n # 축소 INTER_AREA\n\n # 확대 INTER_LINEAR\n\n resize = cv2.resize(img, dsize=(width, height), interpolation=cv2.INTER_AREA)\n\n # resize = cv2.resize(img, dsize = (0, 0), fx=1.5, fy=1.5, interpolation = cv2.INTER_AREA)\n\n print(resize.shape)\n\n return 
resize\n\n\n\nFind(\"ok3.jpg\")\n\n\n\n","sub_path":"python_source/딥러닝/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"246446368","text":"##### IMPORT #####\nimport pyglet, random, math\nfrom pyglet import font\nfrom classes import Player, Feather, RotatingSprite, Window, Poetry\nfrom pyglet.window import mouse\n\n##### USEFUL SIMPLE FUNCTIONS #####\ndef center_image(image):\n \"\"\"\n Sets an image's anchor point to its center\n :param image: image\n :return: None\n \"\"\"\n image.anchor_x = image.width // 2 # put the anchor of the image at the half of its width\n image.anchor_y = image.height // 2 # put the anchor of the image at the half of its height\n\ndef distance(point_1=(0, 0), point_2=(0, 0)):\n '''\n Calculates the distance between two points.\n :param point_1: tuple\n :param point_2: tuple\n :return: float\n '''\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2) # pythagore\n\n##### MUSIC #####\nmusicSource = pyglet.media.load('resources/sound/violin.wav')\nmusic = pyglet.media.Player()\nmusic.volume = 0.0005\n#Keep playing for as long as the app is running (or you tell it to stop):\nmusic.eos_action = pyglet.media.SourceGroup.loop\n\n##### GAME WINDOW #####\ngame_window = Window()\nx = game_window.width\ny = game_window.height\ngame = False #State of the game, on or off\n\n##### WALLPAPER #####\nwallpaper = pyglet.resource.image('resources/sprites/wallpaper.jpg')\nwallpaper_sprite = pyglet.sprite.Sprite(img=wallpaper, x=0, y=0)\n\n##### MENU ####\nclose_img = pyglet.resource.image('resources/sprites/close_game.png')\nclose_img2 = pyglet.resource.image('resources/sprites/close_game_grey.png')\nclose_scale = close_img.height/close_img.width\nclose = pyglet.sprite.Sprite(img=close_img,\n x=close_img.width*close_scale//4,\n y=y-int(2*close_img.height*close_scale)) #set position of close image\nclose.scale = close_scale\n\nrestart_img = pyglet.resource.image('resources/sprites/restart_game.png')\nrestart_img2 = pyglet.resource.image('resources/sprites/restart_game_grey.png')\nrestart_scale = 
restart_img.height/restart_img.width\nrestart = pyglet.sprite.Sprite(img=restart_img,\n x=restart_img.width*restart_scale//4,\n y=y-int(3.5*restart_img.height*restart_scale)) #set position of restart image\nrestart.scale = restart_scale\n\n##### BATCH #####\nbatch = pyglet.graphics.Batch()\n\n##### PARCHMENT #####\nparchment_image = pyglet.resource.image('resources/sprites/parchment.png')\ncenter_image(parchment_image)\nparchment_scale = parchment_image.height/parchment_image.width #Scale of the parchment\nparchment = pyglet.sprite.Sprite(img=parchment_image,\n x=x//2,\n y=parchment_image.height//2 + 20)\n\n##### PLAYER #####\nplayer_image = pyglet.resource.image('resources/sprites/player.png')\ncenter_image(player_image)\nplayer_sprite = Player(img=player_image,\n x=x//2,\n y=(y+2*parchment.y)//2,\n batch=batch) # set position of player as a Player instance\ngame_window.push_handlers(player_sprite)\n\n##### PLAYER LIVES #####\nplayer_lives = 3\nlive = pyglet.text.Label('Lives : ' + str(player_lives),\n font_name='Times New Roman',\n font_size=x/30,\n x=x-x//10, y=y-y//15,\n anchor_x='center', anchor_y='center')\n\n##### CIRCLE SEGMENTS #####\ncircle_segment = pyglet.resource.image(\"resources/sprites/circle_segment.png\")\ncenter_image(circle_segment)\n#Load the 15 segments with the RotatingSprite class\nfor i in range(15):\n angle_degrees = (360/15)*i # set the angle of every segment\n angle_radians = math.radians(angle_degrees)\n xc, yc = (x//2, (y+2*parchment.y)//2)\n r = x//6 #radius of the circle\n segment = RotatingSprite(angle_radians=angle_radians,\n r=r, xc=xc, yc=yc,\n word=RotatingSprite.words[i], img=circle_segment, batch=batch)\n RotatingSprite.segments.append(segment) #add the segment to the list which is updated\n RotatingSprite.all_segments.append(segment)\n\n##### POETRY #####\npoem = Poetry()\npoem.initialize()\nline = 0 #actual line of the poetry\n\n##### INRODUCTION AND GAME OVER LABEL #####\nintro_text = pyglet.text.Label('Press left mouse 
button to start',\n font_name='Times New Roman',\n font_size=x/30,\n italic=True,\n x=x//2, y=y//2,\n anchor_x='center', anchor_y='center')\n\ngame_over = pyglet.text.Label('Game Over',\n font_name='Times New Roman',\n font_size=x/30,\n italic=True,\n x=x//2, y=y//2,\n anchor_x='center', anchor_y='center')\n\nrestart_text = pyglet.text.Label('Press left mouse button to restart',\n font_name='Times New Roman',\n font_size=x/30,\n italic=True,\n x=x//2, y=y//3,\n anchor_x='center', anchor_y='center')\n\n##### GAME FUNCTIONS #####\ndef write_towards(poetry):\n global line\n toward = poetry.split_poetry()\n msg = ' '.join(toward[line]) #take the first verse\n label = pyglet.text.Label(str(msg),\n font_name='Times New Roman',\n font_size=18,\n color=(75, 0, 130, 255),\n x=parchment.x, y=parchment.y,\n anchor_x='center', anchor_y='center')\n label.draw() #write the sentence on the parchment\n\ndef chargeBar(player_sprite, player_image):\n '''\n Draws the line for the reloading time.\n :param player_sprite: sprite\n :param player_image: image\n :return: None\n '''\n player_start = player_sprite.x - player_sprite.width // 2\n\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2))\n )\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2+1, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2+1))\n )\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2+2, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2+2))\n )\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2+3, 
player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2+3))\n )\n\ndef in_sprite(sprite, x, y):\n '''\n Verifies if the coordonates (x, y) are in the sprite\n :param sprite: sprite\n :param x: int\n :param y: int\n :return: bool\n '''\n res = sprite.x <= x <= sprite.x + sprite.width and sprite.y <= y <= sprite.y + sprite.height\n return res\n\n@game_window.event\ndef on_draw():\n '''\n The draw function.\n :return: None\n '''\n global game, player_lives\n game_window.clear()\n wallpaper_sprite.draw()\n if game:\n restart.draw()\n close.draw()\n live.draw()\n game_window.fps_display.draw()\n parchment.draw()\n #Draw the player and the segments\n batch.draw()\n write_towards(poem)\n #Draw the segments\n for segment in RotatingSprite.segments:\n segment.label.draw()\n #Draw the reloading line\n chargeBar(player_sprite, player_image)\n #Draw every projectile\n for feather in Feather.feathers:\n feather.draw()\n #Draw the dead feathers\n for obj in RotatingSprite.intert_objects:\n obj.draw()\n else:\n if player_lives > 0:\n intro_text.draw()\n else:\n game_over.draw()\n restart_text.draw()\n\n@game_window.event\ndef on_mouse_press(x, y, button, modifiers):\n global game\n if mouse.LEFT == True:\n if game: #if the game is on or off\n if in_sprite(restart, x, y): #condition to press on the button\n game_restart()\n elif in_sprite(close, x, y):\n pyglet.app.exit()\n else:\n game_restart()\n game = True\n \n@game_window.event\ndef on_mouse_motion(x, y, dx, dy):\n '''\n Controls the animation of the two buttons.\n :return: None\n '''\n if restart.image == restart_img and in_sprite(restart, x, y): #turn the image in grey when mouse is on the restart button\n restart.image = restart_img2\n elif restart.image != restart_img and not in_sprite(restart, x, y):\n restart.image = restart_img\n\n if close.image == close_img and in_sprite(close, x, y): #turn the image in grey when mouse is on the close button\n close.image = 
close_img2\n elif close.image != close_img and not in_sprite(close, x, y):\n close.image = close_img\n\ndef game_restart():\n '''\n Restart the game and set all variables to their beginning state.\n '''\n global player_lives, line\n RotatingSprite.dead_segments.reverse() #segments in the order of their death\n for segment in RotatingSprite.dead_segments: # transform all dead segments back in segments but in the right order (reverse)\n segment.relive()\n RotatingSprite.words.insert(0, segment.word)\n RotatingSprite.dead_segments.clear() # clear the dead_segment list when restart\n RotatingSprite.intert_objects.clear() # clear the dead feathers when restart\n player_lives = 3\n line = 0\n\ndef update(dt):\n '''\n Updates the game objects every frame (60 times per second)\n :param dt: float\n :return: None\n '''\n global line, player_lives, game, live\n if game:\n player_sprite.update(dt)\n if len(Feather.feathers) > 0:\n for feather in Feather.feathers: # update position of all dead segments\n feather.update_position(dt)\n if len(RotatingSprite.segments) > 0:\n for segment in RotatingSprite.segments: # update position of all segments\n segment.update(dt)\n if len(RotatingSprite.dead_segments) > 0:\n for dead_segment in RotatingSprite.dead_segments: # update position of all dead segments\n dead_segment.update(dt)\n if len(RotatingSprite.intert_objects) > 0:\n for obj in RotatingSprite.intert_objects: #update position of the dead feathers\n obj.update(dt)\n\n if player_lives > 0:\n live.text = 'Lives : ' + str(player_lives)\n else:\n game = False\n\n ### Collision\n for feather in Feather.feathers:\n already_dead = False #prevent the delete of two segments with the same feather\n already_hit = False #prevent the delete of two lives with the same feather\n if distance(point_1=(feather.x, feather.y), point_2=(xc, yc)) > r - circle_segment.height//2: # check when a feather reaches the segments \n feather.dead = True # kill the feather\n if len(RotatingSprite.segments) > 
0:\n for segment in RotatingSprite.all_segments: #even the dead segments\n if distance(point_1=(feather.x, feather.y), point_2=(segment.x, segment.y)) < 1.27 * r * math.sin(math.radians(360/15)/2): # check which segments is hit by the feather\n if not already_dead: # kill the segment if the feather has not kill one already\n if segment.word == RotatingSprite.words[0]:\n line += 1\n segment.dead = True\n segment.update(dt) # update the next segment in segment list (to prevent a bug)\n already_dead = True\n elif not already_hit:\n if player_lives > 0:\n player_lives -= 1\n already_hit = True\n else:\n print('Win')\n else:\n pass\n\nif __name__ == \"__main__\":\n\n pyglet.clock.schedule_interval(update, game_window.frame_rate) #Activate the update function (60 Hz)\n\n music.queue(musicSource)\n music.play()\n \n pyglet.app.run()","sub_path":"usr/duc/good project/circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":12504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"9450173","text":"from idlelib.idle_test.test_run import S\nfrom itertools import product\n\nfrom django.contrib.auth import logout, update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib import messages\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models import Q\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom sbs.Forms.BeltExamForm import BeltExamForm\nfrom sbs.Forms.ClubForm import ClubForm\nfrom sbs.Forms.ClubRoleForm import ClubRoleForm\nfrom sbs.Forms.CommunicationForm import CommunicationForm\nfrom sbs.Forms.DisabledCommunicationForm import DisabledCommunicationForm\nfrom sbs.Forms.DisabledPersonForm import DisabledPersonForm\nfrom sbs.Forms.DisabledSportClubUserForm import DisabledSportClubUserForm\nfrom sbs.Forms.DisabledUserForm import DisabledUserForm\nfrom sbs.Forms.PersonForm import PersonForm\nfrom sbs.Forms.SportClubUserForm import SportClubUserForm\nfrom sbs.Forms.UserForm import UserForm\nfrom sbs.Forms.SearchClupForm import SearchClupForm\nfrom sbs.Forms.PreRegidtrationForm import PreRegistrationForm\nfrom sbs.Forms.UserSearchForm import UserSearchForm\nfrom sbs.Forms.ClupUserSearchForm import ClubSearchForm\n\nfrom sbs.models import SportsClub, SportClubUser, Communication, Person, BeltExam, Athlete, Coach, Level, CategoryItem, \\\n License\nfrom sbs.models.ClubRole import ClubRole\nfrom sbs.models.EnumFields import EnumFields\nfrom sbs.models.PreRegistration import PreRegistration\nfrom sbs.services import general_methods\nfrom datetime import date,datetime\nimport datetime\nfrom django.utils import timezone\n\nfrom zeep import Client\n# from sbs.models.Person import Person\n# from sbs.models.PreRegistration import PreRegistration\nfrom 
sbs.models.ReferenceReferee import ReferenceReferee\nfrom sbs.models.ReferenceCoach import ReferenceCoach\n\nfrom django.contrib.auth.models import Group, Permission, User\nfrom operator import itemgetter\n@login_required\ndef return_add_club(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n club_form = ClubForm()\n communication_form = CommunicationForm()\n\n if request.method == 'POST':\n\n club_form = ClubForm(request.POST, request.FILES or None)\n communication_form = CommunicationForm(request.POST, request.FILES)\n\n if club_form.is_valid():\n clubsave = SportsClub(name=club_form.cleaned_data['name'],\n shortName=club_form.cleaned_data['shortName'],\n foundingDate=club_form.cleaned_data['foundingDate'],\n logo=club_form.cleaned_data['logo'],\n clubMail=club_form.cleaned_data['clubMail'],\n isFormal=club_form.cleaned_data['isFormal'],\n petition=club_form.cleaned_data['petition'],\n\n )\n\n communication = communication_form.save(commit=False)\n communication.save()\n clubsave.communication = communication\n\n clubsave.save()\n\n log = str(club_form.cleaned_data['name']) + \" Klup eklendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n\n messages.success(request, 'Kulüp Başarıyla Kayıt Edilmiştir.')\n\n return redirect('sbs:kulupler')\n\n else:\n\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kulup-ekle.html',\n {'club_form': club_form, 'communication_form': communication_form})\n\n\n@login_required\ndef return_clubs(request):\n perm = general_methods.control_access_klup(request)\n\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n user = request.user\n clubs = SportsClub.objects.none()\n ClupsSearchForm=ClubSearchForm(request.POST)\n if user.groups.filter(name='KulupUye'):\n clubuser = SportClubUser.objects.get(user=user)\n clubs = 
SportsClub.objects.filter(clubUser=clubuser).order_by(\"-pk\")\n\n if request.method == 'POST':\n\n if ClupsSearchForm.is_valid():\n kisi = ClupsSearchForm.cleaned_data.get('kisi')\n city = ClupsSearchForm.cleaned_data.get('city')\n name = ClupsSearchForm.cleaned_data.get('name')\n shortName = ClupsSearchForm.cleaned_data.get('shortName')\n clubMail = ClupsSearchForm.cleaned_data.get('clubMail')\n if not (kisi or city or name or shortName or clubMail):\n if user.groups.filter(name='KulupUye'):\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser).order_by(\"-pk\")\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n clubs = SportsClub.objects.all().order_by(\"-pk\")\n\n else:\n query = Q()\n if city:\n query &= Q(communication__city__name__icontains=city)\n if name:\n query &= Q(name__icontains=name)\n if clubMail:\n query &= Q(clubMail__icontains=clubMail)\n if shortName:\n query &= Q(shortName__icontains=shortName)\n if kisi:\n query &= Q(clubUser=kisi)\n if user.groups.filter(name='KulupUye'):\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser).filter(query)\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n clubs = SportsClub.objects.filter(query)\n\n return render(request, 'kulup/kulupler.html', {'clubs': clubs, 'ClupsSearchForm': ClupsSearchForm, })\n\n\n@login_required\ndef return_add_club_person(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n user_form = UserForm()\n person_form = PersonForm()\n communication_form = CommunicationForm()\n sportClubUser_form = SportClubUserForm()\n if request.method == 'POST':\n\n user_form = UserForm(request.POST)\n person_form = PersonForm(request.POST, request.FILES)\n communication_form = CommunicationForm(request.POST, request.FILES)\n sportClubUser_form = SportClubUserForm(request.POST)\n\n mail = 
request.POST.get('email')\n\n if User.objects.filter(email=mail) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n email=mail) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n email=mail) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(\n email=mail):\n messages.warning(request, 'Mail adresi başka bir kullanici tarafından kullanilmaktadir.')\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n tc = request.POST.get('tc')\n if Person.objects.filter(tc=tc) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n tc=tc) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n tc=tc) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(tc=tc):\n messages.warning(request, 'Tc kimlik numarasi sisteme kayıtlıdır. ')\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n name = request.POST.get('first_name')\n surname = request.POST.get('last_name')\n year = request.POST.get('birthDate')\n year = year.split('/')\n\n client = Client('https://tckimlik.nvi.gov.tr/Service/KPSPublic.asmx?WSDL')\n if not (client.service.TCKimlikNoDogrula(tc, name, surname, year[2])):\n messages.warning(request, 'Tc kimlik numarasi ile isim soyisim dogum yılı bilgileri uyuşmamaktadır. 
')\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n if user_form.is_valid() and person_form.is_valid() and communication_form.is_valid() and sportClubUser_form.is_valid():\n user = User()\n user.username = user_form.cleaned_data['email']\n user.first_name = user_form.cleaned_data['first_name']\n user.last_name = user_form.cleaned_data['last_name']\n user.email = user_form.cleaned_data['email']\n group = Group.objects.get(name='KulupUye')\n password = User.objects.make_random_password()\n user.set_password(password)\n user.save()\n user.groups.add(group)\n user.save()\n\n person = person_form.save(commit=False)\n communication = communication_form.save(commit=False)\n person.save()\n communication.save()\n\n club_person = SportClubUser(\n user=user, person=person, communication=communication,\n role=sportClubUser_form.cleaned_data['role'],\n\n )\n\n club_person.save()\n\n subject, from_email, to = 'Halter - Kulüp Üye Bilgi Sistemi Kullanıcı Giriş Bilgileri', 'no-reply@twf.gov.tr', user.email\n text_content = 'Aşağıda ki bilgileri kullanarak sisteme giriş yapabilirsiniz.'\n html_content = ' Site adresi: https://sbs.halter.gov.tr
'\n html_content = html_content + 'Kullanıcı Adı: ' + user.username + '
'\n html_content = html_content + 'Şifre: ' + password + '
'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n log = str(user.get_full_name()) + \" Klupuyesi eklendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n\n messages.success(request, 'Kulüp Üyesi Başarıyla Kayıt Edilmiştir.')\n\n return redirect('sbs:kulup-uyeleri')\n\n else:\n\n for x in user_form.errors.as_data():\n messages.warning(request, user_form.errors[x][0])\n\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n\n@login_required\ndef updateClubPersons(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n athlete = SportClubUser.objects.get(pk=pk)\n user = User.objects.get(pk=athlete.user.pk)\n person = Person.objects.get(pk=athlete.person.pk)\n communication = Communication.objects.get(pk=athlete.communication.pk)\n # sportClub = athlete.sportClub\n user_form = UserForm(request.POST or None, instance=user)\n person_form = PersonForm(request.POST or None, request.FILES or None, instance=person)\n communication_form = CommunicationForm(request.POST or None, instance=communication)\n sportClubUser_form = SportClubUserForm(request.POST or None, instance=athlete)\n clubs = SportsClub.objects.filter(clubUser__user=user)\n\n if request.method == 'POST':\n mail = request.POST.get('email')\n if mail != athlete.user.email:\n\n if User.objects.filter(email=mail) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n email=mail) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n email=mail) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(\n email=mail):\n messages.warning(request, 'Mail adresi başka bir kullanici tarafından kullanilmaktadir.')\n return 
render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n tc = request.POST.get('tc')\n if tc != athlete.person.tc:\n if Person.objects.filter(tc=tc) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n tc=tc) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n tc=tc) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(tc=tc):\n messages.warning(request, 'Tc kimlik numarasi sisteme kayıtlıdır. ')\n return render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n name = request.POST.get('first_name')\n surname = request.POST.get('last_name')\n year = request.POST.get('birthDate')\n year = year.split('/')\n\n client = Client('https://tckimlik.nvi.gov.tr/Service/KPSPublic.asmx?WSDL')\n if not (client.service.TCKimlikNoDogrula(tc, name, surname, year[2])):\n messages.warning(request, 'Tc kimlik numarasi ile isim soyisim dogum yılı bilgileri uyuşmamaktadır. 
')\n return render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n if user_form.is_valid() and communication_form.is_valid() and person_form.is_valid() and sportClubUser_form.is_valid():\n\n user = user_form.save(commit=False)\n user.username = user_form.cleaned_data['email']\n user.first_name = user_form.cleaned_data['first_name']\n user.last_name = user_form.cleaned_data['last_name']\n user.email = user_form.cleaned_data['email']\n user.save()\n person_form.save()\n communication_form.save()\n sportClubUser_form.save()\n\n log = str(user.get_full_name()) + \" klup uyesi guncellendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n messages.success(request, 'Kulüp Üyesi Başarıyla Güncellenmiştir.')\n\n # return redirect('sbs:kulup-uyeleri')\n\n else:\n\n for x in user_form.errors.as_data():\n messages.warning(request, user_form.errors[x][0])\n\n return render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n\n@login_required\ndef return_club_coach(request):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user_form = UserSearchForm()\n user = request.user\n club_user_array = SportClubUser.objects.none()\n coachs = Coach.objects.none()\n if request.method == 'POST':\n user_form = UserSearchForm(request.POST)\n sportsclup = request.POST.get('sportsClub')\n\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email or sportsclup):\n if user.groups.filter(name='KulupUye'):\n\n clubuser = 
SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n\n for club in clubs:\n coachs |= club.coachs.all().distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n club_user_array = Coach.objects.all()\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n if sportsclup:\n query &= Q(sportsclub__name__icontains=sportsclup)\n\n if user.groups.filter(name='KulupUye'):\n\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n for club in clubs:\n coachs |= club.coachs.all().distinct()\n\n coachs = coachs.filter(query).distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n clubs = SportsClub.objects.all()\n for club in clubs:\n coachs |= club.coachs.all().distinct()\n\n coachs = coachs.filter(query).distinct()\n coachs = Coach.objects.filter(query).distinct()\n\n sportclup = SearchClupForm(request.POST, request.FILES or None)\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.all()\n\n return render(request, 'kulup/kulup-antrenorler.html',\n {'athletes': coachs, 'user_form': user_form, 'Sportclup': sportclup})\n\n\n@login_required\ndef return_rapor_club(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n return render(request, 'kulup/kulupRapor.html')\n\n\n@login_required\ndef return_clup(request):\n # print('ben geldim')\n login_user = 
request.user\n user = User.objects.get(pk=login_user.pk)\n\n # /datatablesten gelen veri kümesi datatables degiskenine alindi\n if request.method == 'GET':\n datatables = request.GET\n print(datatables)\n\n\n elif request.method == 'POST':\n datatables = request.POST\n\n try:\n order = datatables.get('order[0][column]')\n # print('test=', order)\n draw = int(datatables.get('draw'))\n # print(\"draw degeri =\", draw)\n # Ambil start\n start = int(datatables.get('start'))\n # print(\"start degeri =\", start)\n # Ambil length (limit)\n length = int(datatables.get('length'))\n # print(\"lenght degeri =\", length)\n # Ambil data search\n search = datatables.get('search[value]')\n # print(\"search degeri =\", search)\n except:\n draw = 1\n start = 0\n length = 10\n\n if length == -1:\n modeldata = SportsClub.objects.all().order_by('-creationDate')\n total = modeldata.count()\n\n # clüp hepsi\n\n else:\n if search:\n\n modeldata = SportsClub.objects.filter(\n Q(name__icontains=search) | Q(shortName__icontains=search) | Q(clubMail__icontains=search))\n\n total = modeldata.count()\n\n # print(modeldata)\n\n\n else:\n modeldata = SportsClub.objects.all().order_by('-creationDate')[\n start:start + length]\n total = SportsClub.objects.all().count()\n\n say = start + 1\n start = start + length\n page = start / length\n\n beka = []\n\n for item in modeldata:\n athlete = Athlete.objects.filter(licenses__sportsClub=item).count()\n uye = item.clubUser.all().count()\n\n data = {\n 'say': say,\n 'pk': item.pk,\n\n 'name': item.name,\n\n 'uye': uye,\n #\n 'athlete': athlete,\n 'coach': item.coachs.all().count(),\n\n }\n beka.append(data)\n say += 1\n\n order = int(order)\n if order != 0:\n if order == 1:\n beka.sort(key=lambda item: item['name'], reverse=False)\n elif order == 2:\n beka.sort(key=lambda item: item['uye'], reverse=True)\n elif order == 3:\n beka.sort(key=lambda item: item['athlete'], reverse=True)\n elif order == 4:\n beka.sort(key=lambda item: item['coach'], 
reverse=True)\n else:\n beka.sort(key=lambda item: item['say'], reverse=False)\n\n response = {\n\n 'data': beka,\n 'draw': draw,\n 'recordsTotal': total,\n 'recordsFiltered': total,\n\n }\n\n return JsonResponse(response)\n\n\n\n\n\n@login_required\ndef return_club_person(request):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user_form = UserSearchForm()\n user = request.user\n club_user_array=SportClubUser.objects.none()\n if request.method == 'POST':\n user_form = UserSearchForm(request.POST)\n sportsclup = request.POST.get('sportsClub')\n\n\n\n\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email or sportsclup):\n club_user_array = []\n if user.groups.filter(name='KulupUye'):\n\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n\n club_user_array = SportClubUser.objects.filter(sportsclub__in=clubsPk).distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n club_user_array = SportClubUser.objects.all()\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n if sportsclup:\n query &=Q(sportsclub__name__icontains=sportsclup)\n\n club_user_array = []\n if user.groups.filter(name='KulupUye'):\n\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n\n club_user_array = SportClubUser.objects.filter(sportsclub__in=clubsPk).filter(query).distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n 
club_user_array = SportClubUser.objects.filter(query).distinct()\n\n sportclup = SearchClupForm(request.POST, request.FILES or None)\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.all()\n\n return render(request, 'kulup/kulup-uyeleri.html', {'athletes': club_user_array, 'user_form': user_form,'Sportclup':sportclup})\n\n\n@login_required\ndef return_club_role(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n club_role_form = ClubRoleForm()\n\n if request.method == 'POST':\n\n club_role_form = ClubRoleForm(request.POST)\n\n if club_role_form.is_valid():\n\n clubrole = ClubRole(name=club_role_form.cleaned_data['name'])\n clubrole.save()\n messages.success(request, 'Kulüp Üye Rolü Başarıyla Kayıt Edilmiştir.')\n return redirect('sbs:kulup-uye-rolu')\n\n else:\n\n messages.warning(request, 'Alanları Kontrol Ediniz')\n club_role = ClubRole.objects.all()\n return render(request, 'kulup/kulup-uye-rolu.html',\n {'club_role_form': club_role_form, 'club_role': club_role})\n\n\n@login_required\ndef deleteClubRole(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = ClubRole.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except ClubRole.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid 
@login_required
def deleteClubUser(request, pk):
    """AJAX endpoint that deletes the SportClubUser with the given pk.

    Returns a JSON payload describing success or failure; non-POST or
    non-AJAX calls are rejected. Redirects to login when access control
    fails.
    """
    perm = general_methods.control_access(request)

    if not perm:
        logout(request)
        return redirect('accounts:login')
    if request.method == 'POST' and request.is_ajax():
        try:
            obj = SportClubUser.objects.get(pk=pk)
            obj.delete()
            return JsonResponse({'status': 'Success', 'messages': 'delete successfully'})
        # Bug fix: the lookup above raises SportClubUser.DoesNotExist, not
        # ClubRole.DoesNotExist. Each Django model gets its own DoesNotExist
        # class, so catching the wrong one let a missing record escape as a
        # 500 instead of the JSON error below.
        except SportClubUser.DoesNotExist:
            return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})

    else:
        return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})
exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef updateClubRole(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n clubrole = ClubRole.objects.get(id=pk)\n clubrole_form = ClubRoleForm(request.POST or None, instance=clubrole)\n\n if request.method == 'POST':\n if clubrole_form.is_valid():\n clubrole_form.save()\n messages.success(request, 'Başarıyla Güncellendi')\n return redirect('sbs:kulup-uye-rolu')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kulupRolDuzenle.html',\n {'clubrole_form': clubrole_form})\n\n\n@login_required\ndef clubDelete(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportsClub.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except SportsClub.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef clubUpdate(request, pk):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n club = SportsClub.objects.get(id=pk)\n\n if request.user.groups.filter(name='KulupUye'):\n if not (club.clubUser.filter(user=request.user)):\n return redirect('sbs:kulupler')\n\n athletes = Athlete.objects.filter(licenses__sportsClub=club)\n\n\n try:\n com_id = club.communication.pk\n communication = Communication.objects.get(id=com_id)\n communication_form = CommunicationForm(request.POST or None, instance=communication)\n except:\n communication_form = CommunicationForm(request.POST or None)\n\n club_form = ClubForm(request.POST or None, 
request.FILES or None, instance=club)\n clubPersons = club.clubUser.all()\n clubCoachs = club.coachs.all()\n if request.method == 'POST':\n if club_form.is_valid():\n club_form.save()\n\n if not club.communication:\n communication = communication_form.save(commit=False)\n communication.save()\n club.communication=communication\n club.save()\n\n\n else:\n communication_form.save()\n\n log = str(club) + \" Klup güncellendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n messages.success(request, 'Başarıyla Güncellendi')\n return redirect('sbs:update-club', club.pk)\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kulupDuzenle.html',\n {'club_form': club_form, 'communication_form': communication_form, 'clubPersons': clubPersons,\n 'athletes': athletes,\n 'club': club, 'clubCoachs': clubCoachs})\n\n\n@login_required\ndef choose_coach(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n coaches = Coach.objects.all()\n user_form = UserSearchForm()\n if request.method == 'POST':\n\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email):\n messages.warning(request, 'Lütfen Arama Kriteri Giriniz.')\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n coaches = Coach.objects.filter(query)\n user_form = UserSearchForm(request.POST)\n athletes1 = request.POST.getlist('selected_options')\n if athletes1:\n students = [int(x) for x in athletes1]\n instances = Coach.objects.filter(id__in=students)\n club = SportsClub.objects.get(pk=pk)\n for coach in instances:\n club.coachs.add(coach)\n club.save()\n 
messages.success(request, 'Antrenör Başarıyla Eklenmiştir.')\n\n return redirect('sbs:update-club', pk=pk)\n\n return render(request, 'antrenor/antrenorsec.html', {'coaches': coaches, 'user_form': user_form})\n\n\n@login_required\ndef choose_sport_club_user(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n sportClubUsers = SportClubUser.objects.all()\n user_form = UserSearchForm()\n if request.method == 'POST':\n user_form = UserSearchForm(request.POST)\n athletes1 = request.POST.getlist('selected_options')\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email):\n print('')\n # messages.warning(request, 'Lütfen Arama Kriteri Giriniz.')\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n sportClubUsers = SportClubUser.objects.filter(query)\n if athletes1:\n students = [int(x) for x in athletes1]\n instances = SportClubUser.objects.filter(id__in=students)\n\n club = SportsClub.objects.get(pk=pk)\n for club_user in instances:\n club.clubUser.add(club_user)\n club.save()\n\n log = str(club) + \" Klup uyesi ekledi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n\n\n messages.success(request, 'Kulüp Üyesi Başarıyla Eklenmiştir.')\n\n return redirect('sbs:update-club', pk=pk)\n\n return render(request, 'kulup/kulupuyesisec.html', {'coaches': sportClubUsers, 'user_form': user_form})\n\n\n@login_required\ndef return_belt_exams(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user = request.user\n\n if user.groups.filter(name='KulupUye'):\n\n clubuser = 
@login_required
def detail_belt_exam(request, pk):
    """Show the detail page of a single belt exam.

    NOTE(review): every sibling view is wrapped in @login_required; this one
    relied only on the in-body permission check, so anonymous requests hit
    control_access with an AnonymousUser. The decorator restores consistency.
    """
    perm = general_methods.control_access(request)

    if not perm:
        logout(request)
        return redirect('accounts:login')
    exam = BeltExam.objects.get(pk=pk)
    return render(request, 'kulup/kusak-sinavi-incele.html', {'exam': exam})


@login_required
def approve_belt_exam(request, pk):
    """Mark a belt exam as approved and redirect back to its detail page."""
    perm = general_methods.control_access(request)

    if not perm:
        logout(request)
        return redirect('accounts:login')
    exam = BeltExam.objects.get(pk=pk)
    # Guard: do not rerun the promotion step when an already approved exam
    # is approved again.
    if exam.status != BeltExam.APPROVED:
        # TODO(review): the per-athlete belt promotion that used to live here
        # is disabled; the old loop only built unsaved Level() instances,
        # which had no effect, so it was removed.
        pass

    exam.status = BeltExam.APPROVED
    exam.save()
    messages.success(request, 'Sınav Onaylanmıştır.')
    return redirect('sbs:kusak-sinavi-incele', pk=pk)


@login_required
def denied_belt_exam(request, pk):
    """Mark a belt exam as denied and redirect back to its detail page.

    NOTE(review): @login_required added for consistency with the sibling
    views (this endpoint mutates state and was previously reachable by
    anonymous users before the in-body check ran).
    """
    perm = general_methods.control_access(request)

    if not perm:
        logout(request)
        return redirect('accounts:login')
    exam = BeltExam.objects.get(pk=pk)
    exam.status = exam.DENIED
    exam.save()
    return redirect('sbs:kusak-sinavi-incele', pk=pk)
redirect('accounts:login')\n login_user = request.user\n user = User.objects.get(pk=login_user.pk)\n sinav = BeltExam.objects.get(pk=pk)\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubsPk = []\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n for club in clubs:\n clubsPk.append(club.pk)\n exam_athlete = []\n for item in sinav.athletes.all():\n exam_athlete.append(item.user.pk)\n athletes = Athlete.objects.filter(licenses__sportsClub__in=clubsPk).exclude(belts=None).exclude(licenses=None).exclude(beltexam__athletes__user__in = exam_athlete).filter(licenses__branch=sinav.branch,licenses__status='Onaylandı').filter(belts__branch=sinav.branch,belts__status='Onaylandı').distinct()\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exam_athlete=[]\n for item in sinav.athletes.all():\n exam_athlete.append(item.user.pk)\n # print(sinav.branch)\n athletes=Athlete.objects.exclude(belts=None).exclude(licenses=None).exclude(beltexam__athletes__user__in = exam_athlete).filter(licenses__branch=sinav.branch,licenses__status='Onaylandı').filter(belts__branch=sinav.branch,belts__status='Onaylandı')\n # .exclude(belts__definition__parent_id=None) eklenmeli ama eklendigi zaman kuşaklarindan bir tanesi en üst olunca almıyor\n if request.method == 'POST':\n\n athletes1 = request.POST.getlist('selected_options')\n if athletes1:\n for x in athletes1:\n sinav.athletes.add(x)\n return redirect('sbs:kusak-sinavi-incele', pk=pk)\n return render(request, 'kulup/kusak-sınavı-antroner-sec.html', {'athletes': athletes})\n\n\n@login_required\ndef choose_coach_clup(request, pk):\n perm = general_methods.control_access(request)\n if not perm:\n logout(request)\n return redirect('accounts:login')\n login_user = request.user\n user = User.objects.get(pk=login_user.pk)\n clup = SportsClub.objects.get(pk=pk)\n\n coachsPk = []\n for coach in clup.coachs.all():\n coachsPk.append(coach.pk)\n athletes = 
Coach.objects.exclude(id__in=coachsPk)\n\n # license.athlete_set.first\n\n if request.method == 'POST':\n coach = request.POST.getlist('selected_options')\n if coach:\n for coa in coach:\n clup.coachs.add(Coach.objects.get(pk=coa))\n clup.save()\n\n log = str(clup) + \" Klup antrenor ekledi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n return redirect('sbs:update-club', pk=pk)\n return render(request, 'antrenor/Antrenor-sec.html', {'athletes': athletes})\n\n\n\n\n\n\n\n\n\n\n@login_required\ndef choose_coach(request, pk):\n perm = general_methods.control_access(request)\n if not perm:\n logout(request)\n return redirect('accounts:login')\n login_user = request.user\n user = User.objects.get(pk=login_user.pk)\n sinav = BeltExam.objects.get(pk=pk)\n athletes = Coach.objects.none()\n # .filter(grades__branch=sinav.branch) eklenmeli\n coa=[]\n for item in sinav.coachs.all():\n coa.append(item.user.pk)\n athletes = Coach.objects.filter(grades__branch=sinav.branch,grades__status='Onaylandı').exclude(beltexam__coachs__user_id__in=coa).filter(visa__startDate__year=timezone.now().year).exclude(grades=None).exclude(visa=None).exclude(grades__definition__name='1.Kademe').exclude(grades__definition=None).distinct()\n # for fd in coach:\n # for visa in fd.visa.all():\n # if(date(sinav.examDate.year,sinav.examDate.month,sinav.examDate.day)-date(visa.creationDate.year,visa.creationDate.month,visa.creationDate.day)).days<365:\n # athletes|=Coach.objects.filter(pk=fd.pk).distinct()\n\n if request.method == 'POST':\n athletes1 = request.POST.getlist('selected_options')\n if athletes1:\n for x in athletes1:\n if not sinav.coachs.all().filter(beltexam__coachs__user_id=x):\n sinav.coachs.add(x)\n sinav.save()\n return redirect('sbs:kusak-sinavi-incele', pk=pk)\n return render(request, 'kulup/kusak-sınavı-antroner-sec.html', {'athletes': athletes})\n\n\n@login_required\ndef add_belt_exam(request):\n perm = general_methods.control_access(request),\n if not perm:\n 
logout(request)\n return redirect('accounts:login')\n exam_form = BeltExamForm(request.POST, request.FILES or None)\n user = request.user\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n # print(club.dataAccessControl)\n clubsPk.append(club.pk)\n exam_form.fields['sportClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exam_form.fields['sportClub'].queryset = SportsClub.objects.all()\n\n if request.method == 'POST':\n exam_form = BeltExamForm(request.POST, request.FILES or None)\n if exam_form.is_valid():\n exam = exam_form.save()\n messages.success(request, 'Sınav başarıyla oluşturuldu')\n return redirect('sbs:kusak-sinavlari')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n return render(request, 'kulup/kusak-sinavi-ekle.html', {'exam_form': exam_form})\n\n\n@login_required\ndef update_belt_exam(request, pk):\n # print('kusak sinavi düzenle çalisti')\n perm = general_methods.control_access(request)\n if not perm:\n logout(request)\n return redirect('accounts:login')\n sinav = BeltExam.objects.get(pk=pk)\n # license_form = LicenseForm(request.POST or None, request.FILES or None, instance=license,initial={'sportsClub': license.sportsClub})\n # print(sinav.sportClub)\n exam_form = BeltExamForm(request.POST or None, request.FILES or None, instance=sinav,\n initial={'sportsClub': sinav.sportClub.name})\n # print(exam_form)\n user = request.user\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n exam_form.fields['sportClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exam_form.fields['sportClub'].queryset = 
SportsClub.objects.all()\n\n if request.method == 'POST':\n exam_form = BeltExamForm(request.POST, request.FILES or None)\n if exam_form.is_valid():\n exam = exam_form.save()\n messages.success(request, 'Sınav başarıyla güncellendi')\n return redirect('sbs:kusak-sinavlari')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kusak-sinavi-güncelle.html', {'exam_form': exam_form})\n\n\n@login_required\ndef delete_belt_exam(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportsClub.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except SportsClub.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef updateClubPersonsProfile(request):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user = request.user\n club_user = SportClubUser.objects.get(user=user)\n person = Person.objects.get(pk=club_user.person.pk)\n communication = Communication.objects.get(pk=club_user.communication.pk)\n user_form = DisabledUserForm(request.POST or None, instance=user)\n person_form = DisabledPersonForm(request.POST or None, request.FILES or None, instance=person)\n communication_form = DisabledCommunicationForm(request.POST or None, instance=communication)\n club_form = DisabledSportClubUserForm(request.POST or None, instance=club_user)\n password_form = SetPasswordForm(request.user, request.POST)\n\n if request.method == 'POST':\n data = request.POST.copy()\n person_form = DisabledPersonForm(data)\n\n if len(request.FILES) > 0:\n person.profileImage = request.FILES['profileImage']\n person.save()\n 
messages.success(request, 'Profil Fotoğrafı Başarıyla Güncellenmiştir.')\n\n if password_form.is_valid():\n user.set_password(password_form.cleaned_data['new_password2'])\n user.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Şifre Başarıyla Güncellenmiştir.')\n return redirect('sbs:kulup-uyesi-profil-guncelle')\n\n return render(request, 'kulup/kulup-uyesi-profil-guncelle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'password_form': password_form, 'club_form': club_form})\n\n\n@login_required\ndef Exam_list_antroner_delete(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportsClub.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except SportsClub.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n\n# listeden antroner sil\n\n@login_required\ndef choose_coach_remove(request, pk, exam_pk):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n sinav = BeltExam.objects.get(pk=exam_pk)\n sinav.coachs.remove(Coach.objects.get(pk=pk))\n\n return redirect('sbs:kusak-sinavi-incele', pk=exam_pk)\n\n\n@login_required\ndef choose_athlete_remove(request, pk, exam_pk):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n sinav = BeltExam.objects.get(pk=exam_pk)\n sinav.athletes.remove(Athlete.objects.get(pk=pk))\n\n return redirect('sbs:kusak-sinavi-incele', pk=exam_pk)\n","sub_path":"sbs/Views/ClubViews.py","file_name":"ClubViews.py","file_ext":"py","file_size_in_byte":48673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"568065191","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom Qt import QtCore\nfrom Qt import QtGui\nfrom Qt import QtWidgets\n# QtCore.QTextCodec.setCodecForTr(QTextCodec.codecForName(\"utf8\"))\n\n\nclass SplitterWidget(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super(SplitterWidget, self).__init__(parent)\n\n mainSplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal, self)\n\n leftText = QtWidgets.QTextEdit((u\"左窗口
sldjflksjdklf\"), mainSplitter)\n leftText.setAlignment(QtCore.Qt.AlignCenter)\n\n rightSplitter = QtWidgets.QSplitter(QtCore.Qt.Vertical, mainSplitter)\n rightSplitter.setOpaqueResize(False)\n\n upText = QtWidgets.QTextEdit((u\"上窗口\"), rightSplitter)\n upText.setAlignment(QtCore.Qt.AlignCenter)\n\n bottomText = QtWidgets.QTextEdit((u\"下窗口\"), rightSplitter)\n bottomText.setAlignment(QtCore.Qt.AlignCenter)\n\n mainSplitter.setStretchFactor(1, 20)\n rightSplitter.setStretchFactor(2, 1)\n # mainSplitter.setWindowTitle((\"分割窗口\"))\n\n self.setCentralWidget(mainSplitter)\n\n\nclass MainWidget(QtWidgets.QMainWindow):\n def __init__(self,parent=None):\n super(MainWidget,self).__init__(parent)\n self.setWindowTitle((u\"依靠窗口\"))\n\n widget = SplitterWidget()\n\n self.setCentralWidget(widget)\n\n #停靠窗口 1\n dock1=QtWidgets.QDockWidget((u\"停靠窗口 1\"),self)\n dock1.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable)\n dock1.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)\n te1=QtWidgets.QTextEdit((u\"窗口 1,可在 Main Window 的左部和右部停靠,不可浮动,不可关闭\"))\n dock1.setWidget(te1)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea,dock1)\n\n #停靠窗口 2\n dock2=QtWidgets.QDockWidget((u\"停靠窗口 2\"),self)\n dock2.setFeatures(QtWidgets.QDockWidget.DockWidgetFloatable|QtWidgets.QDockWidget.DockWidgetClosable)\n # dock2.setTitleBarWidget()\n te2=QtWidgets.QTextEdit((u\"窗口 2,只可浮动\"))\n dock2.setWidget(te2)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea,dock2)\n\n #停靠窗口 2\n dock4=QtWidgets.QDockWidget((u\"停靠窗口 4\"),self)\n dock4.setFeatures(QtWidgets.QDockWidget.DockWidgetFloatable|QtWidgets.QDockWidget.DockWidgetClosable)\n te4=QtWidgets.QTextEdit((u\"窗口 4,只可浮动\"))\n dock4.setWidget(te4)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea,dock4)\n\n #停靠窗口 3\n dock3=QtWidgets.QDockWidget((u\"停靠窗口 3\"),self)\n dock3.setFeatures(QtWidgets.QDockWidget.AllDockWidgetFeatures)\n te3=QtWidgets.QTextEdit((u\"窗口 3,可在 Main Window 任意位置停靠,可浮动,可关闭\"))\n dock3.setWidget(te3)\n 
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,dock3)\n\n #dock1 和dock4 合并\n self.tabifyDockWidget(dock1, dock4)\n\n\nif __name__ == \"__main__\":\n app=QApplication(sys.argv)\n main=MainWidget()\n main.show()\n app.exec_()","sub_path":"dog/untitled1/test_1/pyqtlayout26_03_v01.py","file_name":"pyqtlayout26_03_v01.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"352067254","text":"from PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nfrom DatabaseForGUI import *\r\n\r\nclass OtherGUI(QMainWindow):\r\n def __init__(self, parent):\r\n super().__init__(parent)\r\n \r\n self._parent = parent\r\n self.setWindowTitle(\"This is my other window\")\r\n\r\n # Create label and line edit\r\n self._name_label = QLabel(\"List of teachers\")\r\n self._teacher_list = QComboBox()\r\n \r\n self._tableview = QTableView()\r\n \r\n self.PopulateTeacherComboBox()\r\n self.PopulateTableView()\r\n\r\n # Create button\r\n self._close_button = QPushButton(\"Close\")\r\n\r\n # Create a vertical box layout to put the label, line edit and button into\r\n self._layout = QVBoxLayout()\r\n\r\n # Add the widgets to the vertical box layout\r\n self._layout.addWidget(self._name_label)\r\n self._layout.addWidget(self._teacher_list)\r\n self._layout.addWidget(self._tableview)\r\n self._layout.addWidget(self._close_button)\r\n\r\n # We then need to set the layout for the QMainWindow. However, we can't\r\n # use the setLayout method as it doesn't work properly. 
Therefore, put\r\n # the layout inside a widget and call setCentralWidget on the QMainWindow.\r\n self._widget = QWidget()\r\n self._widget.setLayout(self._layout)\r\n\r\n self.setCentralWidget(self._widget)\r\n\r\n # Connect up the button to some method, in this case 'add_name'\r\n self._close_button.clicked.connect(self.onCloseClicked)\r\n \r\n def onCloseClicked(self):\r\n # Close the window\r\n #self._parent.setHidden(False)\r\n self.close()\r\n\r\n def PopulateTeacherComboBox(self):\r\n teachers = g_database.GetAllTeachers()\r\n for teacher in teachers:\r\n self._teacher_list.addItem(teacher[1])\r\n \r\n def PopulateTableView(self):\r\n teachers = g_database.GetAllTeachers()\r\n model = QStandardItemModel()\r\n row = 0\r\n for teacher in teachers:\r\n for column in range(2):\r\n item = QStandardItem(\"{}\".format(teacher[column]))\r\n model.setItem(row, column, item)\r\n row+=1\r\n \r\n self._tableview.setModel(model)","sub_path":"Implementation/OtherGUI.py","file_name":"OtherGUI.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"529218521","text":"# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2019-Present Datadog, Inc.\nfrom __future__ import annotations\n\nfrom typing import Union, TYPE_CHECKING\n\nfrom datadog_api_client.model_utils import (\n ModelNormal,\n cached_property,\n unset,\n UnsetType,\n)\n\n\nif TYPE_CHECKING:\n from datadog_api_client.v2.model.rum_aggregation_function import RUMAggregationFunction\n from datadog_api_client.v2.model.rum_compute_type import RUMComputeType\n\n\nclass RUMCompute(ModelNormal):\n @cached_property\n def openapi_types(_):\n from datadog_api_client.v2.model.rum_aggregation_function import RUMAggregationFunction\n from datadog_api_client.v2.model.rum_compute_type import RUMComputeType\n\n return {\n \"aggregation\": (RUMAggregationFunction,),\n \"interval\": (str,),\n \"metric\": (str,),\n \"type\": (RUMComputeType,),\n }\n\n attribute_map = {\n \"aggregation\": \"aggregation\",\n \"interval\": \"interval\",\n \"metric\": \"metric\",\n \"type\": \"type\",\n }\n\n def __init__(\n self_,\n aggregation: RUMAggregationFunction,\n interval: Union[str, UnsetType] = unset,\n metric: Union[str, UnsetType] = unset,\n type: Union[RUMComputeType, UnsetType] = unset,\n **kwargs,\n ):\n \"\"\"\n A compute rule to compute metrics or timeseries.\n\n :param aggregation: An aggregation function.\n :type aggregation: RUMAggregationFunction\n\n :param interval: The time buckets' size (only used for type=timeseries)\n Defaults to a resolution of 150 points.\n :type interval: str, optional\n\n :param metric: The metric to use.\n :type metric: str, optional\n\n :param type: The type of compute.\n :type type: RUMComputeType, optional\n \"\"\"\n if interval is not unset:\n kwargs[\"interval\"] = interval\n if metric is not unset:\n kwargs[\"metric\"] = metric\n if type is not unset:\n kwargs[\"type\"] = 
type\n super().__init__(kwargs)\n\n self_.aggregation = aggregation\n","sub_path":"src/datadog_api_client/v2/model/rum_compute.py","file_name":"rum_compute.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"166063107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 19 09:32:07 2017\n\n@author: roy\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 11 19:35:19 2017\n\n@author: roy\n\"\"\"\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n#src = np.asarray([[527, 500], [763, 500], [910, 600], [392, 600]], dtype=np.float32)\nsrc = np.asarray([[524, 500], [768, 500], [910, 600], [392, 600]], dtype=np.float32)\n#dst = np.asarray([[392, 300], [910, 300], [910, 600], [390, 600]], dtype=np.float32)\ndst = np.asarray([[196, 300], [455, 300], [455, 600], [196, 600]], dtype=np.float32)\n\nimage = np.asarray(Image.open('./test_images/straight_undist2.jpg').convert('L'))\n\n\nmt_persp=cv2.getPerspectiveTransform(src, dst)\n\nimg_size = (image.shape[1]//2, image.shape[0])\n\nimg_warped = cv2.warpPerspective(image, mt_persp, img_size)\n\nplt.figure(figsize=(12,9))\nplt.gray()\nplt.imshow(img_warped)\n\ndef lane_search(image):\n nwindows = 10\n margin = 100\n minpix = 50\n \n img_h, img_w = image.shape\n window_height = img_h//nwindows\n \n hstg = np.sum(image[img_h//2:, :], axis=0)\n img_out = np.dstack((image, image, image))*255\n \n midpoint = img_w//2\n leftx_base = np.argmax(hstg[:midpoint])\n rightx_base = np.argmax(hstg[midpoint:]) + midpoint\n \n nonzero = image.nonzero()\n nonzeroy = nonzero[0]\n nonzerox = nonzero[1]\n \n leftx_current = leftx_base\n rightx_current = rightx_base\n \n left_lane_inds = []\n right_lane_inds = []\n\n for window in range(nwindows):\n win_y_low = img_h - (window + 1)*window_height\n win_y_high = img_h - window * window_height\n \n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n \n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n \n #cv2.rectangle(img_out, (win_xleft_low, win_y_low), \n # (win_xleft_high, win_y_high), (0, 255, 0), 2)\n 
#cv2.rectangle(img_out, (win_xright_low, win_y_low), \n # (win_xright_high, win_y_high), (0, 255, 0), 2)\n \n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n \n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n \n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n left_fit = np.polyfit(lefty, leftx, 2)\n print(left_fit)\n \n radius_l = cal_radius(leftx, lefty)\n print(radius_l)\n\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n right_fit = np.polyfit(righty, rightx, 2)\n print(right_fit)\n \n radius_r = cal_radius(rightx, righty)\n print(radius_r)\n \n # find indice for the whole image\n whole_img = np.indices((img_h, img_w))\n imgx= whole_img[1].flatten()\n imgy = whole_img[0].flatten()\n \n #print('imgx shape {}, imgy shape {}'.format(imgx.shape, imgy.shape))\n # got indice between left lane and right lane\n lane_inds = ((imgx >= (left_fit[0]*(imgy**2) + left_fit[1] * imgy + left_fit[2])) \n & (imgx < (right_fit[0]*(imgy**2) + right_fit[1] * imgy + right_fit[2])))\n # paint the lane to green\n img_out[imgy[lane_inds], imgx[lane_inds]] = [0, 255, 0]\n \n #paint left track to Red, right track to green\n img_out[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n img_out[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n \n\n return img_out\n\ndef cal_radius(x, y):\n mppx = 3.7/270\n #mppx = 3.7/540\n mppy = 3.0/100\n 
fit_rw = np.polyfit(y * mppy, x * mppx, 2)\n py_rw = 350 * mppy\n radius = (1.0 + (2*fit_rw[0]*py_rw+fit_rw[1])**2)**1.5/(2.0*fit_rw[0])\n return radius\n \ndef sobel_filter(image_gray, orient='x', kern_size = 3, thresh=(0, 255)):\n # Calculate directional gradient\n # Apply threshold\n if orient == 'x':\n sobel_img = cv2.Sobel(image_gray, cv2.CV_64F, 1, 0, ksize=kern_size)\n elif orient == 'y':\n sobel_img = cv2.Sobel(image_gray, cv2.CV_64F, 0, 1, ksize=kern_size)\n \n sobel_img[ sobel_img < 0 ] =0\n #sobel_img = np.abs(sobel_img)\n scaled_sobel = np.uint8(255 * sobel_img/np.max(sobel_img))\n grad_binary = np.zeros_like(image_gray)\n grad_binary[( scaled_sobel >= thresh[0] ) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\nimg3 = sobel_filter(img_warped, orient='x', kern_size=5, thresh=(100, 255))\nplt.figure(figsize=(12,9))\nplt.gray()\nplt.imshow(img3)\n\nimg4 = lane_search(img3)\n\nplt.figure(figsize=(12,9))\nplt.gray()\nplt.imshow(img4)\n","sub_path":"test_curvature.py","file_name":"test_curvature.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"296079053","text":"# By submitting this assignment, I agree to the following:\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do\"\n# \"I have not given or received any unauthorized aid on this assignment\"\n#\n# Name: Chase Johnson, Anna Olmedo, Bryan Jones, Gustavo Rodriguez\n# Section: 413\n# Assignment: Lab 02_Act3_Bonus\n# Date: 5 September, 2019\n\nfrom math import *\n\n# Set values\nxi = 30 # Starting time\ny = 50 # Distance Traveled\nspeed = 565/15\ntrackLength = 2*pi*0.5\n\nchoice = input(\"Are you giving me (m)inutes or (s)econds [s]? \")\nms = \"s\"\nif \"m\" in choice:\n ms = \"m\"\n\ntimeToRace = float(input(\"How long do you want the car to drive? \"))\nif ms == \"m\":\n timeToRace = timeToRace*60\n\ny += (timeToRace-xi)*(speed) # Current position\nprint(\"Distance at time: \" + str(int(timeToRace)) + \" = \" + str(y) + \" meters\")\n","sub_path":"Group_Labs/Lab02/Act3_Bonus.py","file_name":"Act3_Bonus.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"73254542","text":"#! -*- coding:utf-8 -*-\n\n'''\n@Author: ZM\n@Date and Time: 2021/5/4 10:39\n@File: nms.py\n'''\n\nimport numpy as np\n\ndef nms(bboxes, scores, iou_threshold=0.5, max_num_boxes=20):\n y1 = bboxes[:, 0]\n x1 = bboxes[:, 1]\n y2 = bboxes[:, 2]\n x2 = bboxes[:, 3]\n\n areas = (y2 - y1) * (x2 - x1)\n order = np.argsort(scores)[::-1]\n nms_index = []\n\n while np.size(order) > 0:\n i = order[0]\n nms_index.append(i)\n\n y_min = np.maximum(y1[i], y1[order[1:]])\n x_min = np.maximum(x1[i], x1[order[1:]])\n y_max = np.minimum(y2[i], y2[order[1:]])\n x_max = np.minimum(x2[i], x2[order[1:]])\n inter = np.maximum(0, y_max - y_min) * np.maximum(0, x_max - x_min)\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n idx = np.nonzero(iou <= iou_threshold)[0]\n order = order[idx + 1]\n\n return nms_index[:max_num_boxes]\n","sub_path":"yolov3/nms.py","file_name":"nms.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"151600230","text":"import numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport os\nimport warnings\nimport csv\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dir\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n#args = [\"/Users/Wolf/Desktop/Area_Weight_correlation/tiff_images\"]\n\ncount = 0\nrows = []\n\ndef calculateHoughCircles(fileName):\n # load the image, clone it for output\n image = cv2.imread(fileName)\n if (image.shape[0] > image.shape[1]):\n image = imutils.rotate_bound(image, 270)\n output = image.copy()\n global rows\n global count\n height, width, depth = image.shape\n\n # Grayscale\n imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Median\n #*** Blurring can require modification for the script to work ***\n blur = cv2.medianBlur(imgray, 23)\n # Thresh\n ret1, thresh1 = cv2.threshold(blur, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)\n \n #*** (ret1 - 12) is the threshold as a factor of the one determined by cv2.THRESH_TOZERO + cv2.THRESH_OTSU above. Its modification can fix the script. ***\n ret2, thresh = cv2.threshold(blur, (ret1 - 12), 255, cv2.THRESH_TOZERO)\n edged = cv2.Canny(thresh, 0, 255)\n\n # Crop image\n cropped = np.zeros((height, width), np.uint8)\n # NOTE: This cropping can be very problematic. It can cut too much or not enough depending on the image. Make sure to adjust this when implementing the algorithm.\n cropped[250:(height - 250), 450:(width - 450)] = -1\n\n masked = cv2.bitwise_and(edged, edged, mask=cropped)\n\n #In the line below, the parameters are very relevant for the alg. Radius, sensitivity, minDist, etc. can affect the algorithm. Change them before looking further.\n circles = cv2.HoughCircles(masked, cv2.HOUGH_GRADIENT, 5, 320, minRadius=120, maxRadius=160)\n\n # detect circles in the image\n # https://docs.opencv.org/3.1.0/da/d53/tutorial_py_houghcircles.html\n # VERY parameter specific. 
This requires tweaking at the photo level and is non generalizable.\n # Standardized pictures should fix this problem. We have a ~95% success rate\n\n # First check for 96 wells\n warn = False\n kill = False\n #print(circles.shape)\n if circles.shape[1] > 96:\n warnings.warn(\"96 wells were not detected! Skipping image\")\n print(\"Filename: \", os.path.basename(fileName))\n print(\"Greater than 96 wells were detected. Skipping image.\")\n kill = True\n \n if circles.shape[1] < 96:\n warnings.warn(\"96 wells were not detected! Skipping image.\")\n print(\"Filename: \", os.path.basename(fileName))\n print(\"Less than than 96 wells were detected. Skipping image.\")\n kill = True\n\n if circles is not None and kill == False:\n count = count + 1\n # convert the (x, y) coordinates and radius of the circles to integers\n circles = np.round(circles[0, :]).astype(\"int\")\n circ = circles.tolist()\n circ.sort(key=lambda x: x[1])\n # Hardcode list structure - rows on horizontals\n Lists = [[] for _ in range(8)]\n Lists[0] = circ[0:12]\n Lists[1] = circ[12:24]\n Lists[2] = circ[24:36]\n Lists[3] = circ[36:48]\n Lists[4] = circ[48:60]\n Lists[5] = circ[60:72]\n Lists[6] = circ[72:84]\n Lists[7] = circ[84:96]\n\n for l in Lists:\n if (len(l) != 12):\n warnings.warn(\"Wrong count of wells in a row\")\n l.sort(key=lambda x: x[0])\n for i in range(8):\n for n in range(12):\n (x, y, r) = Lists[i][n]\n xo = np.round(x).astype(\"int\")\n yo = np.round(y).astype(\"int\")\n ro = np.round(r).astype(\"int\")\n\n crop = output[(y - r):(y + r), (x - r):(x + r)]\n crop_gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n ret, threshCrop1 = cv2.threshold(crop_gray, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)\n \n #*** (ret1 - 15) is the threshold as a factor of the one determined by cv2.THRESH_TOZERO + cv2.THRESH_OTSU above. Its modification can fix the script. 
***\n ret, threshCrop = cv2.threshold(crop_gray, (ret - 15), 255, cv2.THRESH_TOZERO)\n ret2, tc2 = cv2.threshold(threshCrop, 1, 255, cv2.THRESH_BINARY_INV)\n\n height, width = 2 * r, 2 * r\n mask = np.zeros((height, width), np.uint8)\n \n #Cut circle with radius = 25 from section of image. This keeps edges of well from interferring with flies. \n cv2.circle(mask, (r, r), (r - 25), (255, 255, 255), thickness=-1)\n masked_data = cv2.bitwise_and(tc2, tc2, mask=mask)\n \n image, contours, hierarchy = cv2.findContours(masked_data, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contour_list = []\n area = 0\n for contour in contours:\n approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)\n tempArea = cv2.contourArea(contour)\n (x, y), radius = cv2.minEnclosingCircle(contour)\n #Ad hoc morphology check\n if (len(approx) > 5) & (tempArea < 5000) & (tempArea > 1000) & (3.1415 * (radius ** 2) / 7 < tempArea):\n contour_list.append(contour)\n area = tempArea\n cv2.drawContours(masked_data, contour_list, -1, (50, 50, 50), 3)\n cv2.circle(output, (xo, yo), ro, (0, 255, 0), 4)\n rows.append({'NumericalLocation(Row)': str(i + 1), 'NumericalLocation(Col)': str(n + 1), 'Area': area,'FileName': os.path.basename(fileName)})\n if warn:\n cv2.imshow(\"output\", output)\n cv2.waitKey(0)\n\n\n\n\ntotal = 0\nfor file in os.listdir(args[0]):\n if file.endswith(\".tiff\"):\n total = total + 1\n print(os.path.join(args[0], file))\n calculateHoughCircles(os.path.join(args[0], file))\n\nwith open('Output.csv', 'w') as csvfile:\n fieldnames = ['NumericalLocation(Row)', 'NumericalLocation(Col)', 'Area', \"FileName\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(rows)\n \nprint(\"Used \", count, \" out of \", total, \" images.\")\n\nif count/total < 0.75:\n warnings.warn(\"Less than 75% of images used.\")\nif count < 10:\n warnings.warn(\"Used less than 10 images for size 
estimates.\")\n","sub_path":"AssortedScripts/FlySizer.py","file_name":"FlySizer.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"108034687","text":"# PDFs with Python - get number of pages, rotate pdf.\n# Input - dummy.pdf\n# Output - tilt.pdf\n\nimport PyPDF2\n\nwith open('dummy.pdf', 'rb') as file:\n # print(file)\n reader = PyPDF2.PdfFileReader(file)\n print(reader.numPages) # get number of pages\n page = reader.getPage(0)\n page.rotateCounterClockwise(90) # rotate pdf\n writer = PyPDF2.PdfFileWriter()\n writer.addPage(page)\n with open('tilt.pdf', 'wb') as new_file:\n writer.write(new_file)\n","sub_path":"pdf1.py","file_name":"pdf1.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"333973630","text":"\"\"\"\nZMQ Sender, Alerts framework.\n\n\"\"\"\n\n\nimport os\nimport logging\n\nimport zmq\n\nfrom WMCore.Alerts.Alert import RegisterMsg, UnregisterMsg, ShutdownMsg\n\n\n\nclass Sender(object):\n \"\"\"\n ZMQ sender to dispatch alerts to a target.\n\n \"\"\"\n # this delay specifies how long to wait when there are un-delivered\n # messages in the ZMQ buffer when closing the socket (channel) / context.\n # some messages may be lost but solves the issue of hanging esp. in the\n # test when there is no receiver available (ticket #1837)\n LINGER_DELAY = 1000 # [ms]\n\n\n def __init__(self, target, controller, label = None):\n self._label = label or \"Sender_%s\" % os.getpid()\n self._context = zmq.Context()\n # set up a channel to send work\n self._workChannel = self._context.socket(zmq.PUSH)\n self._workChannel.setsockopt(zmq.LINGER, self.LINGER_DELAY)\n self._workChannel.connect(target)\n # set up a control channel\n self._contChannel = self._context.socket(zmq.PUB)\n self._contChannel.setsockopt(zmq.LINGER, self.LINGER_DELAY)\n self._contChannel.connect(controller)\n # socket closure will be done on garbage collection of Sender instance\n\n\n def __call__(self, alert):\n \"\"\"\n Send the alert instance to the target that this sender represents.\n\n \"\"\"\n self._workChannel.send_json(alert)\n logging.debug(\"Alert %s sent.\" % alert)\n\n\n def register(self):\n \"\"\"\n Send a register message to the target.\n\n \"\"\"\n self._contChannel.send_json(RegisterMsg(self._label))\n logging.debug(\"Register message sent for %s.\" % self._label)\n\n\n def unregister(self):\n \"\"\"\n Send an unregister message to the target.\n\n \"\"\"\n self._contChannel.send_json(UnregisterMsg(self._label))\n logging.debug(\"Unregister message sent for %s.\" % self._label)\n\n\n def sendShutdown(self):\n \"\"\"\n Tells the Receiver to shut down.\n This method mostly here for convenience in tests.\n\n \"\"\"\n self._contChannel.send_json(ShutdownMsg())\n 
logging.debug(\"Shutdown message sent.\")\n","sub_path":"src/python/WMCore/Alerts/ZMQ/Sender.py","file_name":"Sender.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"157037937","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/tt/actions/write/stop.py\n# Compiled at: 2020-03-21 10:42:53\n# Size of source mod 2**32: 513 bytes\nfrom tt.dataaccess.utils import get_data_store\nfrom tt.actions.utils.utils import ensure_working\nfrom tt.dateutils.dateutils import formatted_str_for_isotime_str\n\ndef action_stop(colorizer, time):\n data = get_data_store().load()\n ensure_working(data)\n current = data['work'][(-1)]\n current['end'] = time\n get_data_store().dump(data)\n print('So you stopped working on ' + colorizer.red(current['name']) + ' at ' + colorizer.yellow(formatted_str_for_isotime_str(time, '%H:%M')) + '.')","sub_path":"pycfiles/tt_time_tracker-1.0.2-py3.6/stop.cpython-36.py","file_name":"stop.cpython-36.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"140866423","text":"import sys\nimport time\nfrom _2_gibbs_sample import gibbsSampler\n\nwith open(sys.argv[1], \"r\")as file:\n data = file.readlines()\nnums = data[0].split(\" \")\nk = int(nums[0].strip())\nt = int(nums[1].strip())\nn = int(nums[2].strip())\ndna = []\nfor i in range(1, len(data)):\n dna.append(data[i].strip())\n\n\nstart = time.time()\nprint(\"Starting...\")\n# with open(\"res.txt\", \"w\")as res:\n# for i in range(1000):\n# bestmotifs, bestscore = randomized_motif_search(dna, k, t)\n# res.write(\"{}: {}\\n\".format(\", \".join(bestmotifs), str(bestscore)))\n\nresmotifs = []\nresscore = 10000000\nfor i in range(20):\n x = time.time()\n bestmotifs, bestscore = gibbsSampler(dna, k, t, n, x)\n if i == 0 or bestscore < resscore:\n resmotifs = bestmotifs\n resscore = bestscore\n\nprint(\"{}:{}\".format(resmotifs, resscore))\n\nend = time.time()\nprint(\"Done! That took {} seconds\".format(str(end-start)))\n","sub_path":"week4/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"652043586","text":"from datetime import datetime\r\nimport pytz\r\n\r\nfile = '1541962108935000000_167_838.h5'\r\ncern_time=pytz.timezone('Europe/Zurich')\r\nunix_time=float(file[:18])/100000000\r\nutc=datetime.utcfromtimestamp(unix_time)\r\nprint(\"Time in UTC is\",utc)\r\ncern=pytz.utc.localize(utc).astimezone(cern_time)\r\nprint(\"Time in Switzerland/CERN is\",cern)\r\n","sub_path":"Awake/other_files/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"10321443","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport gurobipy as gu\n\n\n\ndifficultyValues = [14, 25, 31, 50]\n\ndef solve(map, difficulty, verbose, isRHC):\n if (verbose):\n print(\"Début de la résolution\")\n m = gu.Model(\"RushHour\")\n initialConditions = initConds(map, difficulty, m, verbose)\n m, X, Y, Z = genConstraintsAndVariables(m, map, initialConditions, verbose)\n if (verbose):\n print(\"Affectation de la fonction objectif\")\n toSum = []\n coeffs = []\n vehicules, cases, tours, posInitiale, isVertical, longueur, P = initialConditions\n for i in vehicules:\n for j in cases:\n for l in cases:\n for k in tours:\n try:\n toSum.append(Y[(i,j,l,k)])\n coeffs.append((len(P[(j,l)])-1) if isRHC else 1)\n except:\n pass\n m.setObjective(gu.quicksum(toSum[i]*coeffs[i] for i in range(len(toSum))) , gu.GRB.MINIMIZE)\n m.setParam('OutputFlag', verbose)\n m.optimize()\n f = open(\"log.log\", \"w\")\n f2 = open(\"soluce.txt\", \"w\")\n solution = []\n for v in m.getVars():\n if (v.x == 1):\n print('%s' % (v.varName), file = f)\n if (v.varName[0] == 'Y'):\n s = v.varName.split(\"_\")\n if (verbose):\n print(\"Déplacer le véhicule %s de la case %s à la case %s\" % (s[1], s[2], s[3]))#, file = f2)\n solution.append(\"Déplacer le véhicule %s de la case %s à la case %s\" % (s[1], s[2], s[3]))\n f.close()\n f2.close()\n return solution\n\ndef genConstraintsAndVariables(m, map, initialConditions, verbose):\n #extraction des variables\n vehicules, cases, tours, posInitiale, isVertical, longueur, P = initialConditions\n width, height = map[0]\n #définition des conteneurs pour les variables de décision\n X = {}\n Y = {}\n Z = {}\n\n #variables\n if (verbose):\n print(\"Création des variables de décision\")\n for k in tours:\n for i in vehicules:\n casesOccupables = set()\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n X[(i,j,k)] = m.addVar(vtype = gu.GRB.BINARY, name = \"X_%s_%s_%s\" % (i,j,k))\n 
autresCasesAccessibles = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n autresCasesAccessibles.remove(j)\n for l in autresCasesAccessibles:\n Y[(i,j,l,k)] = m.addVar(vtype = gu.GRB.BINARY, name = \"Y_%s_%s_%s_%s\" % (i,j,l,k))\n casesOccupables.add(j)\n if (isVertical[i]):\n for a in range(1,longueur[i]):\n casesOccupables.add(max(casesOccupables) + width)\n else:\n for a in range(1,longueur[i]):\n casesOccupables.add(max(casesOccupables) + 1)\n for j in casesOccupables:\n Z[(i,j,k)] = m.addVar(vtype = gu.GRB.BINARY, name = \"Z_%s_%s_%s\" % (i,j,k))\n #print(Z[('t1', 36, 5)])\n if (verbose):\n print(\"Variables de décision créées\")\n\n #contraintes\n if (verbose):\n print(\"Création des contraintes du PL\")\n #D'abord on instaure les contraintes de départ\n# tours = tours[1:]\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n if (j == posInitiale[i]):\n m.addConstr(X[(i, j, 0)] == 1)\n else:\n m.addConstr(X[(i, j, 0)] == 0)\n for j in rangeeDeCases(posInitiale[i], isVertical[i], map):\n if (j in getCasesOccupees(posInitiale[i], isVertical[i], longueur[i], P, map)):\n m.addConstr(Z[(i, j, 0)] == 1)\n else:\n m.addConstr(Z[(i ,j ,0)] == 0)\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n m.addConstr(Y[(i,j,l,0)] == 0)\n\n\n #Puis on crée les contraintes de transition\n #contrainte 1\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n for k in tours:\n tmp = []\n for mm in getCasesOccupees(j, isVertical[i], longueur[i], P, map):\n tmp.append(Z[(i, mm, k)])\n m.addConstr(longueur[i] * X[(i,j,k)] <= gu.quicksum(tmp))\n\n #contrainte 2\n for j in cases:\n for k in tours:\n tmp = []\n for i in vehicules:\n try:\n tmp.append(Z[(i,j,k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= 1)\n\n 
#contrainte 3\n for i in vehicules:\n for k in tours:\n tmp = []\n for j in rangeeDeCases(posInitiale[i], isVertical[i], map):\n tmp.append(Z[(i,j,k)])\n m.addConstr(gu.quicksum(tmp) == longueur[i])\n\n #contrainte b\n for k in tours:\n tmp = []\n for j in cases:\n for l in cases:\n for i in vehicules:\n try:\n tmp.append(Y[(i,j,l,k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= 1)\n\n #--------------------\n tours = tours[1:]\n #On fait ceci car les contraintes 4 et c ne doivent pas être appliquées pour k = 0\n\n #contrainte a\n for k in tours:\n tmp = []\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n tmp.append(Y[(i,j,l,k)])\n m.addConstr(gu.quicksum(tmp) == (1-X[('g', 17, k-1)]))\n\n\n #contrainte 4\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n for k in tours:\n for p in P[(j,l)]:\n tmp = []\n vprime = vehicules.copy()\n vprime.remove(i)\n for iprime in vprime:\n try:\n tmp.append(Z[(iprime, p, k-1)])\n except:\n pass\n m.addConstr(Y[(i,j,l,k)] <= 1 - gu.quicksum(tmp))\n\n #contrainte c ( à faire pour tous les k != 0) (on accepte que les voitures soient téléportées sur le plateau lors de l'initialisation)\n for i in vehicules:\n for l in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n for k in tours:\n tmp = []\n for j in cases:\n try:\n tmp.append(Y[(i, j, l, k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= X[(i, l, k)])\n\n #contrainte supplémentaire qui dit que l'on ne peut déplacer une voiture de j à l que si elle était bien en j avant\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n for k in tours:\n tmp = []\n 
for l in cases:\n try:\n tmp.append(Y[(i, j, l, k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= X[(i, j, k-1)])\n\n #contrainte supplémentaire qui dit que les véhicules sont toujours quelque part\n for k in tours:\n for i in vehicules:\n tmp = []\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n tmp.append(X[(i,j,k)])\n m.addConstr(gu.quicksum(tmp) == 1)\n\n #contrainte supplémentaire qui maintient le marqueur des véhicules non déplacés\n for i in vehicules:\n for k in tours:\n tmp = []\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n tmp.append(Y[(i,j,l,k)])\n\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n m.addConstr(gu.quicksum(tmp) >= X[(i,j,k)] - X[(i,j,k-1)])\n m.addConstr(gu.quicksum(tmp) >= X[(i,j,k-1)] - X[(i,j,k)])\n if (verbose):\n print(\"Contraintes créées\")\n return m, X, Y, Z\n\ndef rangeeDeCases(marqueur, isVertical, map):#fini\n return casesAccessibles(marqueur, isVertical, 1, map)\n\ndef casesAccessibles(marqueur, isVertical, longueur, map):#fini\n res = []\n width, height = map[0]\n if (isVertical):\n tmp = marqueur\n while (tmp-width > 0):\n tmp -= width #on remonte en haut de la colonne\n for i in range(height - longueur +1):\n res.append(tmp)\n tmp += width #et on ajoute les cases en descendant\n else:\n tmp = marqueur\n while ((tmp-1) % width != 0):\n tmp -= 1\n for i in range(width - longueur +1):\n res.append(tmp)\n tmp += 1\n return res\n\n\ndef initConds(map, difficulty, m, verbose):#fini\n if (verbose):\n print(\"Lecture des conditions de départ\")\n width, height = map[0]\n matrice = map[1]\n\n cases = range(1, width*height +1)\n\n tours = range(0, difficultyValues[difficulty]+1)\n\n posInitiale = {}\n vehicules = []\n isVertical = {}\n V = {}\n P = calculerP(map)\n M = {}\n visited = 
np.zeros(getCaseIndex(width, height-1, map), dtype=bool)\n for y in range(height):\n for x in range(width):\n cur = matrice[y][x]\n case = getCaseIndex(x+1,y,map)\n if (cur != '0' and (not visited[case-1])):\n vehicules.append(cur)\n posInitiale[cur] = case\n if (cur[0] == 't'):\n V[cur] = 3\n else:\n V[cur] = 2\n if ((x == width-1) or (matrice[y][x+1] != cur)):\n isVertical[cur] = True\n else:\n isVertical[cur] = False\n co = getCasesOccupees(posInitiale[cur], isVertical[cur], V[cur], P, map)\n M[(cur, case)] = co\n for c in co:\n visited[c-1] = True\n visited[case-1] = True\n if (verbose):\n print(\"Fin de la lecture des conditions de départ\")\n return vehicules, cases, tours, posInitiale, isVertical, V, P\n\n\ndef getCasesOccupees(marqueur, isVertical, longueur, P, map):#fini\n width, height = map[0]\n if (isVertical):\n return P[(marqueur, marqueur + (longueur-1)*width)]\n else:\n return P[(marqueur, marqueur + (longueur-1))]\n\ndef getCaseIndex(x,y, map):#fini\n width, height = map[0]\n return x + y*width\n\ndef calculerP(map):#fini\n P = {}\n width, height = map[0]\n for x1 in range(width):\n for y1 in range(height):\n for x2 in range(width):\n case1 = getCaseIndex(x1+1, y1, map)\n case2 = getCaseIndex(x2+1, y1, map)\n P[(case1, case2)] = []\n for x3 in robustRange(x1, x2):\n P[(case1, case2)].append(getCaseIndex(x3+1, y1, map))\n for y2 in range(height):\n case1 = getCaseIndex(x1+1, y1, map)\n case2 = getCaseIndex(x1+1, y2, map)\n P[(case1, case2)] = []\n for y3 in robustRange(y1, y2):\n P[(case1, case2)].append(getCaseIndex(x1+1, y3, map))\n return P\n\ndef robustRange(x1, x2):#fini\n if (x1 > x2):\n return range(x2, x1+1)\n else:\n return range(x1, x2+1)\n\n#import time\n#start = time.clock()\n#\n#import reader\n#test = \"puzzles/test/test4.text\"\n#deb = \"puzzles/débutant/jam1.txt\"\n#map = reader.read(\"puzzles/expert/jam40.txt\")\n#solve(map, 3, True, False)\n#print(time.clock() - 
start)","sub_path":"pl.py","file_name":"pl.py","file_ext":"py","file_size_in_byte":12356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"4148693","text":"from sqlalchemy import Column, String, create_engine, Integer, Date\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import date\nimport json\nimport os\n\n\ndatabase_path = \"postgres://qmuctnxsjzynmq:caaa92f73f104bc8eaf93ab5b71a13adec2d14616f4eac78575a109a0a6e4896@ec2-52-70-67-123.compute-1.amazonaws.com:5432/d5o744grfo2435\"\n# database_path = \"postgres:///m_db\"\ndb = SQLAlchemy()\n\n\ndef setup_db(app, database_path=database_path):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_path\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n db.init_app(app)\n # db.create_all()\n\n# def db_insert_all():\n# db.drop_all()\n# db.create_all()\n# add_actor = Actor('atheer', 'Female', '20')\n# add_movie = Movie('hloe word ', date.today())\n# add_actor.insert()\n# add_movie.insert()\n# db.session.commit()\n\n'''\nActor\n'''\n\nclass Actor(db.Model):\n __tablename__ = 'actors'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n age = Column(Integer)\n gender = Column(String)\n\n def __init__(self, name, gender, age):\n self.name = name\n self.age = age\n self.gender = gender\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'age': self.age,\n 'gender': self.gender}\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n'''\nMovie\n'''\n\nclass Movie(db.Model):\n __tablename__ = 'movies'\n\n id = Column(Integer, primary_key=True)\n title = Column(String)\n release_date = Column(Date)\n\n def __init__(self, title, release_date):\n self.title = title\n self.release_date = release_date\n\n def format(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'release_date': self.release_date}\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def 
update(self):\n db.session.commit()\n","sub_path":" Capstone_Project_udacity_V.2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"67518940","text":"# -*- coding: utf-8 -*-\r\n\r\nimport finicityapi.models.birthday\r\n\r\nclass CreateConsumerRequest(object):\r\n\r\n \"\"\"Implementation of the 'CreateConsumerRequest' model.\r\n\r\n TODO: type model description here.\r\n\r\n Attributes:\r\n first_name (string): The consumer first name(s) / given name(s)\r\n last_name (string): The consumer last name(s) / surname(s)\r\n address (string): The consumer’s street address\r\n city (string): The consumer’s city\r\n state (string): The consumer’s state\r\n zip (string): The consumer’s ZIP code\r\n phone (string): The consumer’s phone number\r\n ssn (string): The consumer’s 9-digit Social Security number (may\r\n include separators: nnn-nn-nnnn)\r\n birthday (Birthday): The consumer birth date\r\n email (string): The consumer’s email address\r\n suffix (string): The consumer suffix\r\n email_address (string): The consumer’s email address\r\n\r\n \"\"\"\r\n\r\n # Create a mapping from Model property names to API property names\r\n _names = {\r\n \"first_name\":'firstName',\r\n \"last_name\":'lastName',\r\n \"address\":'address',\r\n \"city\":'city',\r\n \"state\":'state',\r\n \"zip\":'zip',\r\n \"phone\":'phone',\r\n \"ssn\":'ssn',\r\n \"birthday\":'birthday',\r\n \"email_address\":'emailAddress',\r\n \"email\":'email',\r\n \"suffix\":'suffix'\r\n }\r\n\r\n def __init__(self,\r\n first_name=None,\r\n last_name=None,\r\n address=None,\r\n city=None,\r\n state=None,\r\n zip=None,\r\n phone=None,\r\n ssn=None,\r\n birthday=None,\r\n email_address=None,\r\n email=None,\r\n suffix=None,\r\n additional_properties = {}):\r\n \"\"\"Constructor for the CreateConsumerRequest class\"\"\"\r\n\r\n # Initialize members of the class\r\n self.first_name = first_name\r\n self.last_name = last_name\r\n self.address = address\r\n self.city = city\r\n self.state = state\r\n self.zip = zip\r\n self.phone = phone\r\n self.ssn = ssn\r\n self.birthday = birthday\r\n self.email = email\r\n self.suffix = suffix\r\n 
self.email_address = email_address\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties\r\n\r\n\r\n @classmethod\r\n def from_dictionary(cls,\r\n dictionary):\r\n \"\"\"Creates an instance of this model from a dictionary\r\n\r\n Args:\r\n dictionary (dictionary): A dictionary representation of the object as\r\n obtained from the deserialization of the server's response. The keys\r\n MUST match property names in the API description.\r\n\r\n Returns:\r\n object: An instance of this structure class.\r\n\r\n \"\"\"\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n first_name = dictionary.get('firstName')\r\n last_name = dictionary.get('lastName')\r\n address = dictionary.get('address')\r\n city = dictionary.get('city')\r\n state = dictionary.get('state')\r\n zip = dictionary.get('zip')\r\n phone = dictionary.get('phone')\r\n ssn = dictionary.get('ssn')\r\n birthday = finicityapi.models.birthday.Birthday.from_dictionary(dictionary.get('birthday')) if dictionary.get('birthday') else None\r\n email_address = dictionary.get('emailAddress')\r\n email = dictionary.get('email')\r\n suffix = dictionary.get('suffix')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(first_name,\r\n last_name,\r\n address,\r\n city,\r\n state,\r\n zip,\r\n phone,\r\n ssn,\r\n birthday,\r\n email_address,\r\n email,\r\n suffix,\r\n dictionary)\r\n\r\n\r\n","sub_path":"finicityapi/models/create_consumer_request.py","file_name":"create_consumer_request.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"155093598","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.utils import np_utils\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_score, recall_score, confusion_matrix, classification_report, accuracy_score, \\\n f1_score\nfrom keras.callbacks import LearningRateScheduler\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef dnn(X, val_x, y, val_y):\n num_labels = y.shape[1]\n\n # build model\n model = Sequential()\n model.add(Dense(512, input_shape=(40,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n\n model.add(Dense(256))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n\n model.add(Dense(num_labels))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')\n\n model.fit(X, y, batch_size=256, epochs=5000, validation_data=(val_x, val_y))\n\n\ndef dnns(X, val_x, y, val_y):\n num_labels = y.shape[1]\n nets = 5\n\n model = [0] * nets\n # model = [0 for k in range(5)]\n\n # build model\n for net in range(nets):\n model[net] = Sequential()\n\n model[net].add(Dense(512, input_shape=(40,)))\n model[net].add(Activation('relu'))\n model[net].add(Dropout(0.45))\n\n model[net].add(Dense(256))\n model[net].add(Activation('relu'))\n model[net].add(Dropout(0.45))\n\n model[net].add(Dense(num_labels))\n model[net].add(Activation('softmax'))\n\n model[net].compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='RMSprop')\n\n # 训练网络\n history = [0] * nets\n epochs = 132\n for j in range(nets):\n X_train2, X_val2, Y_train2, Y_val2 = X, val_x, y, val_y\n history[j] = model[j].fit(X, Y_train2, batch_size=256,\n epochs=epochs,\n validation_data=(X_val2, Y_val2), verbose=0)\n # score = model[j].evaluate(X_val2, Y_val2, batch_size=256)\n # print(\"processing 
model # \"+str(j) + \" _ \" + str(score))\n # for key in history[j].history.keys():\n # print(key)\n\n print(\"DNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}\".format(\n j + 1, epochs, max(history[j].history['accuracy']), max(history[j].history['val_accuracy'])))\n\n\n return model,history,nets\n\ndef process_show(history,nets):\n # 图示训练过程\n net = -1\n name_title = ['Loss', 'Accuracy']\n fig = plt.figure(figsize=(64, 64))\n for j in range(nets):\n for i in range(0, 2):\n ax = fig.add_subplot(8, 8, i + 1)\n plt.plot(history[j].history[list(history[j].history.keys())[i]],\n label=list(history[j].history.keys())[i])\n plt.plot(history[j].history[list(history[j].history.keys())[i + 2]],\n label=list(history[j].history.keys())[i + 2])\n plt.xlabel('Epochs', fontsize=18)\n plt.ylabel(name_title[i], fontsize=18)\n plt.legend()\n plt.show()\n\n# 定义评价指标\ndef acc(y_test, prediction):\n ### PRINTING ACCURACY OF PREDICTION\n ### RECALL\n ### PRECISION\n ### CLASIFICATION REPORT\n ### CONFUSION MATRIX\n cm = confusion_matrix(y_test, prediction)\n recall = np.diag(cm) / np.sum(cm, axis=1)\n precision = np.diag(cm) / np.sum(cm, axis=0)\n\n print('Recall:', recall)\n print('Precision:', precision)\n print('\\n clasification report:\\n', classification_report(y_test, prediction))\n print('\\n confussion matrix:\\n', confusion_matrix(y_test, prediction))\n\n ax = sns.heatmap(confusion_matrix(y_test, prediction), linewidths=0.5, cmap=\"YlGnBu\")","sub_path":"AC_TAU/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"445439791","text":"\"\"\"\nApproach: Try to find the pattern and repeated sub-problems to use DP.\n\nsee that just by applied the decision making logic on the given array would be difficult. Try to check if this pattern\ncan be changed. One way would be to modify the array such that the indices of the new array go from 0 to max element of\nthe given array. Then you store the sum of same valued elements from the given array onto the respective indices.\n\n\"\"\"\n\nclass Solution:\n def deleteAndEarn(self, nums: List[int]) -> int:\n simplified = [0 ] *(max(nums ) +1)\n for i in (nums):\n simplified[i] += i\n\n # Approach 1:\n \"\"\"\n return self.helper(simplified, 0, 0)\n\n def helper(self, nums, index, earned):\n #base\n if index >= len(nums):\n return earned\n\n #choose to delete:\n case0 = self.helper(nums, index + 2, earned + nums[index])\n\n # choose not to delete:\n case1 = self.helper(nums, index + 1, earned)\n\n return max(case0, case1)\n \"\"\"\n dp = [[0 ] *2 for _ in range(len(simplified))]\n dp[0][1] = simplified[0]\n\n for i in range(len(simplified)):\n dp[i][0] = max(dp[ i -1][0], dp[ i -1][1])\n dp[i][1] = dp[ i -1][0] + simplified[i]\n return max(dp[-1])\n\"\"\"\nApproach 1:\nTC: O(2^n)\nSC: O(n)\nApproach 2:\nTC: O(n)\nSC: O(n)\n\"\"\"","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"489072453","text":"import datetime\nfrom Myshop.settings import YANDEX_ID, YANDEX_KEY, TEST_YANDEX_ID, TEST_YANDEX_KEY, RUSSIAN_POST_TOKEN, RUSSIAN_POST_KEY\n\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.shortcuts import render, redirect\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.views import APIView\n\nfrom orders.models import Order, OrderItem\nfrom yandex_checkout import Payment, Configuration\n\nfrom shop.models import Flavour\n\nimport json\nimport requests\nfrom django.http import HttpResponse\n\n\ndef payment_process(*args, order_id):\n Configuration.account_id = YANDEX_ID\n Configuration.secret_key = YANDEX_KEY\n # Configuration.account_id = TEST_YANDEX_ID\n # Configuration.secret_key = TEST_YANDEX_KEY\n order = Order.published.get(id=order_id)\n value = float(order.get_total_cost() + order.deliver_cost)\n\n json_yandex = {\n \"amount\": {\n \"value\": value,\n \"currency\": \"RUB\"\n },\n \"description\": 'Номер заказа: {} от {}'.format(order.id, order.created.date()),\n \"metadata\": {\n \"order_id\": order.id\n },\n \"capture\": True,\n \"confirmation\": {\n \"type\": \"redirect\",\n \"return_url\": \"https://mrpit.online\"\n },\n \"receipt\": {\n \"customer\": {\n \"full_name\": order.client.username,\n \"email\": order.email,\n \"phone\": order.phone\n },\n \"items\": [\n ]\n },\n }\n items = order.items.all()\n for item in items:\n item = {\n \"description\": item.flavour.product,\n \"quantity\": item.quantity,\n \"amount\": {\n \"value\": item.price,\n \"currency\": \"RUB\"\n },\n \"vat_code\": \"2\",\n \"payment_mode\": \"full_prepayment\",\n \"payment_subject\": \"commodity\"\n }\n json_yandex[\"receipt\"][\"items\"].append(item)\n delivery_item = {\n \"description\": \"Доставка\",\n \"quantity\": \"1\",\n \"amount\": {\n \"value\": order.deliver_cost,\n \"currency\": \"RUB\"\n },\n \"vat_code\": \"2\",\n \"payment_mode\": 
\"full_prepayment\",\n \"payment_subject\": \"commodity\"\n }\n json_yandex[\"receipt\"][\"items\"].append(delivery_item)\n\n payment = Payment.create(json_yandex)\n\n return redirect(payment.confirmation.confirmation_url)\n\n\nclass CsrfExemptSessionAuthentication(SessionAuthentication):\n def enforce_csrf(self, request):\n return None\n\n\nclass YandexNotifications(APIView):\n permission_classes = [AllowAny]\n authentication_classes = (CsrfExemptSessionAuthentication,)\n\n def post(self, request):\n event_json = json.loads(request.body)\n if event_json[\"event\"] == \"payment.succeeded\":\n order_id = int(event_json[\"object\"][\"metadata\"][\"order_id\"])\n order = Order.published.get(id=order_id)\n order.paid = True\n order.status = \"В работе\"\n order.save()\n # Отправляем письмо администрации об оплате заказа\n subject_pay = 'Заказ №{} оплачен!'.format(order.id)\n mail_from = 'no-reply@mrpit.online'\n mail_to = ['admin@mrpit.online', 'nukez@inbox.ru']\n admin_message = 'Заказ №{} оплачен!
'\\\n 'Перейти в админку по ссылке'.format(order.id)\n mail = EmailMessage(subject_pay, admin_message, mail_from, mail_to)\n mail.content_subtype = \"html\"\n mail.send()\n\n for item in order.items.all():\n flavour = Flavour.published.get(id=item.flavour.id)\n if flavour.quantity > 0:\n flavour.quantity -= item.quantity\n if flavour.quantity == 0:\n flavour.for_offer = False\n # Отправка письма администрации о том, что товар из набора кончился и нужно формировать новый\n subject = 'Закончился вкус у товара'\n sender = 'no-reply@mrpit.online'\n message = 'Закончился вкус у товара. {} {}\\n' \\\n 'Необходимо проверить, есть ли товар в наборе и при необходимости переформировать' \\\n .format(flavour.name, flavour.product.name)\n send_mail(subject, message, sender, ['admin@mrpit.online'])\n else:\n flavour.quantity = 0\n\n flavour.save()\n # Если доставка в регионы, то создаём отправление в лк почты россии\n if order.city != 'Пермь':\n russian_post_create_delivery(order_id)\n return HttpResponse(status=200)\n elif event_json[\"event\"] == \"payment.waiting_for_capture\":\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=500)\n\n\ndef russian_post_create_delivery(order_id):\n order = Order.published.get(id=order_id)\n\n\n protocol = \"https://\"\n host = \"otpravka-api.pochta.ru\"\n token = RUSSIAN_POST_TOKEN\n key = RUSSIAN_POST_KEY\n\n request_headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json;charset=UTF-8\",\n \"Authorization\": \"AccessToken \" + token,\n \"X-User-Authorization\": \"Basic \" + key\n }\n\n path = \"/1.0/user/backlog\"\n\n new_orders = [{\n \"postoffice-code\": \"614961\",\n \"tel-address\": order.phone,\n \"surname\": order.last_name,\n \"given-name\": order.first_name,\n \"mail-direct\": 643,\n \"address-type-to\": \"DEFAULT\",\n \"index-to\": order.postal_code,\n \"region-to\": \"Заполнить регион!\",\n \"place-to\": order.city,\n \"street-to\": order.address,\n \"house-to\": \"Заполнить 
номер дома и кв!\",\n \"mass\": order.total_mass,\n \"mail-category\": \"ORDINARY\",\n \"mail-type\": \"ONLINE_PARCEL\",\n \"order-num\": order.id\n }]\n\n url = protocol + host + path\n\n requests.put(url, headers=request_headers, data=json.dumps(new_orders))\n\n","sub_path":"payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"167704443","text":"#table number 1 and 2 !!!!!!\n\nimport pandas as pd\nimport psycopg2\nimport datetime\nconnection = psycopg2.connect(user=\"postgres\", password=\"barmej\", host=\"127.0.0.1\", port=\"5432\", database=\"imdb_database\")\n\ncursor = connection.cursor()\n\ncursor.execute(\"SELECT label_name, label_id FROM label_types\")\nresult1 = cursor.fetchall()\nlabels = dict(result1)\nlabels\n\ndata = pd.read_csv('IMDB_Dataset.csv')\ndata.insert(0, 'id', range(1,1+len(data)))\ndata.insert(3, 'date', datetime.datetime.now().replace(microsecond=0))\n\ndata['sentiment']=[labels[x] for x in data['sentiment']]\n\nfor i,row in data.iterrows():\n\trow_ = tuple(row)\n\tsql1 = \"INSERT INTO data_input4 (id, input_data, input_date) VALUES (%s,%s,%s)\"\n\tcursor.execute(sql1, (row['id'],row['review'],datetime.datetime.now()))\n\tsql2 = \"INSERT INTO data_labeling5 (id_label, Label_number,Label_date) VALUES (%s,%s,%s)\"\n\tcursor.execute(sql2,(row['id'],row['sentiment'],datetime.datetime.now()))\n\tconnection.commit()\n","sub_path":"project_9_try11.py","file_name":"project_9_try11.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"90739882","text":"#!/usr/bin/env pyhton\n\nimport numpy as np\nimport poisson2D\nimport time\n\nimport matplotlib.pyplot as plt\n\nfilename = input('Nombre del archivo:')\nk = poisson2D.leeImagen(filename,0.01)\n\nax = 0.0\nbx = 1.0\nay = 0.0\nby = 1.0\nNx = k.shape[0]-2\nNy = k.shape[1]-2\nboundA = -1\nboundB = -1\nboundC = 1\nboundD = 1\n\n#k = np.ones([Nx+2,Ny+2])\n## Calcula Delta x y Delta y\nhx = (bx-ax)/(Nx+1)\nhy = (by-ay)/(Ny+1)\n\nht = 1\nr = ht / (hx*hy)\n\npoisson2D.ImprimeDatos(ax,bx,ay,by,Nx,Ny,hx,hy,\n boundA,\"Dirichlet\",\n boundB,\"Dirichlet\",\n boundC,\"Dirichlet\",\n boundD,\"Dirichlet\")\n\n## Definicion del sistema lineal de N+1 x N+1\nf = np.zeros((Ny,Nx)) # RHS\nA = poisson2D.Laplaciano2D(Nx, Ny,1,k) # Matriz del sistema\n\n## Aplicacion de las condiciones de frontera Dirichlet\n\nf[Ny-1,: ] = boundB # Top wall\nf[0 ,: ] = boundA # Bot wall\nf[: ,0 ] = boundC # Left wall\nf[: ,Nx-1] = boundD # Right wall\n\n\n## La solucion sera guardada en el arreglo u, que es de tamanio Ny+2 x Nx+2, pues incluye las fronteras\nu = np.zeros((Ny+2, Nx+2))\n\n## Se utiliza un algoritmo del paquete linalg para obtener la solucion del sistema de N x N\nut = np.copy(u[1:Ny+1,1:Nx+1])\nut.shape = ut.size # Cambiamos los arreglos a formato unidimensional\nf.shape = f.size # Cambiamos los arreglos a formato unidimensional\n\nt1_start = time.perf_counter()\nut = np.linalg.solve(A,f)\nt1_stop = time.perf_counter()\nprint(time.ctime(), '\\n CPU time: {:0.6f} '.format(t1_stop-t1_start))\n\n## Los valores en los lados del dominio son conocidos debido a las cond. 
Dirichlet\nu[Ny+1,: ] = boundB # Top wall\nu[0 ,: ] = boundA # Bot wall\nu[: ,0 ] = boundC # Left wall\nu[: ,Nx+1] = boundD # Right wall\n\npoisson2D.ImprimeSistema(A,ut,f)\n\nut.shape = (Ny, Nx) # Regresamos el arreglo a formato bidimensional\nu[1:Ny+1,1:Nx+1] = ut\n\nx = np.linspace(ax,bx,Nx+2)\ny = np.linspace(ay,by,Ny+2)\nxg, yg = np.meshgrid(x,y)\n\npoisson2D.GuardaSolucion('imagenX', x, y, k)\n\nplt.imshow(u,cmap='inferno')\nplt.show()\n\n#poisson2D.GuardaSolucion('SALIDA', x, y, u)\n\n# Post-procesamiento ...\nNNX = u.shape[0]\nunew = np.copy(u)\nfor j in range(u.shape[0]):\n for i in range(u.shape[1]):\n unew[i,j] = u[NNX-i-1,j]\n\npoisson2D.GraficaSuperficieC(xg,yg,unew,'inferno') #hot, cool, rainbow, ...\n\n","sub_path":"TEST/2D_K_Variable_01.py","file_name":"2D_K_Variable_01.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"634470698","text":"# Import unittest module for creating unit tests\nimport unittest\n\n# Import time module to implement \nimport time\n\n# Import the Selenium 2 module (aka \"webdriver\")\nfrom selenium import webdriver\n\n# For automating data input\nfrom selenium.webdriver.common.keys import Keys\n\n# For providing custom configurations for Chrome to run\nfrom selenium.webdriver.chrome.options import Options\n\n\n# --------------------------------------\n# Provide a class for the unit test case\nclass PythonOrgSearchChrome(unittest.TestCase):\n\n\t# Anything declared in setUp will be executed for all test cases\n\tdef setUp(self):\n\t\t# Select which device you want to emulate by uncommenting it\n\t\t# More information at: https://sites.google.com/a/chromium.org/chromedriver/mobile-emulation\n\t\tmobile_emulation = { \n\t\t\t\"deviceName\": \"Nexus 5\"\n\t\t\t\n\t\t\t# Or specify a specific build using the following two arguments\n\t\t\t#\"deviceMetrics\": { \"width\": 360, \"height\": 640, \"pixelRatio\": 3.0 },\n\t\t #\"userAgent\": \"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19\" }\n\t\t}\n\t\t\n\t\t# Define a variable to hold all the configurations we want\n\t\tchrome_options = webdriver.ChromeOptions()\n\t\t\n\t\t# Add the mobile emulation to the chrome options variable\n\t\tchrome_options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n\t\tchrome_options.add_argument('--headless')\n\n\t\t# Create driver, pass it the path to the chromedriver file and the special configurations you want to run\n\t\tself.driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', chrome_options=chrome_options)\n\n\t# An individual test case. 
Must start with 'test_' (as per unittest module)\n\tdef test_search_in_python_chrome(self):\t\n\t\t# Assigning a local variable for the global driver\n\t\tdriver = self.driver\n\n\t\t# Go to google.com\n\n\t\tdriver.get('http://localhost:8080')\n\t\tdriver.implicitly_wait(30)\n\t\ttext = driver.find_element_by_id(\"sample-item\").text\n\t\t\n\t\tprint(\"Text:\",text)\n\t\tself.assertEqual(\"text in the item\", text)\n\n\n\t\t# Take a screenshot of the results\n\t# Anything declared in tearDown will be executed for all test cases\n\tdef tearDown(self):\n\t\t# Close the browser. \n\t\t# Note close() will close the current tab, if its the last tab it will close the browser. To close the browser entirely use quit()\n\t\tself.driver.close()\n\n# Boilerplate code to start the unit tests\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"seltest.py","file_name":"seltest.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"502487829","text":"from tkinter import *\r\n\r\ndef doSome():\r\n print('wyświetlam tekst')\r\n\r\ndef doSome2():\r\n print('To jest druga funckcja')\r\n \r\nroot = Tk()\r\nroot.geometry('300x200')\r\n\r\n# -- MAIN MENU\r\n\r\nmenu_main = Menu(root)\r\n\r\nroot.config(menu=menu_main)\r\n\r\nsubmenu = Menu(menu_main)\r\neditmenu = Menu(menu_main)\r\n\r\n# -- dodanie menu\r\nmenu_main.add_cascade(label='File', menu=submenu)\r\nmenu_main.add_cascade(label='Edit', menu=editmenu)\r\n\r\nsubmenu.add_command(label='New project', command=doSome)\r\nsubmenu.add_command(label='New...', command=doSome2)\r\nsubmenu.add_separator()\r\nsubmenu.add_command(label='Exit', command=root.quit)\r\n\r\neditmenu.add_command(label='Redo', command=doSome)\r\n\r\n# -- TOOLBAR\r\n\r\ntoolbar = Frame(root, bg='yellow')\r\ntool_btn = Button(toolbar, text='Insert image', command=doSome)\r\ntool_print_btn = Button(toolbar, text='Print', command=doSome)\r\n\r\ntool_btn.pack(side=LEFT, padx=2, pady=2)\r\ntool_print_btn.pack(side=LEFT, padx=2, pady=2)\r\ntoolbar.pack(side=TOP, fill=X)\r\n\r\n# -- STATUS\r\n\r\nstatusbar = Label(root, text='Prepare', bd=1, relief=SUNKEN, anchor=W)\r\n\r\n\r\nstatusbar.pack(side=BOTTOM, fill=X)\r\n\r\n\r\n\r\nroot.mainloop()","sub_path":"Python_projekty_windows/Tkinter-gui/newgui.py","file_name":"newgui.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"256926159","text":"#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"ADC library\n\"\"\"\nimport laygo\nimport numpy as np\nimport os\n#import logging;logging.basicConfig(level=logging.DEBUG)\n\ndef create_power_pin_from_inst(laygen, layer, gridname, inst_left, inst_right):\n \"\"\"create power pin\"\"\"\n rvdd0_pin_xy = laygen.get_inst_pin_coord(inst_left.name, 'VDD', gridname, sort=True)\n rvdd1_pin_xy = laygen.get_inst_pin_coord(inst_right.name, 'VDD', gridname, sort=True)\n rvss0_pin_xy = laygen.get_inst_pin_coord(inst_left.name, 'VSS', gridname, sort=True)\n rvss1_pin_xy = laygen.get_inst_pin_coord(inst_right.name, 'VSS', gridname, sort=True)\n\n laygen.pin(name='VDD', layer=layer, xy=np.vstack((rvdd0_pin_xy[0],rvdd1_pin_xy[1])), gridname=gridname)\n laygen.pin(name='VSS', layer=layer, xy=np.vstack((rvss0_pin_xy[0],rvss1_pin_xy[1])), gridname=gridname)\n\ndef generate_sarlogic(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m3m4, m=1, origin=np.array([0, 0])):\n \"\"\"generate sar logic \"\"\"\n pg = placement_grid\n rg_m3m4 = routing_grid_m3m4\n\n #inv_name = 'inv_' + str(m) + 'x'\n #oai22_name = 'oai22_' + str(m) + 'x'\n #mux2to1_name = 'mux2to1_' + str(m) + 'x'\n #nand_name = 'nand_' + str(m) + 'x'\n inv_name = 'inv_1x'\n oai22_name = 'oai22_1x'\n mux2to1_name = 'mux2to1_1x'\n nand_name = 'nand_1x'\n inv_obuf_name = 'inv_' + str(m) + 'x'\n\n # placement\n 
isaopb0 = laygen.place(name = \"I\" + objectname_pfix + 'INV0', templatename = inv_name,\n gridname = pg, xy=origin, template_libname=templib_logic)\n isaomb0 = laygen.relplace(name=\"I\" + objectname_pfix + 'INV1', templatename=inv_name,\n gridname=pg, refinstname=isaopb0.name, template_libname=templib_logic)\n ioai0 = laygen.relplace(name = \"I\" + objectname_pfix + 'OAI0', templatename = oai22_name,\n gridname = pg, refinstname = isaomb0.name, template_libname=templib_logic)\n ildpo0 = laygen.relplace(name=\"I\" + objectname_pfix + 'INV2', templatename=inv_name,\n gridname=pg, refinstname=ioai0.name, template_libname=templib_logic)\n ioai1 = laygen.relplace(name = \"I\" + objectname_pfix + 'OAI1', templatename = oai22_name,\n gridname = pg, refinstname = ildpo0.name, template_libname=templib_logic)\n ildno0 = laygen.relplace(name=\"I\" + objectname_pfix + 'INV3', templatename=inv_name,\n gridname=pg, refinstname=ioai1.name, template_libname=templib_logic)\n ind0 = laygen.relplace(name=\"I\" + objectname_pfix + 'ND0', templatename=nand_name,\n gridname=pg, refinstname=ildno0.name, template_libname=templib_logic)\n imuxen0 = laygen.relplace(name=\"I\" + objectname_pfix + 'MUXEN0', templatename=inv_name,\n gridname=pg, refinstname=ind0.name, template_libname=templib_logic)\n imux0 = laygen.relplace(name=\"I\" + objectname_pfix + 'MUX0', templatename=mux2to1_name,\n gridname=pg, refinstname=imuxen0.name, template_libname=templib_logic)\n izp0 = laygen.relplace(name=\"I\" + objectname_pfix + 'OBUF0', templatename=inv_obuf_name,\n gridname=pg, refinstname=imux0.name, template_libname=templib_logic)\n imux1 = laygen.relplace(name=\"I\" + objectname_pfix + 'MUX1', templatename=mux2to1_name,\n gridname=pg, refinstname=izp0.name, template_libname=templib_logic)\n izm0 = laygen.relplace(name=\"I\" + objectname_pfix + 'OBUF1', templatename=inv_obuf_name,\n gridname=pg, refinstname=imux1.name, template_libname=templib_logic)\n imux2 = laygen.relplace(name=\"I\" + 
objectname_pfix + 'MUX2', templatename=mux2to1_name,\n gridname=pg, refinstname=izm0.name, template_libname=templib_logic)\n izmid0 = laygen.relplace(name=\"I\" + objectname_pfix + 'OBUF2', templatename=inv_obuf_name,\n gridname=pg, refinstname=imux2.name, template_libname=templib_logic)\n\n # internal pins\n isaopb0_i_xy = laygen.get_inst_pin_coord(isaopb0.name, 'I', rg_m3m4)\n isaopb0_o_xy = laygen.get_inst_pin_coord(isaopb0.name, 'O', rg_m3m4)\n isaomb0_i_xy = laygen.get_inst_pin_coord(isaomb0.name, 'I', rg_m3m4)\n isaomb0_o_xy = laygen.get_inst_pin_coord(isaomb0.name, 'O', rg_m3m4)\n ioai0_a_xy = laygen.get_inst_pin_coord(ioai0.name, 'A', rg_m3m4)\n ioai0_b_xy = laygen.get_inst_pin_coord(ioai0.name, 'B', rg_m3m4)\n ioai0_c_xy = laygen.get_inst_pin_coord(ioai0.name, 'C', rg_m3m4)\n ioai0_d_xy = laygen.get_inst_pin_coord(ioai0.name, 'D', rg_m3m4)\n ioai0_o_xy = laygen.get_inst_pin_coord(ioai0.name, 'O', rg_m3m4)\n ildpo0_i_xy = laygen.get_inst_pin_coord(ildpo0.name, 'I', rg_m3m4)\n ildpo0_o_xy = laygen.get_inst_pin_coord(ildpo0.name, 'O', rg_m3m4)\n ioai1_a_xy = laygen.get_inst_pin_coord(ioai1.name, 'A', rg_m3m4)\n ioai1_b_xy = laygen.get_inst_pin_coord(ioai1.name, 'B', rg_m3m4)\n ioai1_c_xy = laygen.get_inst_pin_coord(ioai1.name, 'C', rg_m3m4)\n ioai1_d_xy = laygen.get_inst_pin_coord(ioai1.name, 'D', rg_m3m4)\n ioai1_o_xy = laygen.get_inst_pin_coord(ioai1.name, 'O', rg_m3m4)\n ildno0_i_xy = laygen.get_inst_pin_coord(ildno0.name, 'I', rg_m3m4)\n ildno0_o_xy = laygen.get_inst_pin_coord(ildno0.name, 'O', rg_m3m4)\n ind0_a_xy = laygen.get_inst_pin_coord(ind0.name, 'A', rg_m3m4)\n ind0_b_xy = laygen.get_inst_pin_coord(ind0.name, 'B', rg_m3m4)\n ind0_o_xy = laygen.get_inst_pin_coord(ind0.name, 'O', rg_m3m4)\n imux0_i0_xy = laygen.get_inst_pin_coord(imux0.name, 'I0', rg_m3m4)\n imux0_i1_xy = laygen.get_inst_pin_coord(imux0.name, 'I1', rg_m3m4)\n imux0_en0_xy = laygen.get_inst_pin_coord(imux0.name, 'EN0', rg_m3m4)\n imux0_en1_xy = laygen.get_inst_pin_coord(imux0.name, 
'EN1', rg_m3m4)\n imux0_o_xy = laygen.get_inst_pin_coord(imux0.name, 'O', rg_m3m4)\n imuxen0_i_xy = laygen.get_inst_pin_coord(imuxen0.name, 'I', rg_m3m4)\n imuxen0_o_xy = laygen.get_inst_pin_coord(imuxen0.name, 'O', rg_m3m4)\n imux1_i0_xy = laygen.get_inst_pin_coord(imux1.name, 'I0', rg_m3m4)\n imux1_i1_xy = laygen.get_inst_pin_coord(imux1.name, 'I1', rg_m3m4)\n imux1_en0_xy = laygen.get_inst_pin_coord(imux1.name, 'EN0', rg_m3m4)\n imux1_en1_xy = laygen.get_inst_pin_coord(imux1.name, 'EN1', rg_m3m4)\n imux1_o_xy = laygen.get_inst_pin_coord(imux1.name, 'O', rg_m3m4)\n imux2_i0_xy = laygen.get_inst_pin_coord(imux2.name, 'I0', rg_m3m4)\n imux2_i1_xy = laygen.get_inst_pin_coord(imux2.name, 'I1', rg_m3m4)\n imux2_en0_xy = laygen.get_inst_pin_coord(imux2.name, 'EN0', rg_m3m4)\n imux2_en1_xy = laygen.get_inst_pin_coord(imux2.name, 'EN1', rg_m3m4)\n imux2_o_xy = laygen.get_inst_pin_coord(imux2.name, 'O', rg_m3m4)\n izp0_i_xy = laygen.get_inst_pin_coord(izp0.name, 'I', rg_m3m4)\n izp0_o_xy = laygen.get_inst_pin_coord(izp0.name, 'O', rg_m3m4)\n izm0_i_xy = laygen.get_inst_pin_coord(izm0.name, 'I', rg_m3m4)\n izm0_o_xy = laygen.get_inst_pin_coord(izm0.name, 'O', rg_m3m4)\n izmid0_i_xy = laygen.get_inst_pin_coord(izmid0.name, 'I', rg_m3m4)\n izmid0_o_xy = laygen.get_inst_pin_coord(izmid0.name, 'O', rg_m3m4)\n\n #reference route coordinate\n y0 = isaopb0_i_xy[0][1]\n x0 = laygen.get_inst_xy(name=isaopb0.name, gridname=rg_m3m4)[0] + 1\n x1 = laygen.get_inst_xy(name=izmid0.name, gridname=rg_m3m4)[0]\\\n +laygen.get_template_size(name=izmid0.cellname, gridname=rg_m3m4, libname=templib_logic)[0] - 1\n #saopb/saomb\n rsaopbv0, rsaopb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], isaopb0_i_xy[0], np.array([x0, y0 + 3]), rg_m3m4)\n rsaombv0, rsaomb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], isaomb0_i_xy[0], np.array([x0, y0 + 4]), rg_m3m4)\n #vplus/vminus\n [rv0, rvplus0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], 
laygen.layers['metal'][4], isaopb0_o_xy[0], ioai0_c_xy[0], y0 - 0, rg_m3m4)\n [rv0, rvminus0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], isaomb0_o_xy[0], ioai1_c_xy[0], y0 + 1, rg_m3m4)\n #rst/sb\n rrstv0, rrst0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai0_b_xy[0], np.array([x0, y0 - 2]), rg_m3m4)\n rv0, rsb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai0_d_xy[0], np.array([x0, y0 - 1+6]), rg_m3m4)\n rrstv1, rrst1 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai1_b_xy[0], np.array([x0, y0 - 2]), rg_m3m4)\n [rv0, rsb1, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai1_d_xy[0], ioai0_d_xy[0], y0 - 1, rg_m3m4)\n #ldpo\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai0_o_xy[0], ildpo0_i_xy[0], y0 + 0 - 3, rg_m3m4, extendl=3, extendr=1)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildpo0_o_xy[0], ioai0_a_xy[0], y0 - 4, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildpo0_o_xy[0], imux0_i0_xy[0], y0 - 4, rg_m3m4)\n #ldno\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai1_o_xy[0], ildno0_i_xy[0], y0 + 0, rg_m3m4, extendl=2, extendr=2)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildno0_o_xy[0], ioai1_a_xy[0], y0 - 3, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildno0_o_xy[0], imux1_i0_xy[0], y0 - 3, rg_m3m4)\n #nand input\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildpo0_o_xy[0], ind0_b_xy[0], y0 - 4, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildno0_o_xy[0], ind0_a_xy[0], y0 - 3, rg_m3m4)\n #nand output(ldndo)\n [rv0, 
rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ind0_o_xy[0], imux2_i0_xy[0], y0 - 1, rg_m3m4)\n #mux en\n rextselv0, rextsel0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_i_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux0_en1_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux1_en1_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux2_en1_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_o_xy[0], imux0_en0_xy[0], y0 + 1, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_o_xy[0], imux1_en0_xy[0], y0 + 1, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_o_xy[0], imux2_en0_xy[0], y0 + 1, rg_m3m4)\n #mux ext\n rv0, rext_zpb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux0_i1_xy[0], np.array([imux0_i1_xy[0][0]-4, y0 + 0]), rg_m3m4)\n rv0, rext_zmb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux1_i1_xy[0], np.array([imux1_i1_xy[0][0]-4, y0 + 0]), rg_m3m4)\n rv0, rext_zmidb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux2_i1_xy[0], np.array([imux2_i1_xy[0][0]-4, y0 + 0]), rg_m3m4)\n #mux output\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imux0_o_xy[0], izp0_i_xy[0], y0 + 0, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imux1_o_xy[0], izm0_i_xy[0], y0 + 0, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imux2_o_xy[0], izmid0_i_xy[0], y0 
+ 0, rg_m3m4)\n #final output\n rv0, rzp0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], izp0_o_xy[0], np.array([x1, y0 - 4]), rg_m3m4)\n rv0, rzm0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], izm0_o_xy[0], np.array([x1, y0 - 3]), rg_m3m4)\n rv0, rzmid0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], izmid0_o_xy[0], np.array([x1, y0 - 1]), rg_m3m4)\n \n #pins \n laygen.create_boundary_pin_form_rect(rsaopb0, rg_m3m4, \"SAOPB\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.pin_from_rect('SAOPB2', laygen.layers['pin'][3], rsaopbv0, gridname=rg_m3m4, netname='SAOPB')\n laygen.create_boundary_pin_form_rect(rsaomb0, rg_m3m4, \"SAOMB\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.pin_from_rect('SAOMB2', laygen.layers['pin'][3], rsaombv0, gridname=rg_m3m4, netname='SAOMB')\n laygen.create_boundary_pin_form_rect(rsb0, rg_m3m4, \"SB\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.create_boundary_pin_form_rect(rrst0, rg_m3m4, \"RST\", laygen.layers['pin'][4], size=6, direction='left')\n #laygen.pin_from_rect('RST2', laygen.layers['pin'][3], rrstv0, gridname=rg_m3m4, netname='RST')\n laygen.pin_from_rect('RST2', laygen.layers['pin'][3], rrstv1, gridname=rg_m3m4, netname='RST')\n laygen.create_boundary_pin_form_rect(rextsel0, rg_m3m4, \"EXTSEL\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.pin_from_rect('EXTSEL2', laygen.layers['pin'][3], rextselv0, gridname=rg_m3m4, netname='EXTSEL')\n laygen.create_boundary_pin_form_rect(rext_zpb0, rg_m3m4, \"EXT_ZPB\", laygen.layers['pin'][4], size=4, direction='left')\n laygen.create_boundary_pin_form_rect(rext_zmb0, rg_m3m4, \"EXT_ZMB\", laygen.layers['pin'][4], size=4, direction='left')\n laygen.create_boundary_pin_form_rect(rext_zmidb0, rg_m3m4, \"EXT_ZMIDB\", laygen.layers['pin'][4], size=4, direction='left')\n laygen.create_boundary_pin_form_rect(rzp0, rg_m3m4, \"ZP\", laygen.layers['pin'][4], 
size=6, direction='right')\n laygen.create_boundary_pin_form_rect(rzm0, rg_m3m4, \"ZM\", laygen.layers['pin'][4], size=6, direction='right')\n laygen.create_boundary_pin_form_rect(rzmid0, rg_m3m4, \"ZMID\", laygen.layers['pin'][4], size=6, direction='right')\n\n # power pin\n create_power_pin_from_inst(laygen, layer=laygen.layers['pin'][2], gridname=rg_m1m2, inst_left=isaopb0, inst_right=izmid0)\n\nif __name__ == '__main__':\n laygen = laygo.GridLayoutGenerator(config_file=\"laygo_config.yaml\")\n\n import imp\n try:\n imp.find_module('bag')\n laygen.use_phantom = False\n except ImportError:\n laygen.use_phantom = True\n\n tech=laygen.tech\n utemplib = tech+'_microtemplates_dense'\n logictemplib = tech+'_logic_templates'\n laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)\n laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)\n laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)\n laygen.templates.sel_library(utemplib)\n laygen.grids.sel_library(utemplib)\n\n # library load or generation\n workinglib = 'adc_sar_generated'\n laygen.add_library(workinglib)\n laygen.sel_library(workinglib)\n if os.path.exists(workinglib + '.yaml'): # generated layout file exists\n laygen.load_template(filename=workinglib + '.yaml', libname=workinglib)\n laygen.templates.sel_library(utemplib)\n\n #grid\n pg = 'placement_basic' #placement grid\n rg_m1m2 = 'route_M1_M2_cmos'\n rg_m1m2_thick = 'route_M1_M2_thick'\n rg_m2m3 = 'route_M2_M3_cmos'\n rg_m2m3_thick = 'route_M2_M3_thick'\n rg_m3m4 = 'route_M3_M4_basic'\n rg_m4m5 = 'route_M4_M5_basic'\n rg_m5m6 = 'route_M5_M6_basic'\n rg_m1m2_pin = 'route_M1_M2_basic'\n rg_m2m3_pin = 'route_M2_M3_basic'\n\n #display\n #laygen.display()\n #laygen.templates.display()\n #laygen.save_template(filename=workinglib+'_templates.yaml', libname=workinglib)\n\n mycell_list = []\n #sarlogic generation\n m=1\n cellname='sarlogic'\n print(cellname+\" 
generating\")\n mycell_list.append(cellname)\n laygen.add_cell(cellname)\n laygen.sel_cell(cellname)\n generate_sarlogic(laygen, objectname_pfix='SL0', templib_logic=logictemplib,\n placement_grid=pg, routing_grid_m3m4=rg_m3m4, m=m, origin=np.array([0, 0]))\n laygen.add_template_from_cell()\n\n\n laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)\n #bag export, if bag does not exist, gds export\n import imp\n try:\n imp.find_module('bag')\n import bag\n prj = bag.BagProject()\n for mycell in mycell_list:\n laygen.sel_cell(mycell)\n laygen.export_BAG(prj, array_delimiter=['[', ']'])\n except ImportError:\n laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+\".layermap\") # change layermapfile\n","sub_path":"generators/adc_sar_sarlogic_layout_generator_bak160128.py","file_name":"adc_sar_sarlogic_layout_generator_bak160128.py","file_ext":"py","file_size_in_byte":18414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"642291006","text":"import ipopt\nimport probInfo as prob\nfrom problemData import *\nimport globalVars\n\nclass nlpProb(object):\n\n def __init__(self, N, T, t0, x0, x00_23, ncons, nu, path, obstacle, posIdx,\n ns_option, V_cmd, lb_VTerm, lb_VdotVal, delChi_max, obstacleID, fHandleCost = None):\n try:\n self.N = N\n self.T = T\n self.t0 = t0\n self.x0 = x0\n self.ncons = ncons # number of constraints\n self.ncons_vary = np.copy(ncons)\n self.nu = nu # number of controls\n self.path = path\n self.obstacle = obstacle\n self.posIdx = posIdx\n self.ns_option = ns_option\n self.V_cmd = V_cmd\n self.lb_VTerm = lb_VTerm\n self.lb_VdotVal = lb_VdotVal\n self.fHandleCost = fHandleCost\n self.addObstacleConstraints = False\n self.obstacleNumber = np.array([], dtype=int)\n self.delChi_max = delChi_max\n self.obstacleID = obstacleID\n self.x00_23 = x00_23\n\n useOnlyObstaclesInView = True\n\n if useOnlyObstaclesInView:\n nObstacle = len(obstacle.N)\n if nObstacle > 0:\n for j in range(nObstacle):\n \n p1 = x0[0:2]\n p2 = np.array([obstacle.E[j], obstacle.N[j]])\n distToObstacle = distance(p1, p2)\n \n #print('{0:.1f}, {1:.1f}'.format(distToObstacle, safeDistance))\n \n if distToObstacle < safeDistance:\n self.addObstacleConstraints = True\n self.obstacleNumber = np.concatenate([self.obstacleNumber, np.array([j])])\n self.ncons_vary += N\n\n else:\n nObstacle = len(obstacleID)\n if nObstacle > 0:\n for j in range(nObstacle):\n \n id = obstacleID[j]\n p1 = x0[0:2]\n p2 = np.array([obstacle.E[id], obstacle.N[id]])\n distToObstacle = distance(p1, p2)\n \n # print('{0:.1f}, {1:.1f}'.format(distToObstacle, safeDistance))\n \n if distToObstacle < safeDistance:\n self.addObstacleConstraints = True\n self.obstacleNumber = np.concatenate([self.obstacleNumber, np.array([id]) ])\n self.ncons_vary += N\n\n pass\n except:\n print('Error in init')\n\n def objective(self, u):\n N = self.N\n T = self.T\n t0 = self.t0\n x0 = self.x0\n path = self.path\n obstacle = self.obstacle\n 
posIdx = self.posIdx\n V_cmd = self.V_cmd\n fHandleCost = self.fHandleCost\n x00_23 = self.x00_23\n\n x = prob.computeOpenloopSolution(u, N, T, t0, x0, x00_23)\n costvec = np.zeros([3*N+2, 1])\n\n for k in range(N):\n uk = np.array([u[k],u[k+N]])\n costout = prob.runningCosts(uk, x[k], t0 + k*T, path, obstacle, posIdx, V_cmd)\n costvec[k] = costout[0] # V\n costvec[k+N] = costout[1] # Vdot or Vddot\n costvec[k+2*N] = costout[2] # Chidot or Chiddot\n\n cost_goalDist, cost_goalDelChi = prob.goalCost(x0, t0)\n #cost_goalDist, cost_goalDelChi = prob.goalCost(x[-1,:], t0)\n costvec[3*N] = cost_goalDist # goal dist\n costvec[3*N+1] = cost_goalDelChi # goal delta chi\n\n cost = np.sum(costvec)\n\n\n # write data once for analysis later using a global variable. other mentioned can be developed to not use the\n # global variable - but this was the least intrusive way of adding the functionality\n if globalVars.writeToFileCost == True:\n for k in range(3*N):\n fHandleCost.write('%.2f ' %(costvec[k]) )\n fHandleCost.write('%.2f ' % (costvec[3*N]))\n fHandleCost.write('%.2f ' % (costvec[3*N+1]))\n fHandleCost.write('\\n')\n globalVars.writeToFileCost = False\n\n return cost\n\n\n def gradient(self, u):\n N = self.N\n nu = self.nu\n\n eps = 1e-2\n obj_grad_u = np.zeros(nu*N)\n for k in range(nu*N):\n uplus = np.copy(u)\n uminus = np.copy(u)\n\n uplus[k] = uplus[k] + eps\n obj_uplus = self.objective(uplus)\n\n uminus[k] = uminus[k] - eps\n obj_uminus = self.objective(uminus)\n\n obj_grad_u[k] = (obj_uplus - obj_uminus) / (2 * eps)\n\n return obj_grad_u\n\n\n def constraints(self, u):\n try:\n N = self.N\n T = self.T\n t0 = self.t0\n x0 = self.x0\n path = self.path\n obstacle = self.obstacle\n posIdx = self.posIdx\n ns_option = self.ns_option\n x00_23 = self.x00_23\n\n x = prob.computeOpenloopSolution(u, N, T, t0, x0, x00_23)\n\n consR1 = np.array([], dtype=float)\n\n if ns == 6:\n\n if ns_option == 1: # Additional Current velocity + Terminal velocity constraint\n\n consR2 = 
np.array([x[0, idx_V] * x[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n consR3 = np.array([x[0, idx_V]]) # current velocity\n\n constmp = np.concatenate([consR1, consR2])\n consR = np.concatenate([constmp, consR3])\n\n # terminal constraint (dy, V, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = np.concatenate([consT2, consT3])\n\n elif ns_option == 2:\n\n # No terminal velocity constraint\n consR2 = np.array([x[0, idx_V] * x[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, dV, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = np.concatenate([consT2, consT3])\n\n elif ns_option == 3:\n\n # No terminal velocity constraint\n consR2 = np.array([x[0, idx_V] * x[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, V, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = consT3\n\n elif ns == 4:\n\n if ns_option == 1:\n\n u_mat = u.reshape(2, -1).T\n consR2 = np.array([x[0, idx_V] * u_mat[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n consR3 = np.array([x[0, idx_V]]) # current velocity\n\n constmp = np.concatenate([consR1, consR2])\n consR = np.concatenate([constmp, consR3])\n\n # terminal constraint (dy, dV, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = np.concatenate([consT2, consT3])\n\n elif ns_option == 2:\n\n u_mat = u.reshape(2, -1).T\n consR2 = np.array([x[0, idx_V] * u_mat[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, V, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N-1], t0, path, obstacle, posIdx) # ydist, VEnd\n consT = np.concatenate([consT2, consT3])\n\n\n 
elif ns_option == 3:\n\n u_mat = u.reshape(2,-1).T\n consR2 = np.array([x[0, idx_V] * u_mat[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, dV, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N-1], t0, path, obstacle, posIdx) # ydist, VEnd\n consT = consT3\n\n # total constraints without obstacles\n cons = np.concatenate([consR,consT])\n\n # total constraints with obstacles\n if self.addObstacleConstraints == True:\n\n for j in self.obstacleNumber:\n for k in range(N):\n position = x[k][0:2]\n obstacleDistance = np.sqrt([(obstacle.E[j] - position[0]) ** 2 +\n (obstacle.N[j] - position[1]) ** 2])\n cons = np.concatenate([cons, obstacleDistance])\n\n return cons\n except:\n print('Error in constraints')\n\n def jacobian(self, u):\n try:\n N = self.N\n ncons_vary = self.ncons_vary\n nu = self.nu\n jac = np.zeros([ncons_vary,nu*N])\n eps = 1e-2\n\n for j in range(ncons_vary):\n\n for k in range(nu*N):\n uplus = np.copy(u)\n uminus = np.copy(u)\n\n uplus[k] = uplus[k] + eps\n cons_uplus = self.constraints(uplus)\n\n uminus[k] = uminus[k] - eps\n cons_uminus = self.constraints(uminus)\n\n jac[j,k] = (cons_uplus[j] - cons_uminus[j]) / (2 * eps)\n\n return jac.flatten()\n except:\n print('Error in jacobian')\n\n\n def setup(self, u0):\n try:\n N = self.N\n T = self.T\n t0 = self.t0\n x0 = self.x0\n nu = self.nu\n path = self.path\n obstacle = self.obstacle\n posIdx = self.posIdx\n ns_option = self.ns_option\n V_cmd = self.V_cmd\n lb_VTerm = self.lb_VTerm\n lb_VdotVal = self.lb_VdotVal\n fHandleCost = self.fHandleCost\n delChi_max = self.delChi_max\n obstacleID = self.obstacleID\n x00_23 = self.x00_23\n\n LARGE_NO = 1e12\n\n if ns == 6:\n\n lb_Vddot = np.ones([N,1])*lb_VddotVal\n lb_Chiddot = np.ones([N,1])*lb_ChiddotVal\n\n ub_Vddot = np.ones([N,1])*ub_VddotVal\n ub_Chiddot = np.ones([N,1])*ub_ChiddotVal\n\n lb = np.concatenate([lb_Vddot, lb_Chiddot])\n ub = 
np.concatenate([ub_Vddot,ub_Chiddot])\n\n elif ns == 4:\n\n lb_Vdot = np.ones([N, 1]) * lb_VdotVal\n lb_Chidot = np.ones([N, 1]) * lb_ChidotVal\n\n ub_Vdot = np.ones([N, 1]) * ub_VdotVal\n ub_Chidot = np.ones([N, 1]) * ub_ChidotVal\n\n lb = np.concatenate([lb_Vdot, lb_Chidot])\n ub = np.concatenate([ub_Vdot, ub_Chidot])\n\n\n lataccel_max = lataccel_maxVal\n\n cl_running = np.array([], dtype=float)\n cu_running = np.array([], dtype=float)\n\n cl_tmp1 = np.concatenate([cl_running, [-lataccel_max]])\n cu_tmp1 = np.concatenate([cu_running, [+lataccel_max]])\n\n #u_approx = u0.flatten(1)\n #x = prob.computeOpenloopSolution(u_approx, N, T, t0, x0)\n\n if ns_option == 1:\n\n # Speed Constraint\n cl_tmp2 = np.concatenate([cl_tmp1, [lb_V]])\n cu_tmp2 = np.concatenate([cu_tmp1, [ub_V]])\n\n # Terminal Constraint - V\n tmp = 0\n cl_tmp3 = np.concatenate([cl_tmp2, [tmp]]) # need to modify\n cu_tmp3 = np.concatenate([cu_tmp2, [tmp]])\n\n # Terminal Constraint - delChi\n cl = np.concatenate([cl_tmp3, [-delChi_max]])\n cu = np.concatenate([cu_tmp3, [+delChi_max]])\n\n\n elif ns_option == 2:\n\n cl_tmp2 = cl_tmp1\n cu_tmp2 = cu_tmp1\n\n cl_tmp3 = np.concatenate([cl_tmp2, [lb_VTerm]])\n cu_tmp3 = np.concatenate([cu_tmp2, [ub_VTerm]])\n\n # Terminal Constraint - delChi\n cl = np.concatenate([cl_tmp3, [-delChi_max]])\n cu = np.concatenate([cu_tmp3, [+delChi_max]])\n\n elif ns_option == 3:\n\n cl = np.concatenate([cl_tmp1, [-delChi_max]])\n cu = np.concatenate([cu_tmp1, [+delChi_max]])\n\n # total constraints with obstacles\n\n if self.addObstacleConstraints == True:\n\n #print(self.obstacleNumber)\n for j in self.obstacleNumber:\n for k in range(N):\n cl = np.concatenate([cl, [obstacle.sr[j]]])\n cu = np.concatenate([cu, [LARGE_NO]])\n\n nlp = ipopt.problem(\n n=nu*N,\n m=len(cl),\n problem_obj=nlpProb(N, T, t0, x0, x00_23, ncons, nu, path,\n obstacle, posIdx, ns_option, V_cmd,\n lb_VTerm, lb_VdotVal, delChi_max, obstacleID, fHandleCost),\n lb=lb,\n ub=ub,\n cl=cl,\n cu=cu\n )\n 
#print(len(cl))\n nlp.addOption('print_level', nlpPrintLevel)\n nlp.addOption('max_iter', nlpMaxIter)\n #nlp.addOption('dual_inf_tol',10.0) # defaut = 1\n nlp.addOption('constr_viol_tol',1e-4) # default = 1e-4\n nlp.addOption('compl_inf_tol',1e-4) # default = 1e-4\n nlp.addOption('acceptable_tol',1e-6) # default = 1e-6\n nlp.addOption('acceptable_constr_viol_tol',0.01) # default = 0.01\n\n return nlp\n except:\n print('Error in setup')","sub_path":"nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":13932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"185026037","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom .forms import LoginForm, UserRegistrationForm\nfrom .models import Profile\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\ndef user_login(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(username=cd['username'],\n password=cd['password'])\n\n if user is not None:\n if user.is_active:\n login(request, user)\n try:\n redirect_url = request.get_full_path().split('?next=')[1]\n except:\n redirect_url=\"/\"\n # print(request.build_absolute_uri(redirect_url))\n return redirect(redirect_url)\n # return HttpResponse('Authenticated successfully')\n else:\n return HttpResponse('Disabled account')\n else:\n return HttpResponse('Invalid login')\n else:\n form = LoginForm()\n return render(request, 'account/login.html', {'form': form})\n\n\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # Create a new user object but avoid saving it yet\n new_user = user_form.save(commit=False)\n # Set the chosen password\n new_user.set_password(user_form.cleaned_data['password'])\n # Save the User object\n new_user.save()\n # Create the user profile\n profile = Profile.objects.create(user=new_user)\n return render(request,\n 'account/register_done.html',\n {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n return render(request,\n 'account/register.html',\n {'user_form': user_form})\n\n\n\n\nfrom .forms import UserEditForm, ProfileEditForm\n\n@login_required(login_url=\"/account/login/\")\ndef edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(data=request.POST)\n profile_form = ProfileEditForm(data=request.POST,\n files=request.FILES)\n if 
user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, 'Profile updated successfully')\n else:\n messages.error(request, 'Profile updated Error')\n else:\n user_form = UserEditForm()\n profile_form = ProfileEditForm()\n return render(request,\n 'account/edit.html',\n {'user_form': user_form,\n 'profile_form': profile_form})\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"140354800","text":"\"\"\"\nThe Voyager 1 spacecraft, launched September 15, 1977, is the farthest-traveling Earthmade\nobject. It is presently on the outer edges of our solar system. The NASA update\npage on September 25, 2009, reported it as being a distance of approximately\n16,637,000,000 miles from the sun, traveling away from the sun at 38,241 miles/hour.\nWrite a program that will prompt the user for an integer number that indicates the\nnumber of days after 9/25/09. You will calculate the distance of Voyager from the sun\nusing the numbers from 9/25/09 (assume that velocity is constant) plus the entered\nnumber of days, and report:\n\u0002 Distance in miles\n\u0002 Distance in kilometers (1.609344 kilometers/mile)\nDistance in astronomical units (AU, 92,955,887.6 miles/AU)\n\u0002 Round-trip time for radio communication in hours. Radio waves travel at the speed\nof light, listed at 299,792,458 meters/second.\nhttp://voyager.jpl.nasa.gov/where/index.html\n\"\"\"\nfrom datetime import datetime\n\ndays_launch_now = (datetime.now() - datetime(1977, 9, 15)).days\ndays_launch_09 = (datetime(2009, 9, 25) - datetime(1977, 9, 15)).days\ndays_09_15 = (datetime.now() - datetime(2009, 9, 25)).days\ndistance_09 = 16637000000\ndistance_now = round(distance_09 / days_launch_09 * days_launch_now)\nprint(distance_now)\ndistance_km = round(distance_now * 1.609344)\nprint(distance_km)\ndistance_AU = round(distance_now / 92955887.6)\nprint(distance_AU)\n\nmiles_per_day = 38241 * 24\nnew_distance = days_launch_now * miles_per_day\nprint(round(new_distance))\nnew_km = 1.609344 * new_distance\nprint(new_km)\n\nmeter_hour = 299792458 * 3600\ntime_return = 1000 * new_km * 2 / meter_hour\n\nprint(time_return)","sub_path":"chapter 1/where_is_voyager.py","file_name":"where_is_voyager.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"298087694","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom read_datas import *\n\ndef deepnn(x):\n \"\"\"deepnn builds the graph for a deep net for classifying digits.\n\n Args:\n x: an input tensor with the dimensions (N_examples, 784), where 784 is the\n number of pixels in a standard MNIST image.\n\n Returns:\n A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values\n equal to the logits of classifying the digit into one of 10 classes (the\n digits 0-9). keep_prob is a scalar placeholder for the probability of\n dropout.\n \"\"\"\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n x_image = tf.reshape(x, [-1, 6, 9, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([2 * 3 * 64, 1024])\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 2*3*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout - controls the complexity of the model, prevents co-adaptation 
of\n # features.\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 2])\n b_fc2 = bias_variable([2])\n\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n return y_conv, keep_prob\n\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef main(_):\n # Import data\n df = pd.read_csv(\"data/train.csv\")\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 54])\n\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, 2])\n\n # Build the graph for the deep net\n y, keep_prob = deepnn(x)\n\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n # cross_entropy = tf.reduce_mean(\n # tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=y))\n\n # train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n # Feature extraction\n feature_M, labels_M = preproceesing(df)\n # print feature_M[0:2], labels_M[0:2]\n\n step_size = 2000\n batch_size = 100\n\n # Train\n for i in range(step_size):\n batch_xs, batch_ys = 
feature_M[i*batch_size:(i+1)*batch_size], labels_M[i*batch_size:(i+1)*batch_size]\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})\n\n # Test trained model\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n acry_rate_t = 0.0\n for j in range((len(labels_M)-step_size*batch_size)//batch_size):\n i += 1\n acry_rate = sess.run(accuracy, feed_dict={x: feature_M[i*batch_size:(i+1)*batch_size],\n y_: labels_M[i*batch_size:(i+1)*batch_size],\n keep_prob: 0.5})\n acry_rate_t += acry_rate\n print(acry_rate)\n print(\"-- Accuracy in total:\",acry_rate_t/j)\n print('test accuracy %g' % accuracy.eval(feed_dict={\n x: feature_M[step_size*batch_size+1:], y_: labels_M[step_size*batch_size+1:], keep_prob: 1.0}))\n\n\nif __name__ == '__main__':\n tf.app.run(main=main)\n","sub_path":"training_model_deep.py","file_name":"training_model_deep.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"353506293","text":"# main file for whole database program\n# it's a controller for whole thing - royce wilson\n\n# imports!\nfrom datetime import datetime\nimport time\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash, jsonify, Response, send_from_directory\nfrom werkzeug.routing import BaseConverter\nfrom db.db_init import connect_db, db_resource\nfrom xbeecom import xbeecom\nfrom models.sample import Samples\nimport sys\nimport os\nimport itertools\nimport operator\nimport logging\n\n\nlogging.basicConfig(format=\"[%(asctime)s %(name)s %(levelname)s] %(message)s\",\n filename=\"log.log\",\n level=logging.DEBUG)\n\n\n# creation!\napp = Flask(__name__)\napp.config.from_object('db.dbconfig')\n\n\nclass ListConverter(BaseConverter):\n def to_python(self, value):\n return value.split(\"+\")\n\n def to_url(self, values):\n return \"+\".join(map(BaseConverter.to_url, values))\napp.url_map.converters[\"list\"] = ListConverter\n\n\niso_format_string = \"%Y-%m-%dT%H:%M:%S\"\n@app.context_processor\ndef utility_processor():\n def format_date(time):\n return datetime.utcfromtimestamp(time).strftime(\"%Y-%m-%d %H:%M:%S\")\n return dict(format_date=format_date)\n\n\n@app.before_request\ndef before_request():\n g.db = connect_db(app)\n\n\n@app.teardown_request\ndef teardown_request(excetion):\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n\n\n#@app.route('/')\ndef dashboard():\n recent_samples = Samples.get_status(g.db)\n recent_samples = [dict(zip(recent_samples[\"fields\"], sample))\n for sample in recent_samples[\"results\"]]\n for node in recent_samples:\n node[\"dt\"] = datetime.strptime(node[\"time\"], iso_format_string)\n node[\"delta\"] = datetime.now() - node[\"dt\"]\n return render_template('dashboard.html', nodes=recent_samples)\n\n\n@app.route('/')\ndef graph():\n recent_samples = Samples.get_status(g.db)\n recent_samples = [dict(zip(recent_samples[\"fields\"], sample))\n for sample in 
recent_samples[\"results\"]]\n return render_template('graph2.html', nodes=range(1,8))\n\n\n@app.route('/download', methods=['GET', 'POST'])\ndef download():\n if request.method == 'POST':\n try:\n nodes = [int(n.partition('-')[-1])\n for n in request.form.keys()\n if n.startswith('node-')]\n nodes = set(nodes) & set(range(1, 8))\n except ValueError:\n pass\n\n # strip field- off of post parameters\n fields = [f.partition('-')[-1]\n for f in request.form.keys()\n if f.startswith('field-')]\n # only include actual field names\n fields = set(fields) & set(Samples._fields)\n\n start_date = int(time.mktime(datetime.strptime(request.form['start'], '%m/%d/%Y').timetuple()))\n end_date = int(time.mktime(datetime.strptime(request.form['end'], '%m/%d/%Y').timetuple()))\n end_date += 60*60*24\n\n result = Samples.get_samples(g.db, start_date, end_date, nodes,\n [f for f in Samples._fields if f in fields])\n csv = ','.join(result['fields']) + '\\n'\n csv += '\\n'.join(','.join(map(str, row)) for row in result['results'])\n return Response(\n csv,\n mimetype=\"text/csv\",\n headers={\"Content-disposition\":\n \"attachment; filename=data.csv\"})\n else:\n return render_template('download.html', nodes=range(1,8), fields=Samples.descriptions)\n\n@app.route('/data/status')\ndef node_status():\n return jsonify(Samples.get_status(g.db))\n\n\n@app.route(\"/data\")\ndef get_samples():\n try:\n start_date = request.args.get(\"start\")\n if start_date is not None:\n start_date = int(start_date) / 1000\n\n end_date = request.args.get(\"end\")\n if end_date is not None:\n end_date = int(end_date) / 1000\n\n nodes = request.args.get(\"nodes\")\n if nodes is not None:\n nodes = tuple(set(map(int, (n for n in nodes.split(\"+\")))))\n\n y_axis = request.args.get(\"field_y\")\n x_axis = request.args.get(\"field_x\")\n fields = set((y_axis, x_axis)) & set(Samples._fields)\n fields |= set((\"n_id\",))\n if len(fields) != 3:\n return jsonify({\"error\": \"Invalid axes.\"})\n except ValueError as 
e:\n return jsonify({\"error\": \"Error parsing fields: {0}\".format(e)})\n\n samples = Samples.get_samples(g.db, start_date, end_date,\n nodes, fields, exact_time=False)\n node_id_offset = samples[\"fields\"].index(\"n_id\")\n groups = itertools.groupby(samples[\"results\"],\n operator.itemgetter(node_id_offset))\n y_axis_offset = samples[\"fields\"].index(y_axis)\n chart_response = {\n \"start\": start_date*1000,\n \"end\": end_date*1000,\n \"series\": {\n k: [(s.time*1000, s[y_axis_offset]) for s in g]\n for k, g in groups\n }\n }\n\n return jsonify(chart_response)\n\n@app.route(\"/database\")\ndef get_database():\n return send_from_directory(os.path.dirname(__file__), \"database.db\")\n\n\n# -----------------------------------------------\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1 and sys.argv[1] == \"demo\":\n logging.info(\"starting in demo mode\")\n app.run(debug=True, host='0.0.0.0')\n else:\n with xbeecom(db_resource(app)) as xbee:\n app.run(debug=False, host='0.0.0.0')\n","sub_path":"flaskr.py","file_name":"flaskr.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"325084336","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nimport networkx as nx\nfrom vis_py import *\nfrom itertools import chain\nimport matplotlib.pyplot as plt\nimport copy\n\n\n'''df = np.genfromtxt('./data/italy.csv', delimiter=',')\ndf = df.reshape(len(df), 1)\ntime = np.arange(len(df), dtype=int).reshape(len(df), 1)\ndf = np.concatenate([time, df], axis=1)\n\n\n\nfor t in range(100, 1000, 100):\n df_temp = df[t - 100:t, :]\n W = visibility_graph(df_temp, directed=False)\n # W = np.heaviside(W, 0).astype(int)\n\n import matplotlib.pylab as plt\n import scipy.sparse as sps\n\n M = sps.csr_matrix(W)\n plt.spy(M)\n plt.show()\n\n plt.plot(df_temp[:, 0], df_temp[:, 1] / max(df_temp[:, 1]))\n plt.plot(df_temp[:, 0], np.sum(W, axis=1) / max(np.sum(W, axis=1)))\n plt.show()\n\n G_weight = nx.convert_matrix.from_numpy_matrix(W, create_using=nx.Graph)\n W = np.heaviside(W, 0).astype(int)\n G_unweight = nx.convert_matrix.from_numpy_matrix(W, create_using=nx.Graph)\n\n nx.draw(G_unweight, node_size=1)\n plt.draw()\n plt.show()'''\n\n# Placeholders to feed train and test data into the graph.\n# Since batch dimension is 'None', we can reuse them both for train and eval.\nclass get_data():\n def __init__(self):\n df = np.genfromtxt('./data/italy.csv', delimiter=',')\n df = df.reshape(len(df), 1)\n self.T = len(df)\n time = np.arange(len(df), dtype=int).reshape(self.T, 1)\n\n df = np.concatenate([time, df], axis=1)\n feature_matrix = np.zeros(shape=(self.T - 100, 17))\n label_vector = np.zeros(shape=(self.T - 100, 1))\n #get visibility graphs\n for t in range(100, self.T, 1):\n print(t, df[t, 1])\n df_temp = df[t - 100:t, :]\n W = visibility_graph(df_temp, directed=False)\n G = nx.convert_matrix.from_numpy_matrix(W, create_using=nx.Graph)\n\n avg_degree, avg_strength, std_strength, avg_clustering, diameter, degree_correlation, eigen_max, degrees_to_print = get_measures(G, W)\n last_prices = df[(t - 5):t, 1]\n to_add = 
np.concatenate([[avg_degree, avg_strength, std_strength, avg_clustering, diameter, degree_correlation, eigen_max], degrees_to_print, last_prices])\n feature_matrix[t - 100, :] += to_add\n label_vector[t - 100, :] += df[t, 1]\n\n self.feature_matrix = feature_matrix\n self.label_vector = label_vector\n self.count = 0\n\n\n def __call__(self):\n\n to_return = self.feature_matrix[ self.count : (self.count+batch_size), :], self.label_vector[ self.count : (self.count+batch_size), :]\n self.count += 1\n return to_return\n\n\n def all(self):\n to_return = self.feature_matrix, self.label_vector\n return to_return\n\n\n\n\n\n '''TFdataset = tf.data.Dataset.from_tensor_slices((feature_matrix, label_vector))\n\n dataset = TFdataset.repeat().batch(batch_size)\n\n iter = dataset.make_one_shot_iterator()\n return iter'''\n\n\ndef get_placeholders():\n x = tf.placeholder(tf.float32, [None, 17])\n y_ = tf.placeholder(tf.float32, [None, 1])\n return x, y_\n\n# Store results of runs with different configurations in a list.\n# Use a tuple (num_epochs, learning_rate) as keys, and a tuple (training_accuracy, testing_accuracy)\nexperiments_task1 = []\nsettings = [(5, 0.0001)]#, (5, 0.005), (15, 0.1)]\nlog_period_samples = 100\nbatch_size = 50\n\nprint('Training Model')\n# Train Model 1 with the different hyper-parameter settings.\nfor (num_epochs, learning_rate) in settings:\n\n # Reset graph, recreate placeholders and dataset.\n tf.reset_default_graph()\n x, y_ = get_placeholders()\n mnist = get_data()\n\n #####################################################\n # Define model, loss, update and evaluation metric. 
#\n\n # initialise weight matrix and bias for fully connected linear layer\n W_0 = tf.get_variable(\"w0\", dtype=tf.float32, shape=[17, 10], initializer=tf.contrib.layers.xavier_initializer())\n b_0 = tf.get_variable(\"b0\", dtype=tf.float32, shape=[1, 10], initializer=tf.contrib.layers.xavier_initializer())\n\n y_0 = tf.matmul(x, W_0) + b_0\n\n #relu_0 = tf.nn.relu(y_0)\n\n # initialise weight matrix and bias for fully connected linear layer\n W_1 = tf.get_variable(\"w1\", dtype=tf.float32, shape=[10, 5], initializer=tf.contrib.layers.xavier_initializer())\n b_1 = tf.get_variable(\"b1\", dtype=tf.float32, shape=[1, 5], initializer=tf.contrib.layers.xavier_initializer())\n\n y_1 = tf.matmul(y_0, W_1) + b_1\n\n relu_1 = tf.nn.relu(y_1)\n\n # initialise weight matrix and bias for fully connected linear layer\n W_2 = tf.get_variable(\"w2\", dtype=tf.float32, shape=[5, 1], initializer=tf.contrib.layers.xavier_initializer())\n b_2 = tf.get_variable(\"b2\", dtype=tf.float32, shape=[1, 1], initializer=tf.contrib.layers.xavier_initializer())\n\n y_2 = tf.matmul(relu_1, W_2) + b_2\n\n relu_2 = tf.nn.relu(y_2)\n\n\n #model_softmax = tf.nn.softmax(y_0) # apply softmax to the linear layer\n\n loss = tf.losses.mean_squared_error(relu_2, y_) # compute cross-entropy loss on the linear output (softmax applied internally)\n\n\n\n update = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss) # optimise to minimise loss via gradient descent\n\n #correct_preds = tf.equal(tf.argmax(model_softmax, 1), tf.argmax(y_, 1))\n get_accuracy = tf.metrics.mean_squared_error(relu_2, y_)\n\n get_prediction = [relu_2, y_]\n #get_accuracy = tf.reduce_mean(tf.cast(correct_preds, tf.float32)) # calculate the accuracy score\n\n #####################################################\n\n # Train.\n i, train_accuracy, test_accuracy = 0, [], []\n log_period_updates = int(log_period_samples / batch_size)\n with tf.train.MonitoredSession() as sess:\n while i < 1600:\n\n # Update.\n i 
+= 1\n batch_xs, batch_ys = mnist()\n\n #################\n # Training step #\n sess.run(update, feed_dict={x: batch_xs, y_: batch_ys}) # run the gradient descent update\n\n #################\n\n # Periodically evaluate.\n if i % log_period_updates == 0:\n #####################################\n # Compute and store train accuracy. #\n\n temp_accuracy = sess.run(get_accuracy,\n feed_dict={x: batch_xs, y_: batch_ys}) # get the accuracy\n\n train_accuracy.append(temp_accuracy[1]) # append it to the list\n\n\n\n batch_xs, batch_ys = mnist.all()\n\n predictions = sess.run(get_prediction,\n feed_dict={x: batch_xs, y_: batch_ys})\n\n\n\n #####################################\n #experiments_task1.append(train_accuracy)\n #experiments_task1.append(\n #((num_epochs, learning_rate), train_accuracy, test_accuracy))\n'''train_plot = []\nfor i in train_accuracy:\n train_plot.append(i[1])\n\ntest_plot = []\nfor i in test_accuracy:\n test_plot.append(i[1])'''\n\nplt.plot(np.arange(len(train_accuracy)), train_accuracy, label = 'train')\nplt.legend()\n\nplt.show()\n\n'''pred_0 = []\npred_1 = []\nfor i in predictions[0]:\n pred_0.append(i)\nfor j in predictions[1]:\n pred_1.append(j)\nplt.plot(np.arange(len(pred_0)), pred_0, label = 'pred')\nplt.plot(np.arange(len(pred_1)), pred_1, label = 'label')\nplt.legend()\nplt.show()'''\nplt.plot(np.arange(len(predictions[0])), predictions[0], label = 'pred')\nplt.plot(np.arange(len(predictions[1])), predictions[1], label = 'label')\nplt.grid()\nplt.legend()\nplt.show()\n\n\nplt.plot(np.arange(len(predictions[0])), predictions[0], label = 'pred')\nplt.plot(np.arange(len(predictions[1])), predictions[1], label = 'label')\nplt.grid()\nplt.legend()\n\nplt.savefig('price_chart.eps', format='eps', dpi=1000)\n","sub_path":"ML_keep.py","file_name":"ML_keep.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"356974814","text":"#!/usr/bin/env python\n# Copyright (C) 2010 McAfee, Inc. All rights reserved.\n# TestcaseID: lsh-1882\n# TestcaseDescription: Testcase to verify the installed version of VirusScan Enterprise for Linux\n\nimport sys\nimport logging\nimport re\n# Add common folder into the sys path for module importing\nsys.path.append(\"./Common\")\nsys.path.append(\"..\")\n#import commonFns\n#import commonOASFns\n#import commonAntiMalwareFns\nimport subprocess\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\ntestcaseName = sys.argv[0][:-3]\n\nclass TestCase(BaseTest):\n def __init__(self):\n logging.info(\"Testcase ID : LSH-1882\")\n logging.info(\"Description : Testcase to verify the installed version of VirusScan Enterprise for Linux\")\n self.expected = '1.7.0'\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n # Call the common initialization check\n _retval = BaseTest.init(self)\n if _retval != 0 :\n return _retval\n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n try :\n _info = commonAntiMalwareFns.getProductInfo()\n if not _info :\n logging.error(\"Failed to retrieve product info\")\n return 1\n self._actual = _info['version']\n except :\n logging.error(\"Exception occured while running the task\")\n return 1\n return 0\n def verify(self):\n \n if self.expected == self._actual :\n logging.info(\"The installed product version matched with the exisiting version\")\n return 0\n else :\n logging.error(\"The installed build version(%s) does not match with the available built version(%s)\" %(self.line, self.productVersion))\n return 1\n return 0\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n \n # Copy logs and clean them.\n commonFns.cleanLogs()\n return 0\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = 
TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds... \n if(retVal == 0):\n retVal = testObj.execute()\n \n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal = retVal + testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n \n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/VSEL - TestAutomation/Testcases/Common/VselProductCheck.py","file_name":"VselProductCheck.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"92062747","text":"#Oppgave 2.1\ni=1\ntallene = []\nwhile i !=0: #Naar bruker legg inn 0 lokken stopper\n\ti=int(input(\"legg inn:\\n\"))\n\ttallene.append(i)\n\tif i==0:\n\t\tprint(\"Funnet\")\n\n#Skriver ut taller i listen\nfor tall in tallene:\n\tprint(tall)\n\n#Skriver ut summen av tallene i listen\nminSum=0\nfor tall in tallene:\n\tminSum+=tall\nprint (\"Summen er: \"+str(minSum))\n\n#Skriver ut minstetallen i listen\nminstetall=tallene[0]\nfor tall in tallene:\n\tif tallstorstetall:\n\t\tstorstetall=tall\n\t\t\n#Printer minstetallen og storstetallen\nprint(\"Mistetallet er: \"+str(minstetall))\nprint(\"Størstetallet er: \"+str(storstetall))","sub_path":"IN1000 v17 Python/OBLIG4/regnelokke.py","file_name":"regnelokke.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"276679291","text":"# Copyright The IETF Trust 2019-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nfrom ietf.review.factories import ReviewAssignmentFactory, ReviewRequestFactory\nfrom ietf.utils.test_utils import TestCase, reload_db_objects\nfrom .mailarch import hash_list_message_id\n\nclass HashTest(TestCase):\n\n def test_hash_list_message_id(self):\n for list, msgid, hash in (\n ('ietf', '156182196167.12901.11966487185176024571@ietfa.amsl.com', 'lr6RtZ4TiVMZn1fZbykhkXeKhEk'),\n ('codesprints', 'E1hNffl-0004RM-Dh@zinfandel.tools.ietf.org', 'N1nFHHUXiFWYtdzBgjtqzzILFHI'),\n ('xml2rfc', '3A0F4CD6-451F-44E2-9DA4-28235C638588@rfc-editor.org', 'g6DN4SxJGDrlSuKsubwb6rRSePU'),\n (u'ietf', u'156182196167.12901.11966487185176024571@ietfa.amsl.com','lr6RtZ4TiVMZn1fZbykhkXeKhEk'),\n (u'codesprints', u'E1hNffl-0004RM-Dh@zinfandel.tools.ietf.org', 'N1nFHHUXiFWYtdzBgjtqzzILFHI'),\n (u'xml2rfc', u'3A0F4CD6-451F-44E2-9DA4-28235C638588@rfc-editor.org','g6DN4SxJGDrlSuKsubwb6rRSePU'),\n (b'ietf', b'156182196167.12901.11966487185176024571@ietfa.amsl.com','lr6RtZ4TiVMZn1fZbykhkXeKhEk'),\n (b'codesprints', b'E1hNffl-0004RM-Dh@zinfandel.tools.ietf.org', 'N1nFHHUXiFWYtdzBgjtqzzILFHI'),\n (b'xml2rfc', b'3A0F4CD6-451F-44E2-9DA4-28235C638588@rfc-editor.org','g6DN4SxJGDrlSuKsubwb6rRSePU'),\n ):\n self.assertEqual(hash, hash_list_message_id(list, msgid))\n \n\nclass ReviewAssignmentTest(TestCase):\n def do_test_update_review_req_status(self, assignment_state, expected_state):\n review_req = ReviewRequestFactory(state_id='assigned')\n ReviewAssignmentFactory(review_request=review_req, state_id='part-completed')\n assignment = ReviewAssignmentFactory(review_request=review_req)\n\n assignment.state_id = assignment_state\n assignment.save()\n review_req = reload_db_objects(review_req)\n self.assertEqual(review_req.state_id, expected_state)\n\n def test_update_review_req_status(self):\n # Test change\n for assignment_state in ['no-response', 'rejected', 'withdrawn', 
'overtaken']:\n self.do_test_update_review_req_status(assignment_state, 'requested')\n # Test no-change\n for assignment_state in ['accepted', 'assigned', 'completed', 'part-completed', 'unknown', ]:\n self.do_test_update_review_req_status(assignment_state, 'assigned')\n\n def test_no_update_review_req_status_when_other_active_assignment(self):\n # If there is another still active assignment, do not update review_req state\n review_req = ReviewRequestFactory(state_id='assigned')\n ReviewAssignmentFactory(review_request=review_req, state_id='assigned')\n assignment = ReviewAssignmentFactory(review_request=review_req)\n\n assignment.state_id = 'no-response'\n assignment.save()\n review_req = reload_db_objects(review_req)\n self.assertEqual(review_req.state_id, 'assigned')\n\n def test_no_update_review_req_status_when_review_req_withdrawn(self):\n # review_req state must only be changed to \"requested\", if old state was \"assigned\",\n # to prevent reviving dead review requests\n review_req = ReviewRequestFactory(state_id='withdrawn')\n assignment = ReviewAssignmentFactory(review_request=review_req)\n\n assignment.state_id = 'no-response'\n assignment.save()\n review_req = reload_db_objects(review_req)\n self.assertEqual(review_req.state_id, 'withdrawn')\n","sub_path":"ietf/review/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"323573268","text":"import os\nimport sys\nimport json\nimport base64\nimport zipfile\nfrom imageio import imread, imwrite\n\nwith open(sys.argv[1]) as data_file:\n data = json.load(data_file)\n\nif not os.path.exists(sys.argv[1].split('.')[0] + \".json\"):\n cnt = 0\n check = []\n tmp = None\n d = {}\n for x in data[\"log\"][\"entries\"]:\n found = False\n if \"d2vs6ffylckc3p.cloudfront.net/manga\" in x[\"request\"][\"url\"]:\n found = True\n if not found:\n continue\n else:\n if \"content\" not in x[\"response\"]:\n continue\n elif x[\"response\"][\"content\"][\"mimeType\"] != \"image/jpeg\" or x[\"response\"][\"content\"][\"encoding\"] != \"base64\":\n continue\n else:\n if x[\"response\"][\"content\"][\"text\"] not in check:\n check.append(x[\"response\"][\"content\"][\"text\"])\n d[cnt] = x[\"response\"][\"content\"][\"text\"]\n cnt += 1\n\n json.dump(d, open(sys.argv[1].split('.')[0] + \".json\", \"w\"), indent=4, sort_keys=True)\n print(sys.argv[1].split('.')[0] + \".json\" + \" done.\")\n","sub_path":"HPJ/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"403469171","text":"import typing\n\nimport pycspr.serialisation.byte_array.encoder.cl_complex as complex_encoder\nimport pycspr.serialisation.byte_array.encoder.cl_primitive as primitives_encoder\nimport pycspr.serialisation.byte_array.encoder.cl_type as type_encoder\nfrom pycspr.types import CLTypeKey\nfrom pycspr.types import CLValue\n\n\n\n# Map: CL type <-> encoder.\nENCODERS = {\n CLTypeKey.ANY: complex_encoder.encode_any,\n CLTypeKey.BOOL: primitives_encoder.encode_bool,\n CLTypeKey.BYTE_ARRAY: primitives_encoder.encode_byte_array,\n CLTypeKey.I32: primitives_encoder.encode_i32,\n CLTypeKey.I64: primitives_encoder.encode_i64,\n CLTypeKey.KEY: complex_encoder.encode_storage_key,\n CLTypeKey.LIST: complex_encoder.encode_list, \n CLTypeKey.MAP: complex_encoder.encode_map, \n CLTypeKey.OPTION: complex_encoder.encode_option, \n CLTypeKey.PUBLIC_KEY: complex_encoder.encode_public_key,\n CLTypeKey.STRING: primitives_encoder.encode_string,\n CLTypeKey.TUPLE_1: complex_encoder.encode_tuple1,\n CLTypeKey.TUPLE_2: complex_encoder.encode_tuple2,\n CLTypeKey.TUPLE_3: complex_encoder.encode_tuple3,\n CLTypeKey.U8: primitives_encoder.encode_u8,\n CLTypeKey.U32: primitives_encoder.encode_u32,\n CLTypeKey.U64: primitives_encoder.encode_u64,\n CLTypeKey.U128: primitives_encoder.encode_u128, \n CLTypeKey.U256: primitives_encoder.encode_u256,\n CLTypeKey.U512: primitives_encoder.encode_u512,\n CLTypeKey.UNIT: primitives_encoder.encode_unit,\n CLTypeKey.RESULT: complex_encoder.encode_result,\n CLTypeKey.UREF: complex_encoder.encode_uref,\n}\n\n\ndef encode(value: CLValue) -> bytes:\n \"\"\"Encodes a CL value as an array of bytes.\n\n :param value: A CL value that encapsulates both the associated CL type & it's pythonic value representation.\n :returns: A byte array representation conformant to CL serialisation protocol.\n \n \"\"\"\n encoder = ENCODERS[value.cl_type.typeof]\n if value.cl_type.typeof in {CLTypeKey.LIST, CLTypeKey.OPTION}:\n return encoder(value.parsed, 
ENCODERS[value.cl_type.inner_type.typeof])\n else:\n return encoder(value.parsed)\n\n\ndef encode_cl_value(entity: CLValue) -> bytes:\n \"\"\"Encodes a CL value.\n \n \"\"\"\n return primitives_encoder.encode_u8_array(encode(entity)) + \\\n type_encoder.encode_cl_type(entity.cl_type)\n","sub_path":"pycspr/serialisation/byte_array/encoder/cl_value.py","file_name":"cl_value.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"575173597","text":"# standard libraries\nimport pygame\nimport sys\n\n# game files\nfrom bfrl import constants\nfrom bfrl import draw\nfrom bfrl import maps\nfrom bfrl import game\nfrom bfrl import globals\n\n\ndef main():\n\n # UI Addresses\n center_x, center_y = (constants.CAMERA_WIDTH / 2, constants.CAMERA_HEIGHT / 2)\n game_tile_x, game_tile_y = (center_x, center_y - 260)\n footer_x, footer_y = (center_x - 500, constants.CAMERA_HEIGHT - 10)\n continue_x, continue_y = (center_x, center_y + 240)\n new_game_x, new_game_y = (center_x, continue_y + 40)\n options_x, options_y = (center_x, new_game_y + 40)\n exit_x, exit_y = (center_x, options_y + 40)\n\n game_title = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'PythonRL',\n 'font': constants.FONT_TITLE_SCREEN,\n 'coordinates': (game_tile_x, game_tile_y),\n 'text_color': constants.COLOR_WHITE,\n 'back_color': constants.COLOR_BLACK,\n 'alignment': 'center',\n }\n\n footer = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'music by icons8.com',\n 'font': constants.FONT_MESSAGE_TEXT,\n 'coordinates': (footer_x, footer_y),\n 'text_color': constants.COLOR_GREY,\n 'alignment': 'center',\n }\n\n continue_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'continue',\n 'size': (150, 35),\n 'center_coordinates': (continue_x, continue_y)\n }\n continue_button = draw.UIButton(**continue_button_attributes)\n\n new_game_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'new game',\n 'size': (150, 35),\n 'center_coordinates': (new_game_x, new_game_y)\n }\n new_game_button = draw.UIButton(**new_game_button_attributes)\n\n options_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'options',\n 'size': (150, 35),\n 'center_coordinates': (options_x, options_y)\n }\n options_button = draw.UIButton(**options_attributes)\n\n quit_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'quit game',\n 'size': 
(150, 35),\n 'center_coordinates': (exit_x, exit_y)\n }\n quit_button = draw.UIButton(**quit_button_attributes)\n\n # loads theme music\n pygame.mixer.music.load(globals.ASSETS.main_menu)\n pygame.mixer.music.play(loops=-1)\n\n menu_running = True\n while menu_running:\n\n list_of_events = pygame.event.get()\n mouse_position = pygame.mouse.get_pos()\n\n game_input = (list_of_events, mouse_position)\n\n for event in list_of_events:\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n # button updates\n if continue_button.update(game_input):\n pygame.mixer.music.stop()\n game.start(continue_game=True)\n\n if new_game_button.update(game_input):\n pygame.mixer.music.stop()\n game.start(continue_game=False)\n\n if options_button.update(game_input):\n options()\n\n if quit_button.update(game_input):\n pygame.mixer.music.stop()\n pygame.quit()\n sys.exit()\n\n # draw menu\n globals.SURFACE_MAIN.blit(globals.ASSETS.main_menu_bg, (0, 0))\n draw.text(**game_title)\n draw.text(**footer)\n\n # update surfaces\n continue_button.draw()\n new_game_button.draw()\n options_button.draw()\n quit_button.draw()\n pygame.display.update()\n\n\ndef options():\n\n window_center = (constants.CAMERA_WIDTH / 2, constants.CAMERA_HEIGHT / 2)\n\n settings_menu_width = 200\n settings_menu_height = 200\n settings_menu_bg_color = constants.COLOR_DEFAULT_BG\n\n settings_menu_surface = pygame.Surface((settings_menu_width, settings_menu_height))\n settings_menu_rect = pygame.Rect(0, 0, settings_menu_width, settings_menu_width)\n settings_menu_rect.center = window_center\n menu_center_x, menu_center_y = settings_menu_rect.center\n\n # Define Sound Settings Slider\n slider_sound_text = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'sound',\n 'font': constants.FONT_MESSAGE_TEXT,\n 'coordinates': (menu_center_x, menu_center_y - 60),\n 'text_color': constants.COLOR_WHITE,\n 'alignment': 'center',\n }\n slider_sound_attributes = {\n 'size': (125, 15),\n 'surface': 
globals.SURFACE_MAIN,\n 'center_coordinates': (menu_center_x, menu_center_y - 40),\n 'color_background': constants.COLOR_WHITE,\n 'color_foreground': constants.COLOR_GREEN,\n 'value': globals.PREFERENCES.volume_sound\n }\n slider_sound = draw.UISlider(**slider_sound_attributes)\n\n # Define Music Settings Slider\n slider_music_text = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'music',\n 'font': constants.FONT_MESSAGE_TEXT,\n 'coordinates': (menu_center_x, menu_center_y),\n 'text_color': constants.COLOR_WHITE,\n 'alignment': 'center',\n }\n slider_music_attributes = {\n 'size': (125, 15),\n 'surface': globals.SURFACE_MAIN,\n 'center_coordinates': (menu_center_x, menu_center_y + 20),\n 'color_background': constants.COLOR_WHITE,\n 'color_foreground': constants.COLOR_GREEN,\n 'value': globals.PREFERENCES.volume_music\n }\n slider_music = draw.UISlider(**slider_music_attributes)\n\n # Create save globals.PREFERENCES button\n save_preferences_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'save',\n 'size': (100, 35),\n 'center_coordinates': (menu_center_x, menu_center_y + 70)\n }\n save_preferences_button = draw.UIButton(**save_preferences_button_attributes)\n\n menu_close = False\n while not menu_close:\n\n list_of_events = pygame.event.get()\n mouse_position = pygame.mouse.get_pos()\n\n game_input = (list_of_events, mouse_position)\n\n # for event in list_of_events:\n # if event.type == pygame.KEYDOWN:\n # if event.key == pygame.K_ESCAPE:\n # menu_close = True\n\n slider_sound.update(game_input)\n if globals.PREFERENCES.volume_sound != slider_sound.value:\n globals.PREFERENCES.volume_sound = slider_sound.value\n globals.ASSETS.sound_adjust()\n\n slider_music.update(game_input)\n if globals.PREFERENCES.volume_music != slider_music.value:\n globals.PREFERENCES.volume_music = slider_music.value\n globals.ASSETS.sound_adjust()\n\n if save_preferences_button.update(game_input):\n game.preferences_save()\n menu_close = 
True\n\n settings_menu_surface.fill(settings_menu_bg_color)\n globals.SURFACE_MAIN.blit(settings_menu_surface, settings_menu_rect.topleft)\n\n draw.text(**slider_sound_text)\n slider_sound.draw()\n\n draw.text(**slider_music_text)\n slider_music.draw()\n\n save_preferences_button.draw()\n\n pygame.display.update()\n\n\ndef pause():\n \"\"\"\n This menu pauses the game and displays a simple message\n \"\"\"\n\n menu_close = False\n\n window_width = constants.CAMERA_WIDTH\n window_height = constants.CAMERA_HEIGHT\n\n menu_text = 'PAUSED'\n menu_font = constants.FONT_DEBUG_MESSAGE\n\n text_height = draw.helper_text_height(menu_font)\n text_width = draw.helper_text_width(menu_font) * len(menu_text)\n\n text_location = (int(window_width/2 - text_width/2), int(window_height/2 - text_height/2))\n\n while not menu_close:\n events_list = pygame.event.get()\n for event in events_list:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n menu_close = True\n if event.key == pygame.K_ESCAPE:\n menu_close = True\n\n font_color = constants.COLOR_WHITE\n bg_color = constants.COLOR_BLACK\n draw.text(globals.SURFACE_MAIN, menu_text, constants.FONT_DEBUG_MESSAGE, text_location, font_color, bg_color)\n globals.CLOCK.tick(constants.GAME_FPS)\n pygame.display.flip()\n\n\ndef inventory():\n\n menu_width = 200\n menu_height = 200\n\n window_width = constants.CAMERA_WIDTH\n window_height = constants.CAMERA_HEIGHT\n\n menu_x = int(window_width/2 - menu_width/2)\n menu_y = int(window_height/2 - menu_height/2)\n\n menu_location = (menu_x, menu_y)\n\n menu_font = constants.FONT_MESSAGE_TEXT\n menu_text_height = draw.helper_text_height(menu_font)\n\n inventory_surface = pygame.Surface((menu_width, menu_height))\n\n menu_close = False\n while not menu_close:\n\n menu_font = constants.FONT_MESSAGE_TEXT\n menu_font_color = constants.COLOR_WHITE\n menu_bg_color = constants.COLOR_BLACK\n menu_mouse_over_bg = constants.COLOR_GREY\n\n # Clear the menu\n 
inventory_surface.fill(constants.COLOR_BLACK)\n\n # Collect list of item names\n item_list = [item.display_name for item in globals.PLAYER.container.inventory]\n\n # Get list of input events\n events_list = pygame.event.get()\n\n # Get mouse coordinates relative to inventory window\n mouse_x, mouse_y = pygame.mouse.get_pos()\n mouse_x_relative = mouse_x - menu_x\n mouse_y_relative = mouse_y - menu_y\n\n # Check if mouse is in the window\n mouse_in_window = (0 < mouse_x_relative < menu_width and 0 < mouse_y_relative < menu_height)\n\n # convert mouse height to inventory line\n mouse_line_selection = int(mouse_y_relative / menu_text_height)\n\n for event in events_list:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_i:\n menu_close = True\n if event.key == pygame.K_ESCAPE:\n menu_close = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1 and mouse_in_window and mouse_line_selection <= len(item_list):\n globals.PLAYER.container.inventory[mouse_line_selection].item.use()\n globals.CLOCK.tick(constants.GAME_FPS)\n # TODO keep inventory open if item is an equipment\n menu_close = True\n\n # Draw item list\n for line, name in enumerate(item_list):\n name_location = (0, 0 + line * menu_text_height)\n if line == mouse_line_selection and mouse_in_window:\n draw.text(inventory_surface, name, menu_font, name_location, menu_font_color, menu_mouse_over_bg)\n else:\n draw.text(inventory_surface, name, menu_font, name_location, menu_font_color, menu_bg_color)\n\n # Render Game\n\n draw.game()\n\n # Display Menu\n globals.SURFACE_MAIN.blit(inventory_surface, menu_location)\n globals.CLOCK.tick(constants.GAME_FPS)\n pygame.display.flip()\n\n\ndef tile_select(origin=None, max_range=None, ignore_walls=True, ignore_creatures=True, radius=None):\n \"\"\"\n This menu lets the player select a tile on the map.\n The game pauses, produces a screen rectangle, and returns the map address when the LMB is clicked.\n :return: (x,y) map address tuple\n 
\"\"\"\n\n menu_close = False\n while not menu_close:\n\n # get mouse position\n mouse_coordinates = pygame.mouse.get_pos()\n map_coordinate_x, map_coordinate_y = globals.CAMERA.window_to_map(mouse_coordinates)\n\n map_address_x = int(map_coordinate_x / constants.CELL_WIDTH)\n map_address_y = int(map_coordinate_y / constants.CELL_HEIGHT)\n\n if origin:\n list_of_tiles = maps.find_line(origin, (map_address_x, map_address_y))\n else:\n list_of_tiles = [(map_address_x, map_address_y)]\n\n if max_range:\n list_of_tiles = list_of_tiles[:max_range + 1]\n\n for i, (x, y) in enumerate(list_of_tiles):\n if i == 0:\n continue\n if not ignore_walls and globals.GAME.current_map.check_for_wall(x, y):\n list_of_tiles = list_of_tiles[:i + 1]\n break\n if not ignore_creatures and globals.GAME.current_map.check_for_creature(x, y):\n list_of_tiles = list_of_tiles[:i + 1]\n break\n\n # get button clicks\n events_list = pygame.event.get()\n for event in events_list:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_l:\n menu_close = True\n if event.key == pygame.K_ESCAPE:\n menu_close = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n return list_of_tiles[-1]\n\n # draw game first\n globals.SURFACE_MAIN.fill(constants.COLOR_DEFAULT_BG)\n globals.SURFACE_MAP.fill(constants.COLOR_DEFAULT_BG)\n\n globals.CAMERA.update()\n\n # draw the map first\n draw.map_surface(globals.GAME.current_map.map_tiles)\n for obj in globals.GAME.objects_on_map:\n obj.draw()\n\n # draw rectangle at mouse position on top of game\n if len(list_of_tiles) > 1:\n for tile in list_of_tiles[1:]:\n if tile == list_of_tiles[-1]:\n draw.tile_rect(tile, marker='X')\n else:\n draw.tile_rect(tile)\n\n # TODO: Show radius if len = 1\n if radius:\n area_of_effect = maps.find_radius(list_of_tiles[-1], radius)\n for x, y in area_of_effect:\n draw.tile_rect((x, y), tile_color=constants.COLOR_RED)\n\n else:\n draw.tile_rect((map_address_x, map_address_y), marker='X')\n\n # update main surface 
with the new map\n globals.SURFACE_MAIN.blit(globals.SURFACE_MAP, (0, 0), globals.CAMERA.rectangle)\n\n draw.debug()\n draw.messages()\n\n globals.CLOCK.tick(constants.GAME_FPS)\n pygame.display.flip()\n","sub_path":"bfrl/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"122087587","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 13 16:57:09 2019\r\n\r\n@author: VIJ Global\r\n\"\"\"\r\n\r\n\r\nbalance = int(input(\"please put your balance on your credit card: \"))\r\nannualInterestRate = float(input(\"please put your annual interest rate in decimal: \"))\r\n\r\nmonthlyInterestRate = annualInterestRate/12\r\ni = 0\r\nmonth = 0\r\ndiff = 0.01\r\n\r\nmonthlyPaymentLower = balance/12\r\nmonthlyPaymentUpper = (balance*(1 + monthlyInterestRate)**12)/12.0\r\namount= 0\r\nunit_balance = balance\r\nwhile abs(amount-balance) >= diff:\r\n balance = balance - amount + ((balance - amount) * monthlyInterestRate)\r\n if balance != 0 and i < 12:\r\n amount = (monthlyPaymentLower+monthlyPaymentUpper)/2\r\n i += 1\r\n print(\"month: \", month, \"amount: \",amount, \"my balance is: \", balance)\r\n else:\r\n break \r\n month += 1\r\namount = round (amount,2)\r\nprint (\"Lowest Payment: \", amount, \"after \", month, \" months\")\r\n\r\n\r\n\r\n\"\"\"___Real code without bisection___\r\namount = 0\r\ninit_balance = balance\r\nmonthlyInterestRate = annualInterestRate/12\r\n\r\nwhile balance > 0:\r\n for i in range(12):\r\n balance = balance - amount + ((balance - amount) * monthlyInterestRate)\r\n if balance > 0:\r\n amount += 10\r\n balance = init_balance\r\n elif balance <= 0:\r\n break\r\nprint('Lowest Payment:', amount)\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\"\"\"___Real code without bisection___\r\n\r\nbalance = int(input(\"please put your balance on your credit card: \"))\r\nannualInterestRate = float(input(\"please put your annual interest rate in decimal: \"))\r\n\r\nmonthlyInterestRate = annualInterestRate/12\r\n\r\nmonthlyPaymentLower = balance/12\r\nmonthlyPaymentUpper = (balance*(1+monthlyInterestRate)**12)/12.0\r\namount = (monthlyPaymentLower+monthlyPaymentUpper)/2\r\n\r\n\r\ninit_balance = balance\r\ndiff = 0.01\r\n\r\nwhile abs(amount-balance) >= diff:\r\n for i in range(12):\r\n balance = balance - amount + ((balance 
- amount) * monthlyInterestRate)\r\n print (amount)\r\n if balance > 0:\r\n amount += 10\r\n balance = init_balance\r\n elif balance <= 0:\r\n break\r\namount = round (amount,2)\r\nprint('Lowest Payment:', amount)\r\n\"\"\"","sub_path":"Projects/p3_w2.py","file_name":"p3_w2.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"490725802","text":"import logging, sys, os, time\nfrom configs import config\nimport paramiko\n\nsys.path.append(os.path.dirname(os.getcwd()))\n# from automation.config import HOSTNAME\n\ndef send_to_sftpserver(request, dir=None):\n # setup a SSHClient object\n user = request.request['user']\n if not dir:\n dir = os.path.join(config.DEFAULT_DIR, user)\n\n ssh = paramiko.SSHClient()\n\n\n t = paramiko.Transport((config.SFTP_HOSTIP, config.SFTP_PORT))\n t.connect(username=config.SFTP_USERNAME, password=config.SFTP_PASSWORD)\n sftp = paramiko.SFTPClient.from_transport(t)\n #\n # # 允许将信任的主机自动加入到host_allow 列表,此方法必须放在connect方法的前面\n # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # # connect to host\n # try:\n # ssh.connect(hostname=config.SFTP_HOSTIP,\n # port=config.SFTP_PORT,\n # username=config.SFTP_USERNAME,\n # password=config.SFTP_PASSWORD,\n # timeout=15)\n # except Exception as e:\n # raise\n # else:\n # stdin, stdout, stderr = ssh.exec_command('ls')\n # for line in stdout:\n # print('stdout',line)\n #\n # for line in stderr:\n # print('stderr',line)\n # sftp = paramiko.SFTPClient.from_transport(ssh)\n\n for file in os.listdir(dir):\n if file.endswith('.csv'):\n try:\n new_file_name = '{}_{}'.format(user, file)\n try:\n os.rename(os.path.join(dir,file), os.path.join(dir, new_file_name))\n except Exception as e:\n logging.error('rename error')\n raise\n else:\n local_file = os.path.join(dir, new_file_name)\n remote_dir = os.path.join(config.SFTP_DIR, new_file_name)\n sftp.put(local_file, remote_dir)\n except Exception as e:\n raise\n else:\n logging.info('process for file {}'.format(os.path.join(dir, new_file_name)))\n os.remove(os.path.join(dir, new_file_name))\n\n ssh.close()\n\ndef mkdir(user):\n # make up the dirctory in selenium server to store the report for user\n # and return the dirctory name\n logging.debug('in make dir')\n dir_name = os.path.join(config.DEFAULT_DIR, user)\n if not os.path.exists(dir_name):\n output = 
os.popen('mkdir {}'.format(dir_name))\n logging.debug('create dir, the result is {}'.format(output.read()))\n return dir_name\n\nif __name__ == '__main__':\n # trigger_send_to_ftpserver(HOSTNAME)\n # set_environment(hostname='9.112.56.150')\n\n from downloader import MyRequest\n\n request = {}\n\n r = MyRequest(request)\n r.dirname = os.path.curdir\n send_to_sftpserver(r, r.dirname)\n","sub_path":"utils/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"293517992","text":"#!/usr/bin/env python\n\ntry:\n from setuptools import setup\n test_extras = {\n 'test_suite': 'pythonosc.test',\n }\nexcept ImportError:\n from distutils.core import setup\n test_extras = {}\n\n\nsetup(\n name='python-osc',\n version='1.6',\n author='attwad',\n author_email='tmusoft@gmail.com',\n description=(\n 'Open Sound Control server and client implementations in pure Python'),\n long_description=open('README.rst').read(),\n url='https://github.com/attwad/python-osc',\n platforms='any',\n packages=[\n 'pythonosc',\n 'pythonosc.parsing',\n 'pythonosc.test',\n 'pythonosc.test.parsing',\n ],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3',\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: System :: Networking',\n ],\n **test_extras\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"223997739","text":"'''\r\nread precatórios from text file\r\n'''\r\nimport csv\r\nimport sys\r\nimport re\r\n\r\nclass Precatorio():\r\n def __init__(self, num):\r\n self.code = num\r\n self.value = -1\r\n self.description = ''\r\n \r\n def set_description(self, desc:str):\r\n self.description = desc\r\n \r\n def set_value(self, val:int):\r\n self.value = val\r\n\r\n def has_description(self):\r\n return self.description != '' and self.description is not None\r\n\r\n def __str__(self):\r\n return '[{}] {} (R$ {})'.format(\r\n self.code,\r\n self.description,\r\n self.value\r\n )\r\n\r\ndef is_precatorio(text:str):\r\n m = re.search('\\d{20}', text)\r\n return m is not None\r\n\r\ndef is_money_value(text:str):\r\n m = re.search('[^\\d\\.]', text)\r\n return m is None\r\n\r\ntext_file = open(sys.argv[1], 'r', encoding='utf-8')\r\nprevious_line = ''\r\nprecatorios = []\r\nvalores = []\r\nreading_values = False\r\n\r\nsilent = '-s' in sys.argv\r\n\r\nfor line in text_file:\r\n line = line.replace('\\n', '')\r\n if line == '':\r\n continue\r\n if is_precatorio(line):\r\n # significa que temos o código do precatório, então\r\n # a linha anterior é a descrição dele\r\n p = Precatorio(line)\r\n precatorios.append(p)\r\n else:\r\n if precatorios and not reading_values and not precatorios[len(precatorios) - 1].has_description():\r\n precatorios[len(precatorios) - 1].set_description(line)\r\n \r\n if 'VALOR (R$)' in line:\r\n reading_values = True\r\n\r\n if is_money_value(line) and reading_values:\r\n valores.append(int(line.replace('.', '')))\r\n\r\nif len(precatorios) == len(valores):\r\n with open(sys.argv[2], mode='w', newline='') as csvfile:\r\n writer = csv.writer(csvfile, delimiter=',', quotechar='\"')\r\n for p, v in zip(precatorios, valores):\r\n p.set_value(v)\r\n if not silent:\r\n print(p)\r\n writer.writerow([p.code, p.description, p.value])\r\nelse:\r\n print('Lista de valores e lista de precatórios não 
batem.')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"220039046","text":"# -*- coding: utf-8 -*-\n\n# (c) 2017\n# This is an example plugin\n\nimport PyQt5.QtCore as core\nimport PyQt5.QtWidgets as gui\n\nimport ecu\nimport options\n\n_ = options.translator('ddt4all')\n\nplugin_name = _(\"Megane/Scenic II UCH Reset\")\ncategory = _(\"UCH Tools\")\nneed_hw = True\n\n\nclass Virginizer(gui.QDialog):\n def __init__(self):\n super(Virginizer, self).__init__()\n self.megane_uch = ecu.Ecu_file(\"UCH_84_J84_03_60\", True)\n layout = gui.QVBoxLayout()\n infos = gui.QLabel(\n _(\"MEGANE II UCH VIRGINIZER
THIS PLUGIN WILL ERASE YOUR UCH
GO AWAY IF YOU HAVE NO IDEA OF WHAT IT MEANS\"))\n infos.setAlignment(core.Qt.AlignHCenter)\n check_button = gui.QPushButton(_(\"Check UCH Virgin\"))\n self.status_check = gui.QLabel(_(\"Waiting\"))\n self.status_check.setAlignment(core.Qt.AlignHCenter)\n self.virginize_button = gui.QPushButton(_(\"Virginize UCH\"))\n layout.addWidget(infos)\n layout.addWidget(check_button)\n layout.addWidget(self.status_check)\n layout.addWidget(self.virginize_button)\n self.setLayout(layout)\n self.virginize_button.setEnabled(False)\n self.virginize_button.clicked.connect(self.reset_ecu)\n check_button.clicked.connect(self.check_virgin_status)\n self.ecu_connect()\n\n def ecu_connect(self):\n connection = self.megane_uch.connect_to_hardware()\n if not connection:\n options.main_window.logview.append(_(\"Cannot connect to ECU\"))\n self.finished()\n\n def check_virgin_status(self):\n self.start_diag_session_aftersales()\n\n virigin_check_request = self.megane_uch.requests[u'Status général des opérations badges Bits']\n request_values = virigin_check_request.send_request()\n\n if request_values is not None:\n virgin = request_values[u\"VSC UCH vierge (NbBadgeAppris=0)\"]\n if virgin == u'Vierge':\n self.virginize_button.setEnabled(False)\n self.status_check.setText(_(\"UCH virgin\"))\n return\n\n if virgin == u'Codée':\n self.virginize_button.setEnabled(True)\n self.status_check.setText(_(\"UCH coded\"))\n return\n\n self.status_check.setText(_(\"UNEXPECTED RESPONSE\"))\n\n def start_diag_session_study(self):\n sds_request = self.megane_uch.requests[u\"StartDiagSession Etude\"]\n sds_stream = \" \".join(sds_request.build_data_stream({}))\n if options.simulation_mode:\n print(\"SdSA stream\", sds_stream)\n return\n options.elm.start_session_can(sds_stream)\n\n def start_diag_session_aftersales(self):\n sds_request = self.megane_uch.requests[u\"Start Diagnostic Session\"]\n sds_stream = \" \".join(sds_request.build_data_stream({}))\n if options.simulation_mode:\n print(\"SdSS 
stream\", sds_stream)\n return\n options.elm.start_session_can(sds_stream)\n\n def reset_ecu(self):\n self.start_diag_session_study()\n\n reset_request = self.megane_uch.requests[u\"RAZ EEPROM\"]\n request_response = reset_request.send_request()\n\n if request_response is not None:\n self.status_check.setText(_(\"CLEAR EXECUTED\"))\n else:\n self.status_check.setText(_(\"CLEAR FAILED\"))\n\n\ndef plugin_entry():\n v = Virginizer()\n v.exec_()\n","sub_path":"megane2_uch_reset.py","file_name":"megane2_uch_reset.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"513703723","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n# Web Page抓取器\n\nimport sys\nimport urllib\nimport urllib2\nimport cookielib\nfrom gzip import GzipFile\nfrom StringIO import StringIO\nfrom encoding_processor import *\n\nclass WebFetcher:\n ''' web page fetcher class '''\n\n def set_proxy_support(self, type, host):\n proxy = urllib2.ProxyHandler({type:host})\n opener = urllib2.build_opener(proxy, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def set_cookie_support(self):\n cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())\n opener = urllib2.build_opener(cookie, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def set_encoding_support(self):\n encoding = ContentEncodingProcessor()\n opener = urllib2.build_opener(encoding, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def set_proxy_and_cookie_and_encoding_support(self, type, host):\n proxy = urllib2.ProxyHandler({type:host})\n cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())\n encoding = ContentEncodingProcessor()\n opener = urllib2.build_opener(proxy, cookie, encoding, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def get_url_host(self, url):\n url_list = url.split(\"/\")\n url_host = url_list[2]\n return url_host\n\n def make_http_header(self, url):\n host = self.get_url_host(url)\n http_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20100101 Firefox/12.0',\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3\",\n \"DNT\": \"1\",\n 'Host': host}\n return http_header\n\n def make_http_request(self, url_base, post_field_dict):\n http_header = self.make_http_header(url_base)\n\n if post_field_dict is None:\n post_data = None\n else:\n post_data = urllib.urlencode(post_field_dict)\n\n http_request = urllib2.Request(url = url_base + \"?\" + post_data, data = None, headers = 
http_header)\n return http_request\n\n def do(self, url_base, post_field_dict = None):\n http_request = self.make_http_request(url_base, post_field_dict)\n res_content = urllib2.urlopen(http_request).read()\n return res_content\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 4:\n sys.exit(1)\n\n url = sys.argv[1]\n post_field_dict = eval(sys.argv[2])\n out_file = sys.argv[3]\n\n fetcher = WebFetcher()\n fetcher.set_encoding_support()\n page_content = fetcher.do(url, post_field_dict)\n\n fd = open(out_file, \"w\")\n fd.write(page_content + \"\\n\")\n fd.flush()\n fd.close()\n\n","sub_path":"solution/tradesman/python/web_fetcher.py","file_name":"web_fetcher.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"159020021","text":"from glob import glob\n\nfrom .filecontext import FileContext\nfrom .utils import typename, printable_string_sequence\nfrom .sfdata import SFData\nfrom .sfdatafile import SFDataFile\n\n\nclass SFDataFiles(FileContext, SFData):\n\n def __init__(self, *patterns):\n self.fnames = fnames = explode_filenames(patterns)\n if not fnames:\n patterns = printable_string_sequence(patterns)\n raise ValueError(f\"No matching file for patterns: {patterns}\")\n self.files = [SFDataFile(fn) for fn in fnames]\n super().__init__()\n for f in self.files:\n self.update(f)\n\n def close(self):\n for f in self.files:\n f.close()\n\n def __repr__(self):\n tn = typename(self)\n fns = self.fnames\n fns = \"\\\", \\\"\".join(fns)\n entries = len(self)\n return f\"{tn}(\\\"{fns}\\\"): {entries} channels\"\n\n\n\ndef explode_filenames(patterns):\n fnames = []\n for p in patterns:\n fns = glob(p)\n fnames.extend(fns)\n fnames = sorted(set(fnames))\n return fnames\n\n\n\n","sub_path":"sfdata/sfdatafiles.py","file_name":"sfdatafiles.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"565785226","text":"import math\nimport numpy as np\nfrom scipy import interpolate\nfrom sklearn.model_selection import KFold\n\n\ndef calculate_distance(embeddings1, embeddings2, distance_metric=0):\n '''\n Calculate distance of embeddings1 and embeddings2\n\n Args:\n embeddings1: the first embeddings\n embeddings2: the second embeddings\n distance_metric: the metric use for distance calculate[0:Euclidian 1:Cosine]\n '''\n if distance_metric == 0:\n # Euclidian distance\n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff), 1)\n elif distance_metric == 1:\n # Distance based on cosine similarity\n dot = np.sum(np.multiply(embeddings1, embeddings2), 1)\n norm = np.linalg.norm(embeddings1, axis=1)*np.linalg.norm(embeddings2, axis=1)\n similarity = dot / norm\n dist = np.arccos(similarity) / math.pi\n else: \n raise 'Undefined distance metric %d' % distance_metric\n return dist.reshape(-1, 1)\n\n\ndef calculate_accuracy(threshold, dist, actual_issame):\n '''\n Calculate accuracy of the fixed threshold\n\n Args:\n threshold: distance less than threshold will be regard as identical\n dist: the distance of embedding1 and embbeding2\n actual_issame: the true flag indicate which pair is same\n '''\n predict_issame = np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(np.logical_and(np.logical_not(predict_issame), \n np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n\n tpr = 0 if (tp+fn==0) else tp / (tp+fn)\n fpr = 0 if (fp+tn==0) else fp / (fp+tn)\n acc = (tp+tn) / dist.size\n return tpr, fpr, acc\n\n\ndef calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, \n nrof_folds=10, distance_metric=0, subtract_mean=False):\n '''\n Calculate roc\n\n Args:\n thresholds: all the thresholds use for calculate roc\n embeddings1: the first embeddings\n 
embeddings2: the second embeddings\n actual_issame: the true flag indicate which pair is same\n nrof_folds: number of folds\n distance_metric: the metric use for distance calculate[0:Euclidian 1:Cosine]\n subtract_mean: Subtract feature mean before calculating distance\n '''\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = KFold(n_splits=nrof_folds, shuffle=False)\n\n tprs = np.zeros((nrof_folds, nrof_thresholds))\n fprs = np.zeros((nrof_folds, nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n\n indices = np.arange(nrof_pairs)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n if subtract_mean:\n # providing the dimension(features) of embeddings1/2 is 512\n # calculate mean of each features\n mean = np.mean(np.concatenate([embeddings1[train_set], \n embeddings2[train_set]]), axis=0)\n else:\n mean = 0\n dist = calculate_distance(embeddings1-mean, embeddings2-mean, distance_metric)\n\n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n for threshold_idx, threshold in enumerate(thresholds):\n _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, \n dist[train_set], actual_issame[train_set])\n best_threshold_idx = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = \\\n calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_idx],\n dist[test_set], actual_issame[test_set])\n\n tpr = np.mean(tprs, 0)\n fpr = np.mean(fprs, 0)\n return tpr, fpr, accuracy\n\n\ndef calculate_tar_far(threshold, dist, actual_issame):\n '''\n Calculate true acceptance rate and false acceptance rate\n '''\n predict_issame = np.less(dist, threshold)\n true_accept = 
np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n tar = float(true_accept) / float(n_same)\n far = float(false_accept) / float(n_diff)\n return tar, far\n\n\ndef calculate_tar(thresholds, embeddings1, embeddings2, actual_issame, far_target, \n nrof_folds=10, distance_metric=0, substract_mean=False):\n '''\n Using cross validation, Find the threshold in thresholds that fase acceptance \n will less than far_target. And then calculate true acceptance rate and \n corresponding std, fase acceptance rate\n\n Args:\n thresholds: all the thresholds use for calculate roc\n embeddings1: the first embeddings\n embeddings2: the second embeddings\n actual_issame: the true flag indicate which pair is same\n far_target: false acceptance rate target for find best threshold\n nrof_folds: number of folds\n distance_metric: the metric use for distance calculate[0:Euclidian 1:Cosine]\n subtract_mean: Subtract feature mean before calculating distance\n '''\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = KFold(n_splits=nrof_folds, shuffle=False)\n\n tar = np.zeros(nrof_folds)\n far = np.zeros(nrof_folds)\n\n indices = np.arange(nrof_pairs)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n if substract_mean:\n mean = np.mean(np.concatenate([embeddings1[train_set], \n embeddings2[train_set]], axis=0), axis=0)\n else:\n mean = 0\n dist = calculate_distance(embeddings1-mean, embeddings2-mean, distance_metric)\n\n # Find the threshold that gives FAR = far_target\n # which means that find threshold to make false acceptance rate \n # less than(or equal to) far_target\n far_train = np.zeros(nrof_thresholds)\n for 
threshold_idx, threshold in enumerate(thresholds):\n _, far_train[threshold_idx] = calculate_tar_far(threshold, \n dist[train_set], actual_issame[train_set])\n if np.max(far_train) >= far_target:\n f = interpolate.interp1d(far_train, thresholds, kind='slinear')\n threshold = f(far_target)\n else:\n threshold = 0.0\n\n tar[fold_idx], far[fold_idx] = calculate_tar_far(threshold, \n dist[test_set], actual_issame[test_set])\n \n tar_mean = np.mean(tar)\n far_mean = np.mean(far)\n tar_std = np.std(tar)\n return tar_mean, tar_std, far_mean","sub_path":"ml_utils/eval_metrics.py","file_name":"eval_metrics.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"147562528","text":"class Solution:\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\n return False\n\n def dfs(self,board,i,j,word):\n if not word:\n return True\n if i not in range(len(board)) or j not in range(len(board[0])):\n return False\n if board[i][j]!=word[0]:\n return False\n tmp=board[i][j]\n board[i][j]='#'\n res=self.dfs(board,i-1,j,word[1:]) or self.dfs(board,i+1,j,word[1:]) or self.dfs(board,i,j-1,word[1:]) or self.dfs(board,i,j+1,word[1:])\n board[i][j]=tmp\n return res\n\ndef main():\n so=Solution()\n print(so.exist([[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]],\"ABCCED\"))\n\nif __name__ == '__main__':\n main()","sub_path":"79_WordSearch.py","file_name":"79_WordSearch.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"595111786","text":"# TO DELETE vvvvvvvvvvvvvvv\nsign_up = [1,0]\nscans = [[1, 2, 4], [3, 6]]\n# TO DELETE ^^^^^^^^^^^^^^^\n\nf = open(\"solution\",\"w+\")\nf.write(\"%d\\n\" % len(sign_up))\ni = 0\nfor lib in sign_up :\n\tf.write(\"%d %d\\n\" % (lib, len(scans[i])))\n\tprint(*scans[i], sep=' ', end='\\n', file=f)\n\ti += 1\nf.close()","sub_path":"output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"466158112","text":"# KMP算法\n\n# 首先计算next数组,即我们需要怎么去移位\n# 接着我们就是用暴力解法求解即可\n# next是用递归来实现的\n# 这里是用回溯进行计算的\ndef calNext(str2):\n i=0\n next=[-1]\n j=-1\n while(i=len(s2)):\n return i -len(s2)#说明匹配到最后了\n else:\n return 0\ns1 = \"acabaabaabcacaabc\"\ns2 = \"abaabcac\"\nprint(KMP(s1,s2))\n","sub_path":"practice/practice_4/kmp1.py","file_name":"kmp1.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"190119283","text":"\"\"\"\r\nD E P A R T E M E N F I S I K A - U G M\r\nBulaksumur Yogyakarta, Kabupaten Sleman 55281\r\n-------------------------------------------------------------------------------\r\nAuthor : Reizkian Yesaya .R\r\nEmail : reizkianyesaya@gmail.com\r\nProgram : Advection\r\nCreated : Wed Apr 24 22:23:10 2019\r\n\"\"\"\r\nimport numpy as np\r\nimport mayavi.mlab as mlab\r\nimport matplotlib.pyplot as plt\r\n\r\nN=2**9\r\nL=1\r\n#dt=0.01\r\nimg=(0+1.j)\r\nx = np.linspace(-L,L,N)\r\n\r\nu=0.02\r\nd=0\r\nnu=0.001\r\n\r\nf=np.zeros(N)\r\nfor i in range(int(N/2-50),int(N/2+50)):\r\n f[i]=1\r\n\r\n\r\nf = 1/(np.cosh(10.0*x)**2)\r\nf_hat = np.fft.fft(f)\r\n\r\n# k-space construction\r\nk_plus=np.arange(0,N/2+1,1)\r\nk_minus=np.arange(-N/2,0,1)\r\nk_array=np.hstack((k_plus,k_minus))\r\n\r\ndk=np.pi/L\r\nk=k_array*dk\r\nk2=k**2\r\n\r\ntmax=10\r\nN_t=100\r\ndt=tmax/N_t\r\n\r\nplt.plot(x,f)\r\nfor t in range(0,N_t,10):\r\n for i in range (0,N):\r\n c = (img*k[i]*u)-(k2[i]*d)\r\n f_hat[i]=f_hat[i]*np.exp(c*(t*dt))\r\n\r\n f_xt=np.real(np.fft.ifft(f_hat))\r\n\r\n time=dt*t\r\n print(\"time = \",time,\" sec\")\r\n plt.plot(x,f_xt)\r\n plt.grid(True)\r\n plt.show()\r\n\r\nprint(\"=== PROGRAM HAS BEEN SUCESSFULLY EXECUTED ===\")\r\n\r\n# =============================================================================\r\n# fig1 = plt.figure()\r\n# ax1 = fig1.add_subplot(111)\r\n# ax1.plot(k_array,f_fft,'r')\r\n# plt.title('')\r\n# plt.grid(True) \r\n# \r\n# fig2 = plt.figure()\r\n# ax2 = fig2.add_subplot(111)\r\n# ax2.plot(x,f,'b')\r\n# plt.title('')\r\n# plt.grid(True) \r\n# =============================================================================\r\n","sub_path":"Advection.py","file_name":"Advection.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"109073185","text":"import numpy as np \nimport pandas as pa\nfrom scipy.sparse import csr_matrix, isspmatrix\nimport pyensembl \nfrom pyensembl import EnsemblRelease\n\ndef normalizeUMIWithscalefactor(data, scale_factor=10e6):\n cellNormalizedData=np.log(1+(data/np.sum(data,axis=1)[0]))*scale_factor\n return(cellNormalizedData)\n\ndef gene_Length(list_of_genes, ensembl_id=True, Ensembl_Release=75):\n gn=EnsemblRelease(Ensembl_Release)\n if ensembl_id:\n gene_pos=list(map(gn.locus_of_gene_id, list_of_genes))\n gene_pos_end=[i.end for i in gene_pos]\n gene_pos_start=[i.start for i in gene_pos]\n \n else:\n gene_pos=list(map(gn.loci_of_gene_names, list_of_genes))\n gene_pos_end=[i[0].end for i in gene_pos]\n gene_pos_start=[i[0].start for i in gene_pos]\n\n gene_len=np.array(gene_pos_end)-np.array(gene_pos_start)\n return pa.DataFrame({'gene_names':list_of_genes, \"gene_length\":gene_len/1000}) \n\ndef getTPM(rowCountData, gene_Names=None, index_column=None, ensembol_gene=False, Ensembl_release=75):\n\n if(isspmatrix(rowCountData)):\n rowCountData=pa.DataFrame(rowCountData.toarray())\n rowCountData.index=gene_Names\n \n else:\n if index_column is not None:\n rowCountData.index=rowCountData[index_column]\n rowCountData=rowCountData.drop([index_column], axis=1)\n\n if ensembol_gene:\n known_genes=list(map(pyensembl.common.is_valid_ensembl_id,gene_Names)) \n rowCountData=rowCountData.iloc[known_genes,:]\n \n rowCountData.index=np.array(gene_Names)[np.array(known_genes)]\n \n gene_length=gene_Length(gene_Names, ensembl_id=ensembol_gene, Ensembl_Release=Ensembl_release) \n else:\n\n gene_length=gene_Length(gene_Names, ensembl_id=ensembol_gene, Ensembl_Release=Ensembl_release) \n \n if(gene_length.shape[0]==rowCountData.shape[0]):\n \n count_data_RKB=rowCountData.div(list(gene_length[\"gene_length\"]), axis=0)\n \n count_data_TPM=np.array(count_data_RKB)/ np.array(rowCountData.sum(axis=0)).reshape((1,len(rowCountData.sum(axis=0))))\n \n 
count_data_TPM=pa.DataFrame(count_data_TPM*10e6)\n count_data_TPM.index=list(rowCountData.index)\n count_data_TPM.columns=list(rowCountData.columns)\n \n count_data_TPM=count_data_TPM[(count_data_TPM.T != 0).any()].dropna(axis=0)\n else:\n print(\"one or more gene ids are not annoatated under\", Ensembl_Release, \"Please use different release\")\n \n return count_data_TPM.T, list(count_data_TPM.index)\n\ndef ENSEMBLID_to_geneSymbol(ENSEMBL, Ensembl_Release=75): \n data=EnsemblRelease(Ensembl_Release)\n if type(ENSEMBL) is list:\n Genes=list(map(data.gene_name_of_gene_id,ENSEMBL))\n else:\n Genes=data.gene_name_of_gene_id(ENSEMBL)\n return Genes\n\n","sub_path":"MICTI/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"335232351","text":"# Este es el procedimientos para crear y llenar la lista o el arreglo\ndef crear_lista():\n\tglobal lista\n\tlista=[]\n\tprint(\"Cuanto elementos tendra la lista\")\n\telementos=input()\n\telementos=int(elementos)\n\tfor i in range(0,elementos):\n\t\tprint(\"Ingrese el valor de el elemento: \",i)\n\t\tvalor=input()\n\t\tvalor=int(valor)\n\t\tlista.append(valor)\n\t\t\n#Agregar Elemento a la lista \ndef agregar_elemento():\n\tprint (\"Ingrese la posicion que quiere agregar :\")\n\tposicion=input()\n\tposicion=int(posicion)\n\tprint (\"Ingrese el valor que quiere agregar :\")\n\tnuevo_elemento=input()\n\tnuevo_elemento=int(nuevo_elemento)\n\tlongitud=len(lista)\n\tlongitud=int(longitud)\n\tif posicion>longitud or posicion<0:\n\t\tprint(\"Indice debe de estar entre 0 y \",longitud )\n\telse:\n\t\tlista.insert(posicion,nuevo_elemento)\n\t\tprint(\"Nuevo Elemento Agregado Correctamente \")\n\t\n#Modificar Elemento a la lista \ndef modificar_elemento():\n\tprint (\"Ingrese la posicion que quiere modificar :\")\n\tposicion=input()\n\tposicion=int(posicion)\n\tprint (\"Ingrese el valor que quiere modificar :\")\n\tnuevo_elemento=input()\n\tnuevo_elemento=int(nuevo_elemento)\n\tlongitud=len(lista)\n\tlongitud=int(longitud)\n\tif posicion>longitud or posicion<0:\n\t\tprint(\"Para modificar el indice debe de estar entre 0 y \",longitud )\n\telse:\n\t\tlista[posicion]=nuevo_elemento\n\t\tprint(\"El Elemento ha sido modificado correctamente \")\n\n#Eliminar Elemento a la lista \ndef eliminar_elemento():\n\tprint (\"indique el indice a eliminar\")\n\tindice=input()\n\tindice=int(indice)\n\tlongitud=len(lista)\n\tlongitud=int(longitud)\n\tif indice>longitud or indice<0:\n\t\tprint(\"Para eliminar el indice debe de estar entre 0 y \",longitud-1 )\n\telse:\n\t\tdel lista[indice]\n\t\tprint(\"Elemento eliminado \")\n\t\n\n#Este es el procedimiento que me muestra la lista \ndef mostrar_lista():\n\tprint(\"La lista es : \" ,lista)\n\t 
\n#---------------Cuerpo principal-------------------------------\nsalir=0\nwhile salir!=5:\n\tprint (\"\\t\\t\\t================================\\n\")\n\tprint (\"\\t\\t\\tOPERACIONES CON LISTAS\\n\\n\")\n\tprint (\"\\t\\t\\t[1. Crear una lista ]\\n\")\n\tprint (\"\\t\\t\\t[2. Ingresar datos a la lista ]\\n\")\n\tprint (\"\\t\\t\\t[3. Modificar datos de la lista]\\n\")\n\tprint (\"\\t\\t\\t[4. Eliminar datos de la lista ]\\n\")\n\tprint (\"\\t\\t\\t[5. Salir del programa ]\\n\")\n\topcion=int(input(\"\\t\\t\\t[Ingrese una opcion : \"))\n\tprint (\"\\t\\t\\t================================\\n\")\n\t\n\tif opcion==1:\n\t\tcrear_lista()\n\t\tmostrar_lista()\n\telif opcion==2:\n\t\ttry:\n\t\t\tagregar_elemento()\n\t\t\tmostrar_lista()\n\t\texcept:\n\t\t\tprint (\"Se genero un error es posible que la lista no ha sido creada,\\n Verifique opcion 1 \")\n\telif opcion==3:\n\t\ttry:\n\t\t\tmodificar_elemento()\n\t\t\tmostrar_lista()\n\t\texcept:\n\t\t\tprint (\"Se genero un error es posible que la lista no ha sido creada,\\n Verifique opcion 1 \")\n\telif opcion==4:\n\t\ttry:\n\t\t\teliminar_elemento()\n\t\t\tmostrar_lista()\n\t\texcept:\n\t\t\tprint (\"Se genero un error es posible que la lista no ha sido creada,\\n Verifique opcion 1 \")\n\telif opcion==5:\n\t\tprint(\"\\t\\t\\tFin del programa\")\n\t\tsalir=5\n\telse:\n\t\tprint(\"\\t\\t\\tOpcion incorrecta\")","sub_path":"EjercicioListas.py","file_name":"EjercicioListas.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"418195838","text":"from __future__ import division\nfrom __future__ import print_function\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 17:06:17 2017\n\n@author: paulcabrera\n\nRUN:\n$ source ~/tensorflow/bin/activate\n$ python 5-4-17.py ./annotate/\n\"\"\"\n\n\"\"\"\nComments:\n \n \n - We're allowed to submit our model to TAs before the deadline so they can run it and let us know the results.\n \n - maybe try to make some predictions on the examples folder\n \n - More training and test data? MNIST?\n http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/: may have to invert colors and delete folders for symbols that\n don't appear. \n - An issue with using MNIST is it has more digits than necessary. We just need 0, 1, 2, 3, 4, 6. So maybe filter the\n other digits out\n - equations.pdf:\n - Other symbols: =, +, -, division sign, bar division sign, (, ), pi, sqrt, delta, ..., +- sign\n - Having trouble finding data for the above symbols.\n - letters: x, y, a, b, c, m, n, d, p, k, f, s, i, o, t, A\n \n - We may not have enough data for the more complicated neural network to work properly.\n \n - Perhaps reducing the number of layers would lead to better results\n \n - I increased the batch size to train the model properly given the more complex neural network\n \n - Consider using TA's input_wrapper instead of the various functions I use to transform the image.\n \n - predictions.txt results are far more inaccurate currently using the more complex neural network compared to the simple one\n\"\"\"\n\nimport sys\nimport glob\nfrom PIL import Image, ImageFilter\nimport skimage.measure as skm\nimport scipy.misc as scm\nimport numpy as np\nimport skimage.morphology as morphology\nimport tensorflow as tf\nfrom skimage.transform import resize,warp,AffineTransform\n\ntrainpath = sys.argv[-1] # # path for the folder containg the training images i.e. 
the path for 'annotated'\ntf.reset_default_graph() # http://stackoverflow.com/questions/41400391/tensorflow-saving-and-resoring-session-multiple-variables\n\nclass SymPred():\n\t# prediction is a string; the other args are ints\n\tdef __init__(self,prediction, x1, y1, x2, y2):\n\t\t\"\"\"\n\t\t is the top-left and bottom-right coordinates for the bounding box\n\t\t(x1,y1)\n\t\t\t .--------\n\t\t\t |\t\t|\n\t\t\t |\t\t|\n\t\t\t\t--------.\n\t\t\t\t\t\t (x2,y2)\n\t\t\"\"\"\n\t\tself.prediction = prediction \n\t\tself.x1 = x1\n\t\tself.y1 = y1\n\t\tself.x2 = x2\n\t\tself.y2 = y2\n\tdef __str__(self):\n\t\treturn self.prediction + '\\t' + '\\t'.join([\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.x1),\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.y1),\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.x2),\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.y2)])\n\ndef padim(im):\n\t\"\"\" Pads image to make it into a square.\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tAn image to be padded.\n\t\t\n\tReturns\n\t-------\n\tndarray\n\t\tA copy of im with padding.\n\t\"\"\"\n\trows = len(im)\n\tcols = len(im[0])\n\tzeros = max(rows, cols) - min(rows, cols)\n\tleft, right, top, bottom = 0, 0, 0, 0\n\tif rows > cols:\n\t\tleft = zeros//2\n\t\tright = zeros - left\n\telif rows < cols:\n\t\ttop = zeros//2\n\t\tbottom = zeros - top\n\treturn np.pad(im, ((top, bottom), (left, right)), 'constant')\n\ndef fullpadim(im):\n\t\"\"\" Pads left, right, bottom, and top with zeros and then do additional padding to make image into a square.\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tAn image to be padded.\n\t\t\n\tReturns\n\t-------\n\tndarray\n\t\tA copy of im with padding.\n\t\"\"\"\n\trows = len(im)\n\tcols = len(im[0])\n\tzeros = max(rows, cols) - min(rows, cols)\n\tleft = zeros//2\n\tright = zeros - left\n\tleft = right\n\tbottom = zeros//2\n\ttop = zeros - bottom\n\tbottom = top\n\tim = np.pad(im, ((top, bottom), (left, right)), 'constant')\n\tif len(im) != len(im[0]):\n\t\tim = padim(im)\n\treturn im\n\ndef 
cropim(im):\n\t\"\"\" Returns image that has been cropped using a bounding box.\n\t\n\tReference: http://chayanvinayak.blogspot.com/2013/03/bounding-box-in-pilpython-image-library.html\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tAn image to be cropped.\n\t\n\tReturns\n\t-------\n\tndarray\n\t\tA copy of im cropped using bound box obtained from ???\n\t\"\"\"\n\tim = Image.fromarray(im)\n\tz = im.split()\n\tleft,upper,right,lower = z[0].getbbox() \n\t#im = (im.crop((left,upper,right,lower))).filter(ImageFilter.SHARPEN) # filter doesn't work for some reason \n\tim = (im.crop((left,upper,right,lower)))\n\treturn np.array(im.getdata()).reshape((im.size[1], im.size[0])) # confirmed it's im.size[1] and im.size[0] in that order\n\t\ndef normalize(im):\n\t\"\"\" Normalize ndarray to values between 0 and 1\n\t\n\tParameters\n\t----------\n\timg : ndarray\n\t\tImage data to be normalized.\n\t\t\n\tReturns\n\t-------\n\tndarray\n\t\tA normalized copy of im.\n\t\"\"\"\n\treturn im / im.max() # MNIST data says 0 means white and 255 means black. MNIST images are normalized between 0 and 1. \n\t\ndef newim(im):\n\t\"\"\" Returns a normalized and padded square 28x28 pixel copy of an equation component.\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tImage data.\n\t\n\tReturns\n\t-------\n\tndarray\n\t\tA normalized, padded, square copy of im.\n\t\n\t\"\"\"\n\treturn normalize(fullpadim(im))\n\ndef connectedcomps(im):\n\t\"\"\" Returns a list of connected components as ndarrays that have more than 50 pixels\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tImage of an equation.\n\t\t\n\tReturns\n\t-------\n\t(ndarray, ndarray)\t\n\t\tA kist of the equation's components and a list of corresponding bounding box coordinates.\n\t\"\"\"\n\tcomps = skm.regionprops(skm.label(im > 0)) # im > 0 leads to all values greater than 0 becoming True i.e. 1 and all equal to 0 False i.e. 
0\n\t# I am not entirely sure if im > 0 is necessary since I omit components with fewer than 50 pixels in the code below\n\t# Without the if condition and without im > 0, however, we get an unreasonably high number of components, most of which are useless\n\tbbcoords = []\n\tnewcomps = []\n\tfor i in range(len(comps)):\n\t\tif comps[i].area < 50:\n\t\t\tcontinue\n\t\tbbcoords += [comps[i].bbox]\n\t\tnewcomps += [normalize(morphology.dilation(\n\t\t\t\t\t\t\t scm.imresize(\n\t\t\t\t\t\t\t\t\tfullpadim(cropim(np.asarray(comps[i].image, dtype=np.float32))), \n\t\t\t\t\t\t\t\t\t(32, 32), 'bicubic')))]\n\treturn (newcomps, bbcoords)\t \n\ndef getlocalpath(path):\n\t\"\"\" Returns the last value of a filepath.\n\t\n\tParameters\n\t----------\n\tpath : string\n\t\tA complete image file path.\t Ex: 'path/to/a/file.png'\n\t\n\tReturns\n\t-------\n\tstring\n\t\tThe containing directory of path.\n\t\"\"\"\n\treturn path.split('/')[-1]\n\ndef geteqnpath(path):\n\t\"\"\" Given the full path for a symbol, return the path of the corresponding equation.\n\t\n\tParameters\n\t----------\n\tpath : string\n\t\tA complete image component file path. Ex: '$home/annotated/SKMBT_36317040717260_eq2_sqrt_22_98_678_797.png'\n\t\t\n\tReturns\n\t-------\n\tstring\n\t\tPath of the corresponding equation image. 
Ex: '$home/annotated/SKMBT_36317040717260_eq2.png'\t\t\n\t\"\"\"\n\ts = \"\"\n\tcount = 0 # keeps track of number of underscores encountered\n\tfor c in path:\n\t\tif c == '_':\n\t\t\tcount += 1\n\t\tif count == 3:\n\t\t\tbreak\n\t\ts += c\n\tif '.png' in s:\n\t\treturn s\n\treturn s + '.png'\n\t\t\n\ndef getdict(folder):\n\t\"\"\" Returns a dictionary where the key is the equation image path and the value is a list of paths for the symbols of the equation.\n\t\n\tParameters\n\t----------\n\tfolder : string\n\t\tThe full path of the folder containing the annotated images.\n\t\n\tReturns\n\t-------\n\tdict(string, list(string))\n\t\tA dictionary of image paths keys and component path list values.\n\t\"\"\"\n\tpaths = glob.glob(folder+'/*.png')\n\teqns = {}\n\td = {}\n\tiseqn = False\n\ti = -5\n\ts = ''\n\tfor p in paths:\n\t\tc = p[i] # p[-5], which is the character right before the .png\n\t\t# use this loop to see if 'eq' occurs before the first instance of '_' when going in reverse order\n\t\twhile c != '_' and (not iseqn) and abs(i) <= len(p): \n\t\t\ts += c\n\t\t\tif 'eq' in s[::-1]: # reverse of s since s is being built up in reverse\n\t\t\t\tiseqn = True\n\t\t\ti -= 1\n\t\t\tif abs(i) <= len(p):\n\t\t\t\tc = p[i]\n\t\tif iseqn: \n\t\t\teqns[p] = []\n\t\telse: # path is for an image of a symbol, not equation\n\t\t\teqnpath = geteqnpath(p)\n\t\t\tif eqnpath in eqns: # otherwise: FileNotFoundError\n\t\t\t\tif eqnpath not in d:\n\t\t\t\t\td[eqnpath] = []\n\t\t\t\td[eqnpath] += [p]\n\t\ts = ''\n\t\tiseqn = False\n\t\ti = -5\n\treturn d\n\t\ndef getsypaths(folder):\n\td = getdict(folder)\n\tlst = list(d.values())\n\tsypaths = []\n\tfor e in lst:\n\t\tif e:\t# not the empty list\n\t\t\tsypaths += e\n\treturn sypaths\n\ndef geteqpaths(folder):\n\td = getdict(folder)\n\treturn list(d.keys())\n\n ### CHANGED: Using a (32, 32) image now and I moved the np.resize function elsewhere ###\ndef transform(im):\n\treturn normalize(morphology.dilation(scm.imresize(fullpadim(im), 
(32, 32), 'bicubic')))\n \ndef geteqims(folder):\n\treturn [(scm.imread(impath), impath) for impath in geteqpaths(folder)]\n\t\t \n# Get the images of the symbols. These will be used as training data\n# list of tuples: (ndarray length 28*28 of image, imagepath)\ndef getsyims(folder):\n\treturn [(transform(scm.imread(impath)), impath) for impath in getsypaths(folder)]\n\t\t\t\n# given the path for a symbol in the format of images in annotated, extract the label\ndef getlabel(path):\n\t# once you get to the 4th underscore as you move backwards through the path, build the string until you reach the 5th underscore\n\tcount = 0 # count of underscores\n\tlabel = ''\n\ti = -1\n\twhile count < 5 and abs(i) <= len(path):\n\t\tif path[i] == '_':\n\t\t\tcount += 1\n\t\telif count == 4: # assuming '_' is not a valid symbol\n\t\t\tlabel += path[i]\n\t\ti -= 1\n\treturn label[::-1] # reverse\n\t\n# Add the corresponding label to each tuple for the argument trainims, which is the result of getsyims(trainpath)\ndef addlabel(trainims):\n\t\"\"\" Add the corresponding label to each tuple for the argument trainims, which is the result of getsyims(trainpath).\n\t\n\tParameters\n\t----------\n\ttrainims : *** type ***\n\t\t*** Description of trainims ***\n\t\n\tReturns\n\t-------\n\t*** return type ***\n\t\t*** Description of return type ***\n\t\"\"\"\n\treturn [(im, impath, getlabel(impath)) for (im, impath) in trainims]\n\t\ndef unpack(syims):\n\t\"\"\" *** Description here ***\n\t\n\tParameters\n\t----------\n\tsyims : ** type **\n\t\t** Description here. 
**\n\t\n\tReturns\n\t-------\n\t(array.**type**, array.**type**, array.**type**)\n\t\tims - \n\t\tpaths -\n\t\tlabels -\n\t\"\"\"\n\tims, paths, labels = [], [], []\n\tfor e in syims:\n\t\tims += [e[0]]\n\t\tpaths += [e[1]]\n\t\tlabels += [e[2]]\n\t#return (np.asarray(ims), np.asarray(paths), np.asarray(labels)) # currently seems unnecessary based on what I'm doing in my_next_batch\n\treturn (ims, paths, labels)\t\t \n\n# args: lst - sorted list of unique labels e.g. labellst = list(set(labels)).sorted()\n# returns dictionary of onehot lists for each label\ndef oneHot(lst):\n\t\"\"\" *** Description ***\n\t\n\tParameters\n\t----------\n\tlst : list\n\t\tSorted list of unique labels. e.g. labellst = list(set(labels)).sorted()\n\t\t\n\tReturns\n\t-------\n\tdict.***type***\n\t\tDictionary of onehot lists for each label.\n\t\"\"\"\n\td = {}\t\n\tn = len(lst)\n\tonehotlst = [0]*n # list of zeros of length len(lst)\n\ti = 0\n\tfor label in lst:\n\t\tonehotlst[i] = 1\n\t\td[label] = onehotlst\n\t\tonehotlst = [0]*n\n\t\ti += 1\n\treturn d\n\t\n# return an ndarray of one-hot lists for every element. INCOMPLETE\ndef oneHotTotal(lst):\n\t\"\"\" Return an ndarray of one-hot lists for every element. 
INCOMPLETE\n\t\n\tParameters\n\t----------\n\tlst : list\n\t\tList of component labels.\n\t\n\tReturns\n\t-------\n\tarray.list.\n\t\tArray of one-hot lists.\n\t\"\"\"\n\tarr = np.asarray(oneHot(lst[0]))\n\tfor i in range(1, len(lst)):\n\t\tarr = np.vstack((arr, oneHot(lst[i])))\n\treturn arr\n\nsyims = addlabel(getsyims(trainpath)) # symbol (not equation) images; result is list of 3-element tuples: \n\n(trainims, trainpaths, labels) = unpack(syims)\nlabellst = list(set(labels)) \nlabellst.sort() # sorted list of unique labels\nonehotdict = oneHot(labellst)\n\n\t\n# affine transformation\ndef image_deformation(image):\n random_shear_angl = np.random.random() * np.pi/6 - np.pi/12\n random_rot_angl = np.random.random() * np.pi/6 - np.pi/12 - random_shear_angl\n random_x_scale = np.random.random() * .4 + .8\n random_y_scale = np.random.random() * .4 + .8\n random_x_trans = np.random.random() * image.shape[0] / 4 - image.shape[0] / 8\n random_y_trans = np.random.random() * image.shape[1] / 4 - image.shape[1] / 8\n dx = image.shape[0]/2. \\\n - random_x_scale * image.shape[0]/2 * np.cos(random_rot_angl)\\\n + random_y_scale * image.shape[1]/2 * np.sin(random_rot_angl + random_shear_angl)\n dy = image.shape[1]/2. 
\\\n - random_x_scale * image.shape[0]/2 * np.sin(random_rot_angl)\\\n - random_y_scale * image.shape[1]/2 * np.cos(random_rot_angl + random_shear_angl)\n trans_mat = AffineTransform(rotation=random_rot_angl,\n translation=(dx + random_x_trans,\n dy + random_y_trans),\n shear = random_shear_angl,\n scale = (random_x_scale,random_y_scale))\n return warp(image,trans_mat.inverse,output_shape=image.shape)\n \n# uses variables defined outside of this function: trainims, trainpaths, labellst\n### CHANGED ###\ndef my_next_batch(batch_size=10):\n\t\"\"\" *** Description ***\n\t\t\n\tParameters\n\t----------\n\ttrainims : ** type **\n\t\t*** Description of trainims ***\n\t\n\tReturns\n\t-------\n\t(array, array, array)\n\t\tbatch_x - numpy pixel arrays for each symbol\n\t\tbatch_y - one hot tensors for each symbol\n\t\tbatch_z - image path for the symbol's associate equation\n\t\"\"\"\n\t# randomly pick ten elements from trainims\n\tsize = len(trainims)\n\tindices = [np.random.randint(0, size) for j in range(batch_size)]\n\tnumlabels = len(labellst)\n\tbatch_x = np.zeros((batch_size, 32*32))\n\tbatch_y = np.zeros((batch_size, numlabels)) # rows = batch_size and cols = # of unique symbols\n\tbatch_z = np.empty((batch_size, 1), dtype='= 1e-6:\n learn_rate /= 2.\n phist = train_accuracy\t\n\ttrain_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1., l_rate: learn_rate}) # changed keep_prob to 1.\n\t\nsave_path = saver.save(sess, 'my-model2') \nprint ('Model saved in file: ', save_path) \n\neqims = geteqims(trainpath) # tuple: (ndarray, path) for images of equations\n#ims comps is a list of 2-element tuples: ((list of ndarrays for components, list of corresponding bounding box coordinates), equationpath)\nimscomps = [(connectedcomps(i[0]), i[1]) for i in eqims] # \n\n# uses variables defined outside function: imscomps\ndef formatcomps():\n\ttestdata = [] \n\tfor eq in imscomps: # components and path for a particular equation\n\t\t# eq[0] is a tuple: (list of ndarrays 
for components, list of corresponding bounding box coordinates)\n\t\tnumcomps = len(eq[0][0])\n\t\tfor i in range(len(eq[0][0])):\n\t\t\ttestdata += [(np.resize(eq[0][0][i], 32*32), eq[0][1][i], eq[1], numcomps)]\n\treturn testdata \n\ntestdata = formatcomps()\n\ndef structuretestdata(): \n\t\"\"\" ** Description of method ***\n\t\n\tReturns\n\t-------\n\t(array, array, array)\n\t\tx - 28x28 tensor for image pixels (one single component)\n\t\ty - bounding box coordinates for x (in equation)\n\t\tz - holds the image path for the original equation\n\t\tnum - number of components for the equation in z\n\t\"\"\"\n\tsize = len(testdata)\n\tx = np.zeros((size, 32*32), dtype=np.float32) # important to specify dtype=np.float32, otherwise UnicodeDecodeError\n\ty = np.empty((size, 4), dtype=np.int32) # holds bounding box coordinates\n\tz = np.empty((size, 1), dtype=' math.sqrt(n): break\n\t\telif n % p == 0:\n\t\t\tprime = False\n\t\t\tbreak\n\tif prime: primes.append(n)\n\tn += 2\n\nprint('Answer:', primes[10000])\n","sub_path":"007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"620824093","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom index import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^sql/$', views.sql_view, name='sql_view'),\n url(r'^sql_insert/$', views.sql_insert, name='sql_insert'),\n url(r'^table/$', views.table_view, name='table_view'),\n url(r'^table/(?P.+)$', views.table_view, name='table_view'),\n url(r'^init/$', views.init_db, name='init_db'),\n]\n","sub_path":"index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"377311903","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/pierre/workspace/django-survey/survey/tests/exporter/tex/test_question2tex_sankey.py\n# Compiled at: 2020-02-25 02:49:28\n# Size of source mod 2**32: 1192 bytes\nfrom survey.exporter.tex.question2tex_sankey import Question2TexSankey\nfrom survey.tests.management.test_management import TestManagement\n\nclass TestQuestion2TexSankey(TestManagement):\n\n def test_other_question_type(self):\n \"\"\" We get a type error if we do not give a Question. \"\"\"\n question = self.survey.questions.get(text='Aèbc?')\n self.assertRaises(TypeError, Question2TexSankey.__init__, question, {'other_question': 'other_question'})\n other_question = self.survey.questions.get(text='Aèbc?')\n q2s = Question2TexSankey(question, other_question=other_question)\n self.assertIsNotNone(q2s.tex())","sub_path":"pycfiles/django_survey_and_report-1.3.21-py3-none-any/test_question2tex_sankey.cpython-37.py","file_name":"test_question2tex_sankey.cpython-37.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"362065137","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nn= int(input())\na = set(map(int, input().split()))\nN = int(input())\nfor i in range(N):\n str_ = input().split()\n cmd = str_[0]\n b = set(map(int, input().split()))\n if(cmd == \"update\"):\n a.update(b)\n elif(cmd == \"intersection_update\"):\n a.intersection_update(b)\n elif(cmd == \"difference_update\"):\n a.difference_update(b)\n elif(cmd == \"symmetric_difference_update\"):\n a.symmetric_difference_update(b)\nprint(sum(a))\n","sub_path":"HackerRank-Python Practice/04. Set/010. Set Mutations.py","file_name":"010. Set Mutations.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"653568726","text":"import cv2\r\nimport numpy as np\r\nimport math\r\nimg = cv2.imread('picture.jpg')#读取原图像,左上为原点\r\nrows = img.shape[0] #取图片的行数,即高度\r\ncols = img.shape[1] #取图片的列数,即宽度\r\nprint(\"图像的高度:\",rows,\"图像的宽度:\",cols)\r\ncenter_x=int(input(\"请输入旋转中心x:(请在图像的宽度范围内输入):\"))\r\ncenter_y=int(input(\"请输入旋转中心y:(请在图像的宽度范围内输入):\"))\r\ncenter=[center_x,center_y] #设置图片中心\r\nresult=np.zeros((rows,cols,3),dtype=np.uint8) #创建一样大小的转换结果\r\nbeta=int(input(\"请输入旋转角度:\"))*math.pi/180\r\ntransform=np.array([[math.cos(beta),-math.sin(beta),0],\r\n [math.sin(beta), math.cos(beta),0],\r\n [0,0,1]]) #转换矩阵\r\ncv2.imshow('original_picture',img)#显示原图片\r\nfor i in range(rows):\r\n for j in range(cols):\r\n img_pos=np.array([i-center[0],j-center[1],1]) #记录结果位置\r\n [x, y, z] = np.dot(transform, img_pos) #转换为原图位置坐标\r\n x = int(x)+center[0] #取整\r\n y = int(y)+center[1] #取整\r\n if x >= rows or y >= cols or x < 0 or y < 0: #如果出界\r\n result[i][j] = 255 #该点为白色\r\n else:\r\n result[i][j] = img[x][y] #不出界把原图位置对应值取来\r\ncv2.imshow('result_process', result) #显示结果\r\ncv2.waitKey(0) #按任意键继续\r\n","sub_path":"homework01/code/lianxi02_ImageRotation_2.py","file_name":"lianxi02_ImageRotation_2.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"262023001","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom .models import * # import ALL models\nfrom django.contrib import messages # validation\nimport bcrypt # password encryption\nfrom django.conf import settings # map settings\nfrom datetime import datetime # events/activities date -time\n\n\ndef index(request):\n request.session.flush()\n return render(request, 'index.html')\n\ndef logout(request):\n request.session.flush()\n messages.success(request, 'You have logged out successfully!')\n return redirect('/ABC')\n\ndef login(request):\n if request.method == 'POST':\n print(request.POST) # should see QueryDict\n\n errors = User.objects.login_validator(request.POST)\n print(errors)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n\n return render(request, 'partialMsgs.html') # AJAX!!!\n # return redirect('/ABC') #redirect the user back to the form to fix the errors\n else:\n\n this_user = User.objects.get(email=request.POST['email'])\n request.session['user_id'] = this_user.id\n # messages.success(request, \"You have successfully logged in!\")\n return redirect('/ABC/myEvents')\n\n\ndef regForm(request):\n return render(request, 'regForm.html')\n\n\ndef register(request):\n if request.method == 'POST':\n print(request.POST) # should see QueryDict\n\n errors = User.objects.reg_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return render(request, 'partialMsgs.html') # AJAX!!!\n # return redirect('/ABC/regForm') #redirect the user back to the form to fix the errors\n else:\n hashed_pw = bcrypt.hashpw(\n request.POST['password'].encode(), bcrypt.gensalt()).decode()\n new_user = User.objects.create(\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name'],\n email=request.POST['email'],\n password=hashed_pw)\n request.session['user_id'] = new_user.id\n # messages.success(request, \"You have 
successfully registered!\")\n return redirect('/ABC/dashboard')\n\n\ndef childForm(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n\n user = User.objects.get(id=request.session['user_id'])\n context = {\n 'user': user,\n }\n return render(request, 'childForm.html', context)\n\n\ndef regChild(request):\n if request.method == \"POST\":\n\n errors = Child.objects.child_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/ABC/childForm') # redirect the user back to the form to fix the errors\n\n else:\n user = User.objects.get(id=request.session['user_id'])\n\n Child.objects.create(\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name'],\n birth_date=request.POST['birth_date'],\n gender=request.POST['child_gender'],\n age=request.POST['child_age'],\n program=request.POST['child_program'],\n parent_child=user,\n )\n\n # when successful Register a Child is click redirect back to myProfile\n return redirect('/ABC/myProfile')\n\n\ndef myProfile(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n children = user.enrolled_parent.all()\n context = {\n 'user': user,\n 'children': children,\n }\n return render(request, 'myProfile.html', context)\n\n\ndef update_myProfile(request):\n if request.method == \"POST\":\n errors = User.objects.password_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/ABC/myProfile')\n else:\n hashed_pw = bcrypt.hashpw(\n request.POST['password'].encode(), bcrypt.gensalt()).decode()\n user = User.objects.get(id=request.session['user_id'])\n user.password = hashed_pw\n user.save()\n return redirect('/ABC/dashboard')\n\n\ndef remove_child_myProfile(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n else:\n if request.method == 
\"POST\":\n child = Child.objects.get(id=request.POST['child_id'])\n child.delete()\n return redirect('/ABC/myProfile')\n else:\n return redirect('/ABC')\n\ndef remove_event_myEvents(request):\n # if 'event_id' not in request.session:\n # return redirect('/ABC/myEvents')\n # else:\n if request.method == \"POST\":\n event= Event.objects.get(id=request.POST['event_id'])\n event.delete()\n return redirect('/ABC/myEvents')\n else:\n return redirect('/ABC')\n\ndef myEvents(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n past_events=Event.objects.filter(event_date__lte = datetime.today(), user_event = User.objects.get(id=request.session['user_id']) )\n future_events=Event.objects.filter(event_date__gte = datetime.today(), user_event = User.objects.get(id=request.session['user_id']) )\n child=Child.objects.filter(parent_child=user)\n context = {\n 'user': user,\n 'past_events': past_events,\n 'future_events':future_events,\n 'child': child,\n }\n return render(request, 'myEvents.html', context)\n\n\ndef dashboard(request):\n if 'user_id' not in request.session:\n messages.error(request, \"Need to register or login buddy!\")\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n events = Event.objects.filter(event_date__gte = datetime.today())\n\n context = {\n 'user': user,\n 'events': events,\n # 'total_num': total_num,\n }\n return render(request, 'dashboard.html', context)\n\n\ndef viewJoin(request, event_id):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n children = user.enrolled_parent.all()\n event = Event.objects.get(id=event_id)\n context = {\n 'user': user,\n 'children': children,\n 'user_event': event,\n }\n return render(request, 'newJoin.html', context)\n\n\ndef requestJoin(request, event_id):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n else:\n if 
request.method == \"POST\":\n user = User.objects.get(id=request.session['user_id'])\n children = user.enrolled_parent.all()\n event = Event.objects.get(id=event_id)\n check_boxes = request.POST.getlist('childrenJoin', [])\n for checkbox_result in check_boxes:\n selected_child = Child.objects.get(\n id=checkbox_result)\n event.child_event.add(selected_child)\n print(checkbox_result)\n event.save()\n return redirect('/ABC/myEvents')\n else:\n return redirect('/ABC/{event_id}/newJoin')\n\n\n\ndef confirmJoin(request, event_id):\n this_event = Event.objects.filter(id=event_id) #d_id comes from the urls.py parm. FILTER is SO important here -do not use GET! \n if len(this_event) != 1:\n return redirect('/ABC/dashboard')\n user = User.objects.get(id=request.session['user_id'])\n context = {\n 'one_event': this_event[0], #need this because it is a list. grab \"value\" to initially populate record for the update/view\n 'user': user,\n 'api_key': settings.SECRET_KEY2, #if inspect -unfortunately you can still see the key!\n 'messages_list': this_event[0].eventmessages_join.all().order_by(\"-created_at\"), #only messages for this SPECIFIC event\n }\n return render(request, 'confirmJoin.html', context) \n\n\ndef create_msg(request, event_id):\n errors = Message.objects.msg_validator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n else: \n if request.method == \"POST\":\n this_event = Event.objects.filter(id=event_id) \n Message.objects.create(\n msg_content=request.POST['msg_content'],\n msg_UsrJoin=User.objects.get(id=request.session['user_id']), #comes from the login \n msg_EventJoin=this_event[0], \n )\n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef create_comment(request, event_id, msg_id):\n errors = Comment.objects.comm_validator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value) \n else: \n if request.method == \"POST\": \n this_msg = 
Message.objects.get(id=msg_id)\n Comment.objects.create(com_content=request.POST['com_content'],\n com_UserJoin=User.objects.get(id=request.session['user_id']), #c#comes from the login \n msg_CommJoin=this_msg, #join the comment with the message\n ) \n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef delete_comment(request, event_id, comm_id):\n this_comm = Comment.objects.get(id=comm_id)\n this_Logged_user = User.objects.get(id=request.session['user_id'])\n\n if this_comm.com_UserJoin == this_Logged_user: #only owner of comment can delete OR in html -just show \"delete\" to owner. \n this_comm.delete() \n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef add_like(request, event_id, msg_id):\n liked_message = Message.objects.get(id=msg_id)\n user_liking = User.objects.get(id=request.session['user_id'])\n liked_message.user_likes.add(user_liking)\n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef remove_like(request, event_id, msg_id):\n liked_message = Message.objects.get(id=msg_id)\n user_liking = User.objects.get(id=request.session['user_id'])\n liked_message.user_likes.remove(user_liking)\n return redirect(f'/ABC/{event_id}/confirmJoin')\n\n\n\n","sub_path":"group_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"621244945","text":"#!/usr/bin/python\nfrom pycam import VideoCapturePlayer\nfrom pycam import pygameFaceDetect\nfrom pycam.filters import outlineEdges\n\ndef process(surf):\n faces = pygameFaceDetect.getFaces(surf)\n surf = outlineEdges(surf)\n if faces:\n pygameFaceDetect.drawFacesOnSurface(surf, faces)\n return surf\n\nif __name__ == \"__main__\":\n vcp = VideoCapturePlayer(processFunction=process)\n vcp.main()\n pygame.quit()\n \n","sub_path":"pycam/examples/opencv/FaceAndEdgeDetect.py","file_name":"FaceAndEdgeDetect.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"358062976","text":"# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2018 Tijme Gommers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom urlparse import urlparse\n\nimport re\nimport json\n\nclass GraphWaveConfig:\n \"\"\"The GraphWave config contains dicts that can be used or loaded into Burp Suite.\n\n Attributes:\n data dict(obj): A Burp Suite JSON dict that can be imported into Burp Suite.\n includeList list(str): A list with unique code flow URLs.\n excludeList list(str): A list with similar code flow URLs.\n\n \"\"\"\n\n data = {}\n\n includeList = []\n\n excludeList = []\n\n def __init__(self, callbacks):\n \"\"\"Initiate the config by resetting to a clean state.\n\n Args:\n callbacks (obj): The Burp Suite callbacks (a Java Jython class).\n\n \"\"\"\n\n self.callbacks = callbacks\n self.reset()\n\n def reset(self):\n \"\"\"Reset the config to a clean state.\"\"\"\n\n # Load the current Burp Suite config and tweak it 
for GraphWave use.\n self.data = json.loads(self.callbacks.saveConfigAsJson(\"target.scope\"))\n self.data[\"target\"][\"scope\"][\"advanced_mode\"] = True\n self.data[\"target\"][\"scope\"][\"exclude\"] = []\n del self.data[\"target\"][\"scope\"][\"include\"]\n\n # Reset the include and exclude list.\n self.includeList = []\n self.excludeList = []\n\n def generateExcludeObject(self, url):\n \"\"\"Generate an exclude object from an URL so it can be loaded into the\n 'advanced scope control' option from Burp Suite.\n\n Args:\n url (str): The URL that should be converted to a Burp Suite scope control object.\n\n Returns:\n obj: The Burp Suite scope control object for this specific URL.\n\n \"\"\"\n\n parsed = urlparse(url)\n\n port = parsed.port if parsed.port else \"\"\n\n query = \"?\" + parsed.query if parsed.query else \"\"\n file = re.escape(parsed.path + query)\n\n return {\n \"enabled\": True,\n \"file\": \"^\" + file + \"$\",\n \"host\": \"^\" + re.escape(parsed.netloc.split(':')[0]) + \"$\",\n \"port\": \"^\" + str(port) + \"$\",\n \"protocol\": parsed.scheme\n }\n\n def include(self, url):\n \"\"\"Add a specific URL to the include list. The include list can be\n exported to a TXT file by the user.\n\n Args:\n url (str): The URL that should be included.\n\n \"\"\"\n\n if url not in self.includeList:\n self.includeList.append(url)\n\n def exclude(self, url):\n \"\"\"Add a specific URL to the exclude list. The exclude list can be\n exported to a TXT file or be marked out of scope by the user.\n\n Args:\n url (str): The URL that should be excluded.\n\n \"\"\"\n\n if url not in self.excludeList:\n self.excludeList.append(url)\n\n self.data[\"target\"][\"scope\"][\"exclude\"].append(\n self.generateExcludeObject(url)\n )\n","sub_path":"extension/GraphWaveConfig.py","file_name":"GraphWaveConfig.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"568101081","text":"import os\nfrom functools import partial\n\nimport torch\nfrom keras.utils import to_categorical\nfrom torchvision.transforms import transforms\nimport torch.utils.data as Data\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom few_shot.capsule_network.capsule_tst_predict import ConvCapsule, device\nfrom torchvision import datasets\n\nnp.random.seed(123)\ndef main():\n root = os.path.dirname(__file__) + '/data'\n\n x_trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n y_trans = partial(to_categorical, num_classes=10)\n\n # 重构dataset\n x_test, y_test = torch.load(os.path.join(root, 'processed', 'test.pt'))\n x_test, y_test = x_test.numpy(), y_test.numpy()\n\n y_test_oh = y_trans(y_test)\n\n\n idx = list(range(len(y_test_oh)))\n np.random.shuffle(idx)\n X_test = np.concatenate([x_test, x_test[idx]], 1)\n Y_test = np.vstack([y_test_oh.argmax(1), y_test_oh[idx].argmax(1)]).T\n\n not_equal = Y_test[:, 0] != Y_test[:, 1]\n\n y_test_oh = y_test_oh[not_equal]\n y_test_upt = y_test[not_equal]\n X_test = X_test[not_equal]\n Y_test = Y_test[not_equal]\n Y_test.sort(axis=1)\n\n X_test = np.array([x_trans(Image.fromarray(X_test[i], mode='L')).numpy() for i in range(len(X_test))])\n\n testsets = Data.TensorDataset(torch.tensor(X_test), torch.tensor(y_test_oh))\n test_loader = Data.DataLoader(\n dataset=testsets,\n batch_size=128,\n shuffle=False\n )\n\n model = ConvCapsule()\n model.to(device)\n\n model.load_state_dict(torch.load(os.path.dirname(__file__) + '/model_dir/caps_model.pt',\n map_location='cpu' if not torch.cuda.is_available() else None))\n\n with torch.no_grad():\n model.eval()\n test_pred_list = []\n for x, y_oh in tqdm(test_loader):\n x, y_oh = x.to(device), y_oh.to(device)\n\n _, test_pred = model(x, y_oh)\n test_pred_list.extend(test_pred.numpy())\n\n greater = np.sort(np.array(test_pred_list), axis=1)[:, -2] > 0.5\n test_preds = 
np.array(test_pred_list).argsort()[:, -2:]\n test_preds.sort(axis=1)\n\n acc = 1.*(np.prod(test_preds == Y_test, axis=1)).sum()/len(X_test)\n print(u'CNN+Capsule,不考虑置信度的准确率为:%s' % acc)\n acc = 1.*(np.prod(test_preds == Y_test, axis=1)*greater).sum()/len(X_test)\n print(u'CNN+Capsule,考虑置信度的准确率为:%s' % acc)\n\n # testsets = Data.TensorDataset(torch.tensor(x_test),\n # torch.tensor(y_test))\n # test_loader = Data.DataLoader(\n # dataset=testsets,\n # batch_size=128,\n # shuffle=False\n # )\n #\n # test_pred_list = []\n # total_test_acc = 0\n #\n # with torch.no_grad():\n # model.eval()\n # for data, target in tqdm(test_loader):\n # data, target = data.to(device), target.to(device)\n # _, test_pred = model(data, target)\n #\n # test_pred_list.extend(test_pred.numpy())\n #\n # total_test_acc += len(np.where(np.argmax(target, 1) == np.argmax(test_pred, 1))[0])\n #\n # # 方法1\n # total_test_acc_rate = total_test_acc/len(x_test)\n # print(f'方法1 acc: {total_test_acc_rate}')\n #\n # # 方法2\n # test_preds = np.array(test_pred_list).argsort()[:, -1]\n # acc = 1. 
* (test_preds == np.argmax(y_test, 1)).sum() / len(x_test)\n # print(f'方法2 acc: {acc}')\n #\n #\n #\n #\n #\n # test_set = datasets.MNIST(root=root, train=False, transform=x_trans, download=False)\n #\n # test_loader = torch.utils.data.DataLoader(\n # dataset=test_set,\n # batch_size=128,\n # shuffle=False)\n #\n # total_test_loss = 0\n # total_test_acc = 0\n # with torch.no_grad():\n # model.eval()\n #\n # for data, target in tqdm(test_loader):\n # data = data.to(device)\n # new_target = torch.from_numpy(to_categorical(target.numpy(), 10)).to(device)\n # test_loss, test_pred = model(data, new_target)\n # total_test_loss += test_loss.item()\n #\n # total_test_acc += len(np.where(target == np.argmax(test_pred.cpu(), axis=1))[0])\n #\n # total_test_acc_rate = total_test_acc/len(test_loader.dataset)\n # print(f'test acc: {total_test_acc_rate}')\n #\n # print('Done')\n\n\n # build DataLoader\n\n\n\n # load model\n # predict\n\n\nif __name__ == '__main__':\n main()","sub_path":"few_shot/capsule_network/capsule_tst_predict_2.py","file_name":"capsule_tst_predict_2.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"648523088","text":"from datetime import datetime\n\nfrom django.test import TestCase\n\nfrom .models import ShippingLabel\nfrom .service.constants.service_codes import *\n\nfrom django.contrib.auth.models import User\nfrom apps.common.models import Address\nfrom apps.customers.models import Customer\nfrom apps.externals.models import Brand\nfrom apps.sales.models import Cart, CartedOffer, SalesOrder, Parcel, SentProduct\nfrom apps.spod.models import Sku, Product, Offer\n\n\nclass ShippingLabelTests(TestCase):\n\n def setUp(self):\n user = User.objects.create_user('some user', '', 'password', first_name='some', last_name='user')\n\n customer = Customer(user_profile=user)\n customer.save()\n\n address = Address(\n street1='213 N Racine',\n city='Chicago',\n state='IL',\n postal_code='60642'\n )\n address.save()\n customer.shipping_addresses.add(address)\n\n brand = Brand(name='some brand')\n brand.save()\n\n sku_under = Sku(\n number=123,\n name='some sku',\n brand=brand,\n weight=10\n )\n sku_under.save()\n\n product_under = Product(\n sku=sku_under,\n skus_per=1\n )\n product_under.save()\n\n offer_under = Offer(\n title='some title',\n price=10.99\n )\n offer_under.save()\n offer_under.products.add(product_under)\n\n cart_under = Cart(\n customer=customer\n )\n cart_under.save()\n\n carted_offer_under = CartedOffer(\n cart=cart_under,\n offer=offer_under,\n quantity=1\n )\n carted_offer_under.save()\n\n sales_order_under = SalesOrder(\n cart=cart_under,\n shipping=0,\n tax=0\n )\n sales_order_under.save()\n\n self.parcel_under = Parcel(\n sales_order=sales_order_under,\n sent_on=datetime.now()\n )\n self.parcel_under.save()\n\n sent_prod = SentProduct(\n parcel=self.parcel_under,\n product=product_under,\n quantity=1\n )\n sent_prod.save()\n\n sku_over = Sku(\n number=124,\n name='some sku',\n brand=brand,\n weight=20\n )\n sku_over.save()\n\n product_over = Product(\n sku=sku_over,\n skus_per=1\n )\n product_over.save()\n\n offer_over = Offer(\n 
title='some title over a pound',\n price=10.99\n )\n offer_over.save()\n offer_over.products.add(product_over)\n\n cart_over = Cart(\n customer=customer\n )\n cart_over.save()\n\n carted_offer_over = CartedOffer(\n cart=cart_over,\n offer=offer_over,\n quantity=1\n )\n carted_offer_over.save()\n\n sales_order_over = SalesOrder(\n cart=cart_over,\n shipping=0,\n tax=0\n )\n sales_order_over.save()\n\n self.parcel_over = Parcel(\n sales_order=sales_order_over,\n sent_on=datetime.now()\n )\n self.parcel_over.save()\n\n sent_prod = SentProduct(\n parcel=self.parcel_over,\n product=product_over,\n quantity=1\n )\n sent_prod.save()\n\n def test_as_image(self):\n import os\n self.parcel_over.make_shipping_label(UPS_GROUND)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)[0]\n sl.as_image('test.png')\n os.remove('test.png')\n\n def test_dhl_express_under_pound(self):\n self.parcel_under.make_shipping_label(DHL_EXPEDITED_UNDER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_under)\n self.assertGreater(sl.count(), 0)\n\n def test_dhl_express_over_pound(self):\n self.parcel_over.make_shipping_label(DHL_EXPEDITED_OVER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)\n self.assertGreater(sl.count(), 0)\n\n def test_dhl_ground_under_pound(self):\n self.parcel_under.make_shipping_label(DHL_GROUND_UNDER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_under)\n self.assertGreater(sl.count(), 0)\n\n def test_dhl_ground_over_pound(self):\n self.parcel_over.make_shipping_label(DHL_GROUND_OVER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)\n self.assertGreater(sl.count(), 0)\n\n def test_ups_ground(self):\n self.parcel_over.make_shipping_label(UPS_GROUND)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)\n self.assertGreater(sl.count(), 
0)\n","sub_path":"apps/abol/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"192430539","text":"#!/usr/bin/python3\n#---Import---#\n#---ROS\nimport rospy,sys,os\nfrom std_msgs.msg import Float32\nfrom sensor_msgs.msg import CompressedImage\nfrom BienBaoClasssifier import BienBaoClassifier\nimport time\nimport cv2\nimport numpy as np\nimport scipy.ndimage as sp\nfrom threading import Thread\nimport os\ntry:\n\tos.chdir(os.path.dirname(__file__))\t\n\tos.system('clear')\n\tprint(\"\\nWait for initial setup, please don't connect anything yet...\\n\")\n\tsys.path.remove('/opt/ros/lunar/lib/python2.7/dist-packages')\nexcept: pass\ndef print_ros(str_):\n rospy.loginfo(str_)\n\ndef nothing(x):\n pass\n\nclass Team500_CDS_ROS(object):\n def __init__(self, NameTeam='team500', drawImage=False, drawBird=False, limitSpeed=[0, 60], limitAngle=[-30, 30]):\n self.frame = None\n self.limitSpeed = limitSpeed\n self.limitAngle = limitAngle\n self.Speed = 0\n self.Angle = 0\n self.First_time = True\n self.dsize_cut = [0, 0]\n self.Topic_Image = NameTeam + '_image/compressed'\n self.Topic_Speed = NameTeam + '_speed'\n self.Topic_Angle = NameTeam + '_steerAngle'\n self.pub_Speed = None\n self.pub_Angle = None\n self.sub_Image = None\n self.Speed = 50\n self.drawBird = drawBird\n self.drawImage = drawImage\n self.draw_Top = 80\n self.draw_Bot = 77\n self.BienBao_delay_set = 10\n self.BienBao_flag = 2\n self.BienBao_delay = 0\n self.Goc = 30\n self.draw_TopCenter = 25\n self.draw_BotCenter = 160\n self.BirdA = []\n self.Label_bienbao = ['Re Trai', 'Re Phai']\n self.On_Play = False\n self.Continue = True\n self.Trai = None\n self.Phai = None\n self.Thang = None\n self.Thang_flag = 0\n self.Control = 0\n self.draw_Lane()\n self.run()\n\n def draw_Lane(self):\n h, w = [240, 320]\n Trai = np.array( [[[0, h//4],[w//2,h//4],[(w//4)*3,h//2],[(w//4)*3,h],[0,h]]], dtype=np.int32)\n self.Trai = cv2.fillPoly( np.zeros([240,320],dtype=np.uint8), Trai, 255 )\n Phai = np.array( [[[w, h//4],[w//2,h//4],[w//4,h//2],[w//4,h],[w,h]]], dtype=np.int32)\n self.Phai = 
cv2.fillPoly( np.zeros([240,320],dtype=np.uint8), Phai, 255 )\n Thang = np.array( [[[w//4, 0],[(w//4)*3,0],[(w//4)*3,h],[w//4,h]]], dtype=np.int32)\n self.Thang = cv2.fillPoly( np.zeros([240,320],dtype=np.uint8), Thang, 255 )\n\n def RoadDetect(self, image, Low=[30, 8, 69], High=[66, 35, 99]):\n h, w = image.shape[:2]\n # shadow = self.Shadow(image)\n # LineWhite = self.LineWhite(image)\n # HSV = cv2.cvtColor(image[h - 80:h, w // 2 - 90:w // 2 + 90, :], cv2.COLOR_BGR2HSV)\n # H = HSV[..., 0]\n # S = HSV[..., 1]\n # V = HSV[..., 2]\n Low_HSV = Low\n High_HSV = High\n image = cv2.inRange(cv2.cvtColor(image, cv2.COLOR_BGR2HSV), np.array(Low_HSV),\n np.array(High_HSV))\n # print_ros(self.Trai.dtype)\n # print_ros(self.Phai.dtype)\n # print_ros(self.BienBao_flag)\n if self.BienBao_flag == 0:\n image = cv2.bitwise_or(image, image, mask=self.Trai)\n elif self.BienBao_flag == 1:\n image = cv2.bitwise_or(image, image, mask=self.Phai)\n image, pts, K = self.Road_Find(image)\n return image, pts, K\n\n def Road_Find(self, img, winsize=9, margin=150, minpix=1500):\n h, w = img.shape[:2]\n histogram = np.sum(img[2 * (h // 3):, w // 2 - 30: w // 2 + 30], axis=0)\n F_img = cv2.merge((img, img, img))\n mid_Road = int(np.mean(np.where(histogram == np.max(histogram)))) + w // 2 - 30\n\n win_heigh = np.int(h / winsize)\n\n nonzero_y, nonzero_x = img.nonzero()\n F = np.zeros_like(img)\n mid_x = mid_Road\n mid_road = []\n KKKK = 0\n for win in range(winsize):\n win_y_low = h - (win + 1) * win_heigh\n win_y_high = h - win * win_heigh\n win_x_low = mid_x - margin\n win_x_high = mid_x + margin\n cv2.rectangle(F_img, (win_x_high, win_y_high), (win_x_low, win_y_low), (255, 0, 0), 2)\n\n mid_x_good = ((nonzero_x >= win_x_low) & (nonzero_x < win_x_high)\n & (nonzero_y >= win_y_low) & (nonzero_y < win_y_high)).nonzero()[0]\n if len(mid_x_good) > minpix:\n mid_x = np.int(np.mean(nonzero_x[mid_x_good]))\n KKKK += 1\n mid_road.append(mid_x_good)\n mid_road = np.concatenate(mid_road)\n #\n 
mid_x_road, mid_y_road = nonzero_x[mid_road], nonzero_y[mid_road]\n x_fit_plot = np.linspace(0, h - 1, h)\n if len(mid_x_road) > 0:\n mid_fit = np.polyfit(mid_y_road, mid_x_road, 2)\n mid_fit_plot = mid_fit[0] * x_fit_plot ** 2 + mid_fit[1] * x_fit_plot + mid_fit[2]\n if self.drawImage:\n for i, mid in enumerate(mid_fit_plot):\n cv2.circle(F_img, (int(mid), i), 1, (255, 0, 0), -1)\n # print(len(mid_fit_plot))\n return F_img, mid_fit_plot, KKKK\n\n def BirdEye(self, img):\n h, w = img.shape[:2]\n self.BirdA = [(w // 2 + self.draw_BotCenter, h - self.draw_Bot),\n (w // 2 - self.draw_BotCenter, h - self.draw_Bot),\n (w // 2 - self.draw_TopCenter, self.draw_Top),\n (w // 2 + self.draw_TopCenter, self.draw_Top)]\n src = np.float32(self.BirdA)\n dst = np.float32([[w, h], [0, h], [0, 0], [w, 0]])\n M = cv2.getPerspectiveTransform(src, dst)\n N = cv2.getPerspectiveTransform(dst, src)\n F = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)\n return F, M, N\n\n def run(self):\n self.pub_Speed = rospy.Publisher(self.Topic_Speed, Float32, queue_size=10)\n self.pub_Angle = rospy.Publisher(self.Topic_Angle, Float32, queue_size=10)\n self.sub_Image = rospy.Subscriber(self.Topic_Image, CompressedImage, self.get_image)\n rospy.init_node('talker', anonymous=True)\n print_ros(\"Team 500 Let's Go!!!\")\n\n self.On_Play = True\n self.BienBaoClassifier = BienBaoClassifier()\n rospy.spin()\n\n def Core_thread(self):\n while self.On_Play:\n if self.Continue & (not self.frame is None):\n try:\n image__ = self.frame.copy()\n image = self.BirdEye(image__)[0]\n image, pts, K = self.RoadDetect(image)\n if self.On_Play:\n self.Publish_Angle(self.AxisControl(pts, K))\n self.Publish_Speed(self.Speed)\n else:\n self.Publish_Speed(0)\n cv2.imshow('Team500_DUT ETE', image)\n if cv2.waitKey(1) == 27:\n self.On_Play = False\n print_ros('Turn OFF')\n except BaseException as be:\n print_ros('{}'.format(be))\n\n\n def BienBao_thread(self):\n print_ros('Thread_Online')\n self.Continue = False\n 
self.BienBaoClassifier = BienBaoClassifier()\n self.Continue = True\n while self.On_Play:\n if not self.frame is None:\n try:\n self.Goc = 30\n self.Speed = 50\n self.BienBao_flag, self.BienBao_VT = self.BienBao(self.frame)\n if self.BienBao_flag in [0,1]:\n self.Goc = 45\n self.Speed = 40\n self.Thang_flag = False\n time.sleep(2)\n except BaseException as be:\n print_ros('{}'.format(be))\n # time.sleep(1)\n print_ros('Thread OFF')\n\n def BienBao(self, image):\n image_ = cv2.inRange(cv2.cvtColor(image, cv2.COLOR_BGR2HSV), (0, 150, 100), (255, 255, 255))\n image_ = cv2.erode(image_, None, iterations=1)\n image_ = cv2.dilate(image_, None, iterations=2)\n ret, labels = cv2.connectedComponents(image_)\n B = 2\n for r in range(ret):\n y, x = np.where(labels == r)\n if (x.shape[0] > 200) & (image_[y[0], x[0]] != 0):\n image_cut = image[np.min(y):np.max(y), np.min(x):np.max(x),:]\n h, w = image_cut.shape[:2]\n if (0.8 <= h/w <= 1.2) & (0.8 <= w/h <= 1.2):\n B = self.BienBaoClassifier.detect(image[np.min(y):np.max(y), np.min(x):np.max(x), :])\n if B in [0, 1]:\n print_ros('Result: {}'.format(self.Label_bienbao[B]))\n # if self.drawImage:\n # image = cv2.rectangle(image, (np.min(x), np.min(y)), (np.max(x), np.max(y)), (0, 0, 255), 2)\n # image = cv2.putText(image, self.Label_bienbao[B],(np.min(x), np.min(y)),cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.7, (0, 255, 255))\n # self.First_time = False\n return B, ((np.min(x), np.min(y)), (np.max(x), np.max(y)))\n\n def AxisControl(self, pts, K, top=60, thresh=1):\n # print(pts)\n AxisLine = sp.filters.gaussian_filter1d(pts, 0.5)\n # print(AxisLine)\n if self.BienBao_flag in [0, 1]:\n Control = ((AxisLine[240-top] - 360//2)/180)*self.Goc\n self.Thang_flag = 0\n else:\n if (K < 4) & (self.Thang_flag > 20):\n self.Thang_flag = 0\n Control = self.Control\n else:\n self.Thang_flag += 1\n Control = ((AxisLine[240-top] - 360//2)/180)*self.Goc\n # if self.BienBao_flag in [0, 1] & self.BienBao_delay<=self.BienBao_delay_set:\n # self.BienBao_delay 
+= 1\n # if self.Bien\n # Control = -abs(Control) - 10\n # else:\n # self.BienBao_delay = 0\n # print(Control)\n if Control > thresh:\n Control -= thresh\n Text = \"Turn Right: %d\" % Control\n elif Control < -thresh:\n Control += thresh\n Text = \"Turn Left: %d\" % abs(Control)\n else:\n Control = 0\n Text = \"Go On\"\n self.Control = Control\n print_ros(Text)\n return Control\n\n def get_image(self, data):\n try:\n if self.First_time:\n self.First_time=False\n Thread(target=self.BienBao_thread).start()\n Thread(target=self.Core_thread).start()\n self.Continue = True\n Array_JPG = np.fromstring(data.data, np.uint8)\n cv_image = cv2.imdecode(Array_JPG, cv2.IMREAD_COLOR)\n self.frame = cv_image\n except BaseException as be:\n print_ros('{}'.format(be))\n self.Continue = True\n\n def Publish_Speed(self, speed):\n speed = min(self.limitSpeed[1], speed)\n speed = max(self.limitSpeed[0], speed)\n self.Speed = speed\n self.pub_Speed.publish(float(speed))\n\n def Publish_Angle(self, angle):\n angle = min(self.limitAngle[1], angle)\n angle = max(self.limitAngle[0], angle)\n self.Angle = angle\n self.pub_Angle.publish(float(angle))\n\n\nif __name__ == '__main__':\n Team500_CDS_ROS(NameTeam='team500', drawImage=True)\n","sub_path":"File/team500/scripts/main_team500.py","file_name":"main_team500.py","file_ext":"py","file_size_in_byte":11277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"184052045","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport config_data as c\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom event_thread import EventThread, TaskType\nfrom event_identification import EventType\nimport images\n\n\nclass EventWidget(QtWidgets.QWidget):\n\n setPositionSignal = QtCore.pyqtSignal(float, name='setPositionSignal')\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.tripData = None\n self.resultItems = []\n self.eventThread = EventThread()\n self.eventThread.resultReady.connect(self.ShowEvents)\n self.SetupUI()\n\n def SetupUI(self):\n # table\n self.table = QtWidgets.QTableWidget()\n self.table.setColumnCount(6)\n self.table.setHorizontalHeaderLabels('EventId Type Time Video Chart Para'.split())\n self.table.verticalHeader().setHidden(True)\n header = self.table.horizontalHeader()\n font = header.font()\n font.setBold(True)\n header.setFont(font)\n self.table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n\n # other component\n hlayout = QtWidgets.QHBoxLayout()\n self.buttonGroup = QtWidgets.QButtonGroup()\n self.buttonGroup.setExclusive(False)\n self.buttonGroup.buttonToggled[QtWidgets.QAbstractButton, bool].connect(self.CheckBoxClicked)\n # add 'All' button\n self.allbtn = QtWidgets.QCheckBox('All')\n self.allbtn.setChecked(True)\n self.allbtn.clicked.connect(self.AllBtnClicked)\n startBtn = QtWidgets.QCheckBox('Start')\n startBtn.setChecked(True)\n stopbtn = QtWidgets.QCheckBox('Stop')\n stopbtn.setChecked(True)\n # self.buttonGroup.addButton(allbtn)\n self.buttonGroup.addButton(startBtn)\n self.buttonGroup.addButton(stopbtn)\n hlayout.addWidget(self.allbtn)\n hlayout.addWidget(startBtn)\n hlayout.addWidget(stopbtn)\n # add other button for all types\n for i in TaskType:\n if i == TaskType.StartStop:\n continue\n checkBox = QtWidgets.QCheckBox(i.name)\n checkBox.setChecked(True)\n self.buttonGroup.addButton(checkBox)\n 
hlayout.addWidget(checkBox)\n \n self.configBtn = QtWidgets.QPushButton(QtGui.QIcon(\":/img/configuration\"), 'config')\n self.updateBtn = QtWidgets.QPushButton(QtGui.QIcon(\":/img/updating\"), 'update')\n\n # layout\n hlayout.addWidget(self.configBtn)\n hlayout.addWidget(self.updateBtn)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addWidget(self.table)\n vlayout.addLayout(hlayout)\n self.setLayout(vlayout)\n\n def UpdateTripData(self, data):\n self.tripData = data\n self.UpdateEvents()\n\n def UpdateEvents(self):\n # start the thread\n if not self.eventThread.isRunning():\n self.eventThread.start()\n # clear the origin contens\n self.table.clearContents()\n self.table.setRowCount(0)\n # prepare for search\n self.resultItems.clear()\n self.searchItems = self.CheckSelectedItem()\n self.recvSet = set()\n\n self.eventThread.AddTasks(self.tripData, self.searchItems)\n\n def CheckSelectedItem(self):\n ''' 从配置文件加载需要检测的事件 '''\n items = []\n allcfg = [c.StartStop, c.HardBrake, c.HardSwerve, c.LaneChange, c.CutIn, c.CarFollowing, c.PhoneEvent, c.FatigueDriving]\n alltype = [TaskType.StartStop, TaskType.HardBrake, TaskType.HardSwerve, TaskType.LaneChange, TaskType.CutIn,\n TaskType.CarFollowing, TaskType.PhoneEvent, TaskType.FatigueDriving]\n for i, val in enumerate(allcfg):\n if val:\n items.append(alltype[i])\n return items\n\n def ShowEvents(self, task: TaskType, items):\n self.recvSet.add(task)\n self.resultItems.extend(items)\n # wait all task returned\n if len(self.recvSet) == len(self.searchItems):\n # exit the thread when all task returned\n self.eventThread.Stop()\n # sorted by timestamp\n self.resultItems.sort(key=lambda x: x[1])\n # show event\n cameraIcon = QtGui.QIcon(':/img/camera-simple')\n chartIcon = QtGui.QIcon(':/img/chart')\n for item in self.resultItems:\n row = self.table.rowCount()\n self.table.insertRow(row)\n\n eventIdItem = QtWidgets.QTableWidgetItem(str(row + 1))\n typeItem = QtWidgets.QTableWidgetItem(item[0].name)\n\n t = item[1] // 10 # to 
s\n eventTimeStr = \"%.2d:%.2d:%.2d\" % (t // 3600, t % 3600 // 60, t % 3600 % 60)\n timeItem = QtWidgets.QTableWidgetItem(eventTimeStr)\n\n videoBtn = QtWidgets.QPushButton(cameraIcon, 'Video')\n videoBtn.setLayoutDirection(QtCore.Qt.RightToLeft)\n videoBtn.setProperty('timestamp', item[1])\n videoBtn.clicked.connect(self.VideoBtnClicked)\n\n chartBtn = QtWidgets.QPushButton(chartIcon, 'Charts')\n chartBtn.setLayoutDirection(QtCore.Qt.RightToLeft)\n chartBtn.setProperty('timestamp', item[1])\n chartBtn.clicked.connect(self.ChartsBtnClicked)\n\n self.table.setItem(row, 0, eventIdItem)\n self.table.setItem(row, 1, typeItem)\n self.table.setItem(row, 2, timeItem)\n self.table.setCellWidget(row, 3, videoBtn)\n self.table.setCellWidget(row, 4, chartBtn)\n\n # QtWidgets.QApplication.processEvents()\n\n def VideoBtnClicked(self):\n obj = self.sender()\n timestamp = obj.property('timestamp')\n self.setPositionSignal.emit(timestamp)\n\n def ChartsBtnClicked(self):\n obj = self.sender()\n timestamp = obj.property('timestamp')\n print(timestamp)\n \n def CheckBoxClicked(self, btn, checked):\n items = self.table.findItems(btn.text(), QtCore.Qt.MatchContains)\n for item in items:\n self.table.setRowHidden(item.row(), not checked)\n if not checked:\n self.allbtn.setChecked(False)\n\n def AllBtnClicked(self, checked):\n for btn in self.buttonGroup.buttons():\n btn.setChecked(checked)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n tripData = pd.read_csv('D:\\\\TongJi\\\\NatureDriving\\\\data\\\\359932\\\\File_ID_359932.csv', encoding='utf-8')\n\n # data handle : fill and transform\n\n fieldList = [c.SpeedField, c.AccelXField, c.AccelYField, c.AccelZField,\n c.LaneWidthField, c.LaneOffsetField, c.LeftLaneField, c.RightLaneField,\n c.T0_ObjectID, c.T0_XRange, c.T0_YRange,\n c.T0_ObjectID.replace('0', '1'), c.T0_XRange.replace('0', '1'), c.T0_YRange.replace('0', '1'),\n c.T0_ObjectID.replace('0', '2'), c.T0_XRange.replace('0', '2'), 
c.T0_YRange.replace('0', '2'),\n c.T0_ObjectID.replace('0', '3'), c.T0_XRange.replace('0', '3'), c.T0_YRange.replace('0', '3')]\n tripData[fieldList] = tripData[fieldList].fillna(method='pad')\n tripData[fieldList] = tripData[fieldList].fillna(0)\n tripData[c.SpeedField] = tripData[c.SpeedField] * 3.6\n mm2mList = [c.LaneWidthField, c.LaneOffsetField, c.LeftLaneField, c.RightLaneField]\n tripData[mm2mList] = tripData[mm2mList] / 1000\n\n win = EventWidget()\n win.UpdateTripData(tripData)\n win.resize(800, 800)\n win.show()\n\n sys.exit(app.exec())\n","sub_path":"event_widget.py","file_name":"event_widget.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"547336187","text":"import mnist\nimport numpy as np\n'''\n卷积神经网络里的卷积在计算机视觉里是“相关”操作\n填充是为了输入和输出有相同的尺寸\n如果不进行填充(padding)操作,就是称为“有效”填充\nMINIST CNN\n每张图像是28*28,使用3*3的过滤器,一共8个过滤器,输出26*26*8\n'''\n\n\nclass Conv3x3:\n # A Convolution layer using 3x3 filters\n def __init__(self, num_filters):\n self.num_filters = num_filters\n # filters is a 3d array with dimensions (num_filters ,3, 3)\n # we divide by 9 to reduce the variance of our initial values\n self.filters = np.random.rand(num_filters, 3, 3) / 9\n\n def iterate_regions(self, image: np.ndarray):\n '''\n Generates all possible 3x3 image regions using valid padding\n image is a 2d numpy array\n :param image:\n :return:\n '''\n h, w = image.shape # minist 28*28\n for i in range(h-2):\n for j in range(w-2):\n im_region = image[i:(i + 3), j:(j + 3)]\n yield im_region, i, j\n # 返回图像上的一个3*3的区域和它最左上的坐标\n\n def forward(self, input: np.ndarray):\n '''\n performs a forward pass of the conv layer using the given input\n return a 3d numpy array with dimensions (h, w, num_filters)\n input is a 2d numpy array\n :param input:\n :return:\n '''\n h, w = input.shape # 28*28\n\n # 把output想象成一个二维数组,每一个元素是维度为self.num_filters的向量。\n output = np.zeros((h-2, w-2, self.num_filters)) #26*26*8,因为过滤器是8*3*3\n\n # im_region一个包含相关图像区域的3x3阵列\n # self.filter 一个3d数组 8*3*3\n # im_region * self.filters 使用广播功能\n for im_region, i, j in self.iterate_regions(input):\n # self.filters is a 3d array with 8 x 3 x 3\n # im_region multi every 3x3 array in self.filters\n output[i, j] = np.sum(im_region * self.filters, axis=(1, 2))\n\n return output\n'''\n图像中相邻的像素倾向具有相似的值。\n池化层,减少通过猜测在输入中产生的汇总值。简单的max或average等这些操作。\n在开始的conv layer 之后放置一个池大小为2的max pooling 把 26*26*8 变成 13*13*8\n'''\n\n\nclass MaxPool2:\n # A max pooling layer using a pool size of 2\n def iterate_region(self, image: np.ndarray):\n '''\n Generate non-ovrlapping 2x2 image regions to pool over\n image is a 3d numpy array\n :param image:\n :return:\n '''\n h, w, _ = image.shape\n new_h, new_w = 
h // 2, w // 2\n\n for i in range(new_h):\n for j in range(new_w):\n image_region = image[(i*2):(i*2+2), (j*2):(j*2+2)]\n yield image_region, i, j\n\n def forward(self, input: np.ndarray):\n '''\n :param input: a 3d array with dimensions (h, w, num_filters)\n :return: a 3d array with dimensions (h/2, w/2, num_filters)\n '''\n h, w, num_filters = input.shape # 来自卷积层的输出26*26*8\n output = np.zeros((h//2, w//2, num_filters))\n\n for image_region, i, j in self.iterate_region(input):\n output[i, j] = np.max(image_region, axis=(0, 1))\n\n return output\n\n\nclass Softmax:\n '''\n 将任意实际值转换为概率,\n 13*13*8 --> (0, 9)\n 帮我们量化对预测的确定程度\n '''\n # A standard fully-connected layer with softmax activation\n def __init__(self, input_len, nodes):\n self.weights = np.random.randn(input_len, nodes) / input_len\n self.biases = np.zeros(nodes)\n\n def forward(self, input: np.ndarray):\n '''\n\n :param input: any array with any dimensions\n :return: a 1d numpy array containing the respective probability values\n '''\n input = input.flatten()\n\n input_len, nodes = self.weights.shape\n\n totals = np.dot(input, self.weights) + self.biases\n exp = np.exp(totals)\n return exp / np.sum(exp, axis=0)\n\n\ntrain_images = mnist.train_images()\ntrain_labels = mnist.train_labels()\ntest_images = mnist.test_images()[:1000]\ntest_labels = mnist.test_labels()[:1000]\n\nprint(test_images[0].shape)\n\nconv = Conv3x3(8) # 28*28*1 -> 26*26*8\npool = MaxPool2() # 26*26*8 -> 13*13*8\nsoftmax = Softmax(13 * 13 * 8, 10) # 13*13*8 -> 10\n\n'''\noutput = conv.forward(train_images[0])\nprint(output.shape)\noutput = pool.forward(output)\nprint(output.shape)\n'''\n\n\ndef forward(image: np.ndarray, label):\n # we transform the image from [0, 255] to [-0.5, 0.5] to make it easier\n # to work with, this is standard practice\n out = conv.forward((image / 255) - 0.5)\n out = pool.forward(out)\n out = softmax.forward(out)\n\n # Calculate cross-entropy loss and accuracy\n loss = np.log(out[label])\n acc = 1 if 
np.argmax(out) == label else 0\n\n return out, loss, acc\n\n\nprint('MNIST CNN initialized')\n\n\nloss, num_correct = 0, 0\nfor i, (im, label) in enumerate(zip(test_images, test_labels)):\n _, l, acc = forward(im, label)\n loss += 1\n num_correct += acc\n\n if i % 100 == 99:\n print(\n '[Step %d] Past 100 steps: Average Loss %.3f | Accuracy: %d%%' %\n (i + 1, loss / 100, num_correct)\n )\n loss = 0\n num_correct = 0\n","sub_path":"DL/NeuralNetwork/ConvNeuralNetwork.py","file_name":"ConvNeuralNetwork.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"613636815","text":"import sys\nimport math\nimport collections\n\n\nlast = 100\n\n\ndef main(k=2):\n distribution = collections.defaultdict(int)\n\n for i in range(1, last + 1):\n power_of_k = k**i\n first_digit = get_first_digit(power_of_k)\n distribution[first_digit] += 1\n \n print(\"{0:3}th power of {1} is: {2}, first digit is: {3}\".format(\n i,\n k,\n power_of_k,\n first_digit))\n\n print(end=\"\\n\\n\")\n\n show_distribution(distribution)\n\n\ndef get_first_digit(x):\n while x >= 10:\n x //= 10\n return x\n\n\ndef show_distribution(distribution):\n for i in range(1, 10):\n print(\"Digit {0} has frequency of {1}.\".format(\n i, distribution[i]))\n\n\nmain() if len(sys.argv) <= 1 else main(int(sys.argv[1]))","sub_path":"powersof2.py","file_name":"powersof2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"47249298","text":"##### Run this like: #####\n##### python plotHistogram.py #######\n\nimport os, sys, re, time, getopt, glob, array\nfrom copy import copy, deepcopy\nfrom ROOT import *\nsys.path.insert(0, '/home/arka/arka/include')\nsys.path.insert(0, '/mnt/droplet/home/arka/arka/include')\nfrom Functions import * \n\ndef main():\n directory='plotBeta'\n if not os.path.exists(directory):\n os.makedirs(directory)\n rootFileName = sys.argv[1]\n allRootFileList = sys.argv[2]\n fileRoot = TFile(rootFileName, \"UPDATE\")\n \n with open(allRootFileList) as inputRootFileName:\n for line in inputRootFileName.readlines():\n if '.root' not in line: continue\n massValue = line.split('Mass')\n mass = massValue[1].split('.')[0]\n nameOfFile = line.rstrip()\n fileInput = TFile(nameOfFile, 'READ')\n NTDeff = fileInput.Get('NTDeffEz_Beta_eff')\n \n ### Draw 1D histograms\n drawLine = False\n logY = False\n leftLegend = False\n doAtlas = False\n\n file1D = [NTDeff]; legName = ['']; colorPlot = [4]\n actualMass = mass.split('_')[1]\n DrawHists(file1D, legName, colorPlot, '#beta', 'Efficiency', 0.0, 1.0, 0.0, 0.4, directory+'/NTDeffMass'+mass, 0, 0, drawLine, logY, 'Particle mass: '+actualMass+' GeV', 'NTD efficiency', '', leftLegend, doAtlas)\n \n fileRoot.cd()\n NTDeff.Write('NTDeffMass'+mass)\n fileRoot.Write()\n fileInput.Close()\n \n \n fileRoot.Close()\n \n \n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"plotBeta.py","file_name":"plotBeta.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"553843282","text":"A={}\nB={}\nC={}\nl=[]\nseq=[]\nfrom Bio import SeqIO\nfor record in SeqIO.parse('BSRD.fasta', 'fasta'):\n A[record.description]=record.seq \n if len(record.seq) >= 30 and len(record.seq)<=530: # filtering based on length\n l.append(record)\n B[record.description]=record.seq\n else:\n C[record.description]=record.seq\n \n \nwith open('filtered_BSRD.fasta', 'w') as f:\n [f.write('>{0},len={1}\\n{2}\\n'.format(key,len(value),value)) for key,value in B.items()]\n \nwith open('unfiltered_bsrd.txt', 'w') as f:\n [f.write('>{0},len={1}\\n{2}\\n'.format(key,len(value),value)) for key, value in C.items()]\n\noutput_handle = open(\"long_seqs.fasta\", \"w\") \nSeqIO.write(l, output_handle, \"fasta\") # SEQIO way of writing an output file\n\n\n#### This code is for filtering the fasta file based on the length. #######\n","sub_path":"Fasta_length_fileter.py","file_name":"Fasta_length_fileter.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"259381934","text":"from math import ceil\r\n\r\nclass PrimeIterator:\r\n def __init__(self, high):\r\n self.high = high\r\n self.number = 1\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def is_prime(self, number):\r\n if number == 2 or number == 3:\r\n return True\r\n for n in range(2, ceil(number * 0.5)):\r\n if number % n == 0:\r\n return False\r\n return True\r\n\r\n def __next__(self):\r\n while self.number < self.high:\r\n self.number += 1\r\n if self.is_prime(self.number):\r\n return self.number\r\n raise StopIteration\r\n\r\n\r\nhigh = int(input('Input the upper bound:\\n>'))\r\niterator = PrimeIterator(high)\r\nprint(*[i for i in iterator])\r\n","sub_path":"iter_prime.py","file_name":"iter_prime.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"296977029","text":"# Exercise 07\n\nimport math\n\nnum = int(input(\"Type a new number: \"))\n\ndouble = num * 2\ntriple = num * 3\nsquare = math.sqrt(num)\n\nprint(f\"The number {num} double is {double}, it's triple is {triple}, and it's square root is {square}\")","sub_path":"ex_007.py","file_name":"ex_007.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"547891405","text":"from popeye.visual_stimulus import VisualStimulus\nfrom popeye import og, dog\nimport popeye.utilities as utils\nimport numpy as np\nimport os.path as op\nfrom scipy import signal\nimport ctypes\nfrom popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus\n\nderivatives = '/data/odc/derivatives'\n\nv = np.load('/data/odc/zooi/tr_prf.npy')\ndata = np.mean(v, 0)\ndata = data[:, (data != 0).all(0)]\n\ndm = np.load(op.join(derivatives, 'prf/dm.npy'))\ndm = signal.resample(dm, 318, axis=-1)\n\nviewing_distance=15\nscreen_width=6\nTR=2.7\nn_pixels=50\n\nsweeps = np.array([-1,45,135,-1,225,315,-1]) # in degrees, -1 is blank\n#stimulus = VisualStimulus(stim_arr=dm,\n #viewing_distance=viewing_distance,\n #screen_width=screen_width,\n #scale_factor=1,\n #tr_length=1.0,\n #dtype=np.short)\nbar = simulate_bar_stimulus(100, 100, 40, 40, sweeps, 30, 30, 20, 0.67)\nstimulus = VisualStimulus(bar, 50, 25, 0.50, 1.0, ctypes.c_int16)\n\nmodel= og.GaussianModel(stimulus, utils.double_gamma_hrf)\nmodel.hrf_delay = 0\n\npars = dict(np.load('/data/odc/derivatives/voxel_prf/modelfree/sub-tr_desc-None_prf_pars.npz'))\n\nx = np.cos(pars['angle']) * pars['ecc']\ny = np.cos(pars['angle']) * pars['ecc']\n\n\n### FIT\n## define search grids\n# these define min and max of the edge of the initial brute-force search. 
\nx_grid = (-10,10)\ny_grid = (-10,10)\ns_grid = (1/stimulus.ppd + 0.25,5.25)\nh_grid = (-1.0,1.0)\n\n## define search bounds\n# these define the boundaries of the final gradient-descent search.\nx_bound = (-12.0,12.0)\ny_bound = (-12.0,12.0)\ns_bound = (1/stimulus.ppd, 12.0) # smallest sigma is a pixel\nb_bound = (1e-8,None)\nu_bound = (None,None)\nh_bound = (-3.0,3.0)\n\n \n## package the grids and bounds\ngrids = (x_grid, y_grid, s_grid)\nbounds = (x_bound, y_bound, s_bound, h_bound, b_bound, u_bound,)\n# fit\n#fit = dog.DifferenceOfGaussiansFit(model, data, grids, bounds, Ns=5,\n #voxel_index=(1,2,3), auto_fit=True, verbose=1) \n# fit\nfit = og.GaussianFit(model, data[:, 0], grids, bounds, Ns=4,\n voxel_index=(1,2,3), auto_fit=True, verbose=0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport ctypes, multiprocessing\nimport numpy as np\nimport sharedmem\nimport popeye.og_hrf as og\nimport popeye.utilities as utils\nfrom popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus\n\n# seed random number generator so we get the same answers ...\nnp.random.seed(2764932)\n\n### STIMULUS\n## create sweeping bar stimulus\nsweeps = np.array([-1,0,90,180,270,-1]) # in degrees, -1 is blank\nbar = simulate_bar_stimulus(100, 100, 40, 20, sweeps, 30, 30, 10)\n\n## create an instance of the Stimulus class\nstimulus = VisualStimulus(bar, 50, 25, 0.50, 1.0, ctypes.c_int16)\n\n### MODEL\n## initialize the gaussian model\nmodel = og.GaussianModel(stimulus, utils.double_gamma_hrf)\n\n## generate a random pRF estimate\nx = -5.24\ny = 2.58\nsigma = 1.24\nhrf_delay = -0.25\nbeta = 0.55\nbaseline = -0.88\n\n## create the time-series for the invented pRF estimate\ndata = model.generate_prediction(x, y, sigma, hrf_delay, beta, baseline)\n\n## add in some noise\ndata += np.random.uniform(-data.max()/10,data.max()/10,len(data))\n\n### FIT\n## define search grids\n# these define min and max of the edge of the initial brute-force search. 
\nx_grid = (-10,10)\ny_grid = (-10,10)\ns_grid = (1/stimulus.ppd + 0.25,5.25)\nh_grid = (-1.0,1.0)\n\n## define search bounds\n# these define the boundaries of the final gradient-descent search.\nx_bound = (-12.0,12.0)\ny_bound = (-12.0,12.0)\ns_bound = (1/stimulus.ppd, 12.0) # smallest sigma is a pixel\nb_bound = (1e-8,None)\nu_bound = (None,None)\nh_bound = (-3.0,3.0)\n\n## package the grids and bounds\ngrids = (x_grid, y_grid, s_grid, h_grid)\nbounds = (x_bound, y_bound, s_bound, h_bound, b_bound, u_bound,)\n\n## fit the response\n# auto_fit = True fits the model on assignment\n# verbose = 0 is silent\n# verbose = 1 is a single print\n# verbose = 2 is very verbose\nfit = og.GaussianFit(model, data, grids, bounds, Ns=3,\n voxel_index=(1,2,3), auto_fit=True,verbose=1)\n\n## plot the results\nimport matplotlib.pyplot as plt\nplt.plot(fit.prediction,c='r',lw=3,label='model',zorder=1)\nplt.scatter(range(len(fit.data)),fit.data,s=30,c='k',label='data',zorder=2)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.xlabel('Time',fontsize=18)\nplt.ylabel('Amplitude',fontsize=18)\nplt.xlim(0,len(fit.data))\nplt.legend(loc=0)\n\n## multiprocess 3 voxels\ndata = [data,data,data]\nindices = ([1,2,3],[4,6,5],[7,8,9])\nbundle = utils.multiprocess_bundle(og.GaussianFit, model, data, \n grids, bounds, indices, \n auto_fit=True, verbose=1, Ns=3)\n","sub_path":"analysis/prf/optimize_prfs.py","file_name":"optimize_prfs.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"516451654","text":"#from tensorflow import keras\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D\nfrom tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import Sequential\n\nimg_width = 150\nimg_height = 150\n\n\n\ndef load_model(MODEL_2):\n\tmodel =Sequential()\n\n\tmodel.add(Conv2D(32,(3,3), input_shape=(img_width, img_height, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n\tmodel.add(Conv2D(32,(3,3), input_shape=(img_width, img_height, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n\tmodel.add(Conv2D(64,(3,3), input_shape=(img_width, img_height, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n\tmodel.add(Flatten())\n\tmodel.add(Dense(64))\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(1))\n\tmodel.add(Activation('sigmoid'))\n\tmodel.load_weights(MODEL_2)\n\treturn model\n\n","sub_path":"predict/img_model.py","file_name":"img_model.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"382277269","text":"# import nessary\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport glob\nimport os\nimport math\nfrom tqdm import tqdm\n\n\n__author__ = 'cristian'\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize_images(image, [160, 160])\n image /= 255.0 # normalize to [0, 1] range\n\n return image\n\n\ndef load_and_preprocess_image(path_to_image):\n image = tf.read_file(path_to_image)\n\n return preprocess_image(image)\n\n\ndef get_employee(path_employee):\n \"\"\"\n return dict{id: name}\n \"\"\"\n data_frame = pd.read_excel(path_employee)\n rows, columns = data_frame.shape\n # data_frame contains two columns: x_emp_code, name\n # get data by row correspond employee code and name\n dict_code_name = {data_frame.iloc[i]['x_emp_code']: data_frame.iloc[i]['name'] for i in range(rows)}\n\n return dict_code_name\n\n\ndef get_employee_code(path_to_dataset, path_employee):\n \"\"\"\n\n \"\"\"\n all_classes = os.listdir(path_to_dataset)\n employee_code = {each_class.split('_')[-1]: '_'.join(each_class.split('_')[:-1]) \\\n for each_class in all_classes}\n print(employee_code)\n # check employee code incorrect\n ds_employee = get_employee(path_employee)\n # count percent checking\n for employee in tqdm(employee_code):\n if employee in ds_employee:\n continue\n else:\n print(\"Occurs when check employee: {}-{}\".format(employee, employee_code[employee]))\n return employee_code\n\n\ndef get_dataset(path_ds, ratio):\n \"\"\"\n return: train_set path, test_set path, number of classes, number of images\n \"\"\"\n\n all_path = glob.glob(os.path.join(path_ds, '*/*'))\n employees = os.listdir(os.path.join(path_ds))\n number_classes = len(employees)\n number_images = len(all_path)\n np.random.shuffle(all_path)\n split = int(math.floor(number_images*(1-ratio)))\n train_set = all_path[:split]\n test_set = all_path[split:]\n\n return train_set, test_set, number_classes, 
number_images\n\n\ndef data_loader():\n pass\n\nif __name__ == '__main__':\n train_set, test_set, number_classes, number_images = get_dataset('datasets', 0.2)\n print(number_classes)\n print(number_images)\n print(len(train_set))\n print(len(test_set))\n _ = get_employee_code('datasets', 'employee/employee.xlsx')\n","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"518525964","text":"# %%\nimport os\nimport tridy\nfrom tridy import GeoConcept, SubGeoConcept, MetaData, Table, View, DBStorage, DataSource, Feature, FeatureWithID, AdmUnitFeature, OLUFeature, Grid, Imagee, ds_from_metadata, xml_lpis_cz_reader, lpis_cz__posledni_aktualizace, get_listvalues_from_generator, apply_function, select_nodes_from_graph, unzip_file, find_neighbors_till, connection_parameters_to_pg, transform_name_to_postgresql_format, world_to_pixel \nfrom importlib import reload\nimport requests\nimport datetime\nimport re\nfrom io import BytesIO\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nfrom osgeo import ogr, osr, gdal\nimport networkx as nx\nimport numpy as np\nimport json\nimport binascii\nimport copy\nimport time\n\nfrom lxml import etree\n\nfrom ipyleaflet import Map, GeoJSON\n\n# %%\ndel(GeoConcept, SubGeoConcept, MetaData, Table, View, DBStorage, DataSource, Feature, FeatureWithID, AdmUnitFeature, OLUFeature, Grid, Imagee, ds_from_metadata,xml_lpis_cz_reader,get_listvalues_from_generator,apply_function,select_nodes_from_graph,world_to_pixel)\nreload(tridy)\nfrom tridy import GeoConcept, SubGeoConcept, MetaData, Table, View, DBStorage, DataSource, Feature, FeatureWithID, AdmUnitFeature, OLUFeature, Grid, Imagee, ds_from_metadata, xml_lpis_cz_reader, get_listvalues_from_generator, apply_function, select_nodes_from_graph,world_to_pixel\n\n# %%\ndef compilable_tree_dictionary(object): \n g_dict=\\\n {'admunit':{'object':object},\\\n 'admunit__tree':{'object':'admunit','function':'return_graph_representation'},\\\n 'admunit__tree__reverse':{'object':'admunit__tree','function':'reverse'},\\\n 'admunit__tree__level3':{'function':select_nodes_from_graph,'parameters':['admunit__tree','level',3]},\\\n 'admunit__tree__level4':{'function':select_nodes_from_graph,'parameters':['admunit__tree','level',4]}}\n return g_dict\n\n# %%\ndef find_neighbors_level(graph,start_node,level):\n if 
graph.nodes()[start_node]['level']==level:\n yield start_node\n else:\n for n in graph.neighbors(start_node):\n yield from find_neighbors_level(graph,n,level) \n\n# %%\ndef get_ruian_au_feature_geometry_from_wfs(gml_id):\n url='https://services.cuzk.cz/wfs/inspire-au-wfs.asp?service=WFS&request=GetFeature&typeName=au:AdministrativeUnit&maxFeatures=1&featureID=%s&version=2.0.0' %gml_id\n r=requests.get(url,stream=False)\n if r.status_code==200:\n tree=etree.parse(BytesIO(r.content))\n root=tree.getroot()\n geom=root.find('.//{http://www.opengis.net/gml/3.2}MultiSurface')\n geom_ogr=ogr.CreateGeometryFromGML(etree.tostring(geom).decode())\n return geom_ogr.ExportToWkt()\n else:\n return 'WFS no works'\n\n# %%\n#for the case when data has to be downloaded externally initialization of requests session variable with setting of number of retries\ns = requests.Session()\nretries = Retry(total=5, backoff_factor=1, status_forcelist=[ 502, 503, 504 ])\ns.mount('http://', HTTPAdapter(max_retries=retries))\n\n# %%\nreplacement_dictionary = {\"[posledni_den_mesice]\":(datetime.datetime.today().replace(day=1)-datetime.timedelta(days=1)).strftime('%Y%m%d'),\"[lpis_cz__posledni_aktualizace]\":lpis_cz__posledni_aktualizace().strftime('%Y%m%d'), \"[vcera]\":(datetime.datetime.today().replace(day=1)-datetime.timedelta(days=1)).strftime('%Y%m%d')} \njson_feature_structure=[{\"name\":\"id\",\"type\":\"serial primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"}]\njson_feature_with_bigid_structure=[{\"name\":\"id\",\"type\":\"bigint primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"}]\njson_admin_unit_structure=[{\"name\":\"id\",\"type\":\"integer primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"},{\"name\":\"level\",\"type\":\"integer\"},{\"name\":\"parent_id\",\"type\":\"text\"}]\njson_admin_unit_structure_at=[{\"name\":\"id\",\"type\":\"text primary 
key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"},{\"name\":\"level\",\"type\":\"integer\"},{\"name\":\"parent_id\",\"type\":\"text\"}]\njson_feature_with_raster_structure=[{\"name\":\"id\",\"type\":\"serial primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"},{\"name\":\"raster_maps\",\"type\":\"raster\"}]\n\n# %%\nadmunit_cz__metadata=MetaData('Administrative units in Czech Republic',\n {\"url\":\"https://vdp.cuzk.cz/vymenny_format/soucasna/[posledni_den_mesice]_ST_UKSG.xml.zip\",\n \"format\":\"GML\", \"compression\":\"zip\"},'data')\n\n# %%\nadmunit_cz__ds=ds_from_metadata(admunit_cz__metadata)\nadmunit_cz=GeoConcept('Administrative units in Czech Republic','Administrative units in Czech Republic. All levels.',\n 'AdmUnitFeature',json_admin_unit_structure, data_source=admunit_cz__ds, subgeoconcepts=[] )\n\n# %%\nurl_adresa=admunit_cz.get_data_source().get_attributes()['url']\nfor i in re.findall('\\[.*?\\]',url_adresa):\n if i in list(replacement_dictionary.keys()):\n url_adresa=url_adresa.replace(i,replacement_dictionary[i])\n \nadmunit_cz.get_data_source().set_attribute({'url':url_adresa})\ndel(url_adresa)\n\n# %%\n#admunit_cz.get_data_source().download_data('archive.zip',s,'all',os.getcwd())\nadmunit_cz.get_data_source().set_data_file('20201031_ST_UKSG.xml')\n\n# %%\nconcept_list=['Staty','Vusc','Okresy','Obce','KatastralniUzemi']\nconcept_additional_attributes={'Staty':{'level_value':0,'parent_value':'null','id_attribute':'Kod'},\n 'Vusc':{'level_value':1,'parent_value':'1','id_attribute':'Kod'},\n 'Okresy':{'level_value':2,'parent_attribute':'VuscKod','id_attribute':'Kod'},\n 'Obce':{'level_value':3,'parent_attribute':'OkresKod','id_attribute':'Kod'},\n 'KatastralniUzemi':{'level_value':4,'parent_attribute':'ObecKod','id_attribute':'Kod'}}\n\n# %%\nfor l in list(set(concept_list).intersection(set(admunit_cz.get_data_source().list_layers()))):\n 
admunit_cz.append_subgeoconcept(SubGeoConcept(l,l,'AdmUnitFeature',admunit_cz.get_attributes(),data_source=DataSource(admunit_cz.get_data_source().get_type(),admunit_cz.get_data_source().get_name(),({**admunit_cz.get_data_source().get_attributes(),**{'layer':l}}),None,admunit_cz.get_data_source().get_data_file()),supergeoconcept=admunit_cz,table_inheritance=False,type='semantic',subgeoconcepts=[]))\n\n# %%\n#administrative territorial units\ndbs_admin_connection={'dbname':'olu_administrative_units','user':'euxdat_admin','host':'euxdat-db-svc','port':'5432','password':'Euxdat12345'}\ndbs_admin=DBStorage(dbs_admin_connection)\ndbs_admin.connect()\ndbs_admin.disconnect()\ndbs_admin.connect()\n\n# %%\nadmunit_cz.create_table(dbs_admin, name='default',scheme='cz',conflict='append')\n\n# %%\nfor sub in admunit_cz.get_subgeoconcepts():\n sub.set_table(View(sub.get_name(),sub.get_attributes(), sub.get_supergeoconcept().get_table(),\"level=%s\" % (concept_additional_attributes[sub.get_name()]['level_value']), dbs=dbs_admin, scheme='public', type='usual'))\n dbs_admin.execute(sub.get_table().create_script())\n\n# %%\nwgs84_sr=osr.SpatialReference()\nwgs84_sr.ImportFromProj4('+proj=longlat +datum=WGS84 +no_defs')\n\nsjtsk5514_sr=osr.SpatialReference()\nsjtsk5514_sr.ImportFromProj4('+proj=krovak +lat_0=49.5 +lon_0=24.83333333333333 +alpha=30.28813975277778 +k=0.9999 +x_0=0 +y_0=0 +ellps=bessel +units=m +towgs84=570.8,85.7,462.8,4.998,1.587,5.261,3.56 +no_defs')\n\nsjtsk5514_to_wgs84=osr.CoordinateTransformation(sjtsk5514_sr,wgs84_sr)\n\n# %%\nsub=admunit_cz.get_subgeoconcept_by_name('Okresy')\n\n# %%\nwith open('okresy.geojson', 'w', encoding='utf-8') as file:\n geojson={\"type\": \"FeatureCollection\", \"features\": [] }\n features=sub.read_features_from_table(100)\n for f in features:\n if len(f)>0:\n for feature in f:\n feature.transform_geometry(sjtsk5514_to_wgs84)\n geojson[\"features\"].append(feature.export_to_geojson())\n else:\n break\n json.dump(geojson, file, 
ensure_ascii=False, indent=4)\n\n# %%\nwith open('okresy.geojson', 'r') as f:\n data = json.load(f)\n \nm = Map(center=(49.5,14.5), zoom=6)\n\ngeo_json = GeoJSON(\n data=data\n)\n \nm.add_layer(geo_json)\n\nm\n\n# %%\n","sub_path":"jupyter_examples/administrative_units_in_czech_republic.py","file_name":"administrative_units_in_czech_republic.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"386046129","text":"def connect(url):\n import re\n import socket\n\n try:\n mysock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n mysock.connect((re.findall('http://(.+?)/',url)[0],80))\n mysock.send(b'GET '+url.encode()+b' HTTP/1.0\\n\\n')\n except:\n return('please enter valid url')\n\n data=bytes()\n while True:\n new=mysock.recv(10000)\n if len(new)<1: break\n data=data+new\n mysock.close()\n\n return(data)","sub_path":"socket connect.py","file_name":"socket connect.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"327492453","text":"\n\nimport yaml\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nfrom query import QueryExecutor\n\nif __name__ == '__main__':\n config = yaml.safe_load(open('config.yml'))\n models_dir = config['models_dir']\n mongo_connection = config['mongo_connection']\n query_executor = QueryExecutor(mongo_connection, models_dir)\n doc2vec_similar(query_executor)\n\n\nX = model[model.wv.vocab]\npca = PCA(n_components=2)\nresult = pca.fit_transform(X)\n# create a scatter plot of the projection\npyplot.scatter(result[:, 0], result[:, 1])\nwords = list(model.wv.vocab)\nfor i, word in enumerate(words):\n\tpyplot.annotate(word, xy=(result[i, 0], result[i, 1]))\npyplot.show()","sub_path":"src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"642983703","text":"data = [ord(x) for x in open('input.txt').readline().strip()]\ndata += [17, 31, 73, 47, 23]\nrope = list(range(256))\nposition = 0\nskip = 0\nfor _ in range(64):\n for rope_length in data:\n twisted = []\n for x in range(rope_length):\n twisted.append(rope[(position + x) % 256])\n twisted.reverse()\n for x in range(rope_length):\n rope[(position + x) % 256] = twisted[x]\n position += (rope_length + skip) % 256\n skip += 1\n\ndense_hash = []\nfor x in range(0, 256, 16):\n dense_num = rope[x]\n for y in range(15):\n dense_num = dense_num ^ rope[x + y + 1]\n dense_hash.append(dense_num)\nprint(''.join([format(x, '02x') for x in dense_hash]))\n","sub_path":"day10/day10pt2.py","file_name":"day10pt2.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"24249825","text":"# 2. На языке Python 3 разработать 2 программы (модули) для обработки одномерных массивов (векторов),\n# используя списки. Одна программа долж-на работать с целочисленным вектором, а вторая с вещественным вектором.\n\n# 1) Дан целочисленный вектор А(n). Подсчитать сколько раз встречается в этом векторе максимальное по величине число.\n# 2) Дан целочисленный вектор А(n). Найти наибольшее из четных и количество нечетных\n\ndef task1(vector):\n vector.sort()\n print(\"Максимальное значение\", vector[-1], \"встречается\", vector.count(vector[-1]), \"раз\")\n\n\ndef task2(vector):\n odd = []\n even = []\n for i in vector:\n if int(i) % 2 == 0:\n odd.append(int(i))\n else:\n even.append(int(i))\n try:\n max_odd = sorted(odd)[-1]\n except IndexError:\n max_odd = \"нет чётных\"\n print(\"Наибольшее из чётных - \", max_odd, \"Количество нечётных - \", len(even))\n\n\nif __name__ == '__main__':\n task1([32.23, 43.32, 41.43, 43.14, 43, 43.32, 12.43, 33.23, 12, 22])\n task2([32, 1, 4, 23, 13, 44, 12, 44, 21, 12, 44])\n task2([1, 1, 1, 1])\n","sub_path":"lab2_vectors.py","file_name":"lab2_vectors.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"448542342","text":"\nfrom django import template\nfrom django.utils.html import format_html\n\nregister = template.Library()\n\n\n@register.filter()\ndef check_color(elem):\n color = 'white'\n text = '-'\n if elem != '':\n text = float(elem)\n if text < 0:\n color = 'green'\n elif 1 < text < 2:\n color = 'LightSalmon'\n elif 2 <= text <= 5:\n color = 'Red'\n elif text > 5:\n color = 'DarkRed'\n return format_html(\n '{} | ',\n color,\n text,\n )\n\n","sub_path":"dynamic-templates/task1/app/templatetags/app_tags.py","file_name":"app_tags.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"385842532","text":"import os\nimport django\nimport time\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"unb_libraries_status.settings\")\ndjango.setup()\n\nfrom notifications.models import Event, Notification\nfrom test_cases.models import JenkinsBuild, TestCase\nimport sys, importlib\n\nbuild_number = int(sys.argv[1])\nbuild = JenkinsBuild(build_number)\nbuild.evaluate_build()\n\nfailed_tests = TestCase.get_failed_tests()\nnotification_config = importlib.import_module('notifications.config.notification_config')\n\ncurrent_time = int(time.strftime('%H%M'))\nexclude_tests = []\nfor mute in notification_config.MUTE_TESTS:\n if 'start' in mute and (current_time < mute['start'] or current_time > mute['end']):\n continue\n\n exclude_tests += mute['tests']\n\nfailed_tests = [item for item in failed_tests if item not in exclude_tests]\n\nevent = Event.get_latest_event(failed_tests)\n\nif (len(failed_tests) > 0):\n if (event.notification_required()):\n Notification.send_notifications(event)\nelse:\n if (event != None and event.is_open()):\n if (event.notification_required()):\n Notification.send_notifications(event)\n event.close()\n","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"7676055","text":"# cE_simulation_tool.py\n'''\n本文件是写的界面与后台仿真线程\n'''\nfrom defination_read import *\nfrom UI.simulation_exe_Form import Ui_Form # pyUIC自动生成的pyqt5界面\nfrom members.CN_member import *\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets # 一些必要的qt包\nimport sys # 线程挂起恢复必要的包\nimport os # 必要的包\n\n\ndef format_members_id_role(xml_dom: xml.dom.minidom.Document):\n \"\"\"\n 格式化成员的id与角色,本函数仅用于获取仿真成员xml文件中的部分简单信息包含成员id,成员角色,各类成员的数量\n :param xml_dom: xml文件的dom对象\n :return: key为id与role的dict,成员总个数,primitive成员的数量,collective成员的数量,adviser成员的数量,monitor成员的数量\n \"\"\"\n root = xml_dom.documentElement # 获取dom对象的根\n # 初始化id_role字典\n id_role_dict = {\n 'id': list(),\n 'role': list()\n }\n member_info_labels = root.getElementsByTagName('memberInfo') # 获取memberInfo标签\n primitive_number = len(root.getElementsByTagName('primitiveInfo')) # 获取primitiveInfo标签\n collective_number = len(root.getElementsByTagName('collectiveInfo')) # 获取collectiveInfo标签\n adviser_number = len(root.getElementsByTagName('advisorInfo')) # 获取advisorInfo标签\n monitorMember_number = len(root.getElementsByTagName('monitorMemberInfo')) # 获取monitorMemberInfo标签\n for one_member_info_label in member_info_labels:\n id_role_dict['id'].append(one_member_info_label.getAttribute('成员ID'))\n id_role_dict['role'].append(one_member_info_label.getAttribute('成员类型'))\n return id_role_dict, len(\n member_info_labels), primitive_number, collective_number, adviser_number, monitorMember_number\n\n\nglobal_attribute_dict = dict() # 全局属性递推方法\nround_methods = dict() # 轮方法\n\nnet_p2p = pd.DataFrame() # 原子型成员网络\nnet_p2a = pd.DataFrame() # 原子型——建议者成员网络\nnet_p2m = pd.DataFrame() # 原子型——监控者成员网络\nnet_p2c = pd.DataFrame() # 原子型——集合型成员网络\nnet_c2m = pd.DataFrame() # 集合型——监控者成员网络\nnet_c2c = pd.DataFrame() # 集合型——集合型成员网络\n\nprimitives = None # 原子型单元集合\nadvisers = None # 建议者单元集合\nmonitorMembers = None # 监控者单元集合\ncollectives = None # 集合型单元集合\n\n\nclass uf_Form(QtWidgets.QWidget, Ui_Form):\n def __init__(self):\n super(uf_Form, 
self).__init__()\n self.setupUi(self)\n self.setFixedSize(self.width(), self.height())\n self.start_btn_icon = QtGui.QIcon(r'.\\UI\\播放按钮.ico')\n self.pause_btn_icon = QtGui.QIcon(r'.\\UI\\暂停.ico')\n stop_btn_icon = QtGui.QIcon(r'.\\UI\\停止.ico')\n self.start_or_pause_btn.setIcon(self.start_btn_icon)\n self.stop_btn.setIcon(stop_btn_icon)\n self.modify_dateEdit.setDate(QtCore.QDate.currentDate())\n self.members_filedialog_btn.clicked.connect(self.slot_btn_set_members_path)\n self.def_xml_filedialog_btn.clicked.connect(self.slot_btn_set_definition_path)\n self.record_filedialog_btn.clicked.connect(self.slot_btn_set_record_path)\n\n def slot_btn_set_definition_path(self):\n '''\n 读取仿真定义文件路径\n :return:\n '''\n try:\n xml_definition_path = QtWidgets.QFileDialog.getOpenFileName(self, \"选择仿真定义xml文件\", \"./\",\n \"XML Files (*.xml);;All Files (*)\")\n self.def_xml_path_edit.setText(xml_definition_path[0])\n definition_dom = read_xml(xml_definition_path[0])\n # 注册全局函数成员递推函数\n global global_attribute_dict, round_methods\n global_attribute_dict = register_global_attribute_method(definition_dom)\n round_methods = register_round_method(definition_dom)\n self.service_msg_log_text.append('Read definition from: ')\n self.service_msg_log_text.append(xml_definition_path[0])\n except:\n QtWidgets.QMessageBox.critical(self, \"错误\", \"仿真定义文件错误\")\n self.def_xml_path_edit.clear()\n self.service_msg_log_text.append('definition file error. 
')\n raise\n\n def slot_btn_set_members_path(self):\n '''\n 读取所有成员的路径的槽函数\n :return:\n '''\n try:\n xml_members_path = QtWidgets.QFileDialog.getOpenFileName(self, \"选择仿真成员xml文件\", \"./\",\n \"XML Files (*.xml);;All Files (*)\")\n self.members_xml_path_edit.setText(xml_members_path[0])\n # 根据这个路径来读取成员\n member_dom = read_xml(xml_members_path[0])\n # 服务信息更新显示\n self.service_msg_log_text.append('Read member from: ')\n self.service_msg_log_text.append(xml_members_path[0])\n # 解析成员\n global net_p2p, net_p2a, net_p2m, net_p2c, net_c2m, net_c2c\n net_p2p, net_p2a, net_p2m, net_p2c, net_c2m, net_c2c = net_work_read(member_dom)\n global primitives, advisers, monitorMembers, collectives\n primitives, advisers, monitorMembers, collectives = member_read(member_dom)\n id_role_dict, member_number, p_number, c_number, a_number, m_number = format_members_id_role(member_dom)\n self.reset_member_tableWidget(member_number, id_role_dict)\n self.primitive_num_edit.setText(str(p_number))\n self.adviser_num_edit.setText(str(a_number))\n self.monitor_num_edit.setText(str(m_number))\n self.collective_num_edit.setText(str(c_number))\n self.service_msg_log_text.append(\"Primitive:{}|Adviser:{}|Monitor:{}|Collective:{}\".format(\n p_number, a_number, m_number, c_number\n ))\n except:\n QtWidgets.QMessageBox.critical(self, \"错误\", \"成员生成文件错误\")\n self.members_xml_path_edit.clear()\n self.service_msg_log_text.append('member XML file error. 
')\n raise\n\n def reset_member_tableWidget(self, length, id_role_dict):\n self.member_tableWidget.setRowCount(length)\n item = self.member_tableWidget.horizontalHeaderItem(0)\n item.setText(QtCore.QCoreApplication.translate(\"Form\", \"Member\"))\n item = self.member_tableWidget.horizontalHeaderItem(1)\n item.setText(QtCore.QCoreApplication.translate(\"Form\", \"Role\"))\n self.member_tableWidget.setHorizontalHeaderItem(1, item)\n temp_arrow = 0\n for id, role in zip(id_role_dict['id'], id_role_dict['role']):\n id_item, role_item = QtWidgets.QTableWidgetItem(), QtWidgets.QTableWidgetItem()\n id_item.setText(QtCore.QCoreApplication.translate(\"Form\", id))\n role_item.setText(QtCore.QCoreApplication.translate(\"Form\", role))\n self.member_tableWidget.setItem(temp_arrow, 0, id_item)\n self.member_tableWidget.setItem(temp_arrow, 1, role_item)\n temp_arrow += 1\n\n def slot_btn_set_record_path(self):\n '''\n 设置仿真结果路径保存的槽函数\n :return:\n '''\n dir_record_path = QtWidgets.QFileDialog.getExistingDirectory(self, \"选择仿真记录文件夹\", os.getcwd())\n if dir_record_path == \"\":\n print('取消选择')\n return\n else:\n self.record_dir_path_edit.setText(dir_record_path)\n self.service_msg_log_text.append('Set record dictionary to: ')\n self.service_msg_log_text.append(dir_record_path)\n\n def start_check(self):\n if self.members_xml_path_edit.text() is \"\" or \\\n self.def_xml_path_edit.text() is \"\" or \\\n self.record_dir_path_edit.text() is \"\" or \\\n self.version_edit.text() is \"\" or\\\n self.generation_Edit.text() is \"\" or\\\n self.step_size_Edit.text() is \"\":\n return False\n else:\n return True\n\n\nif __name__ == '__main__':\n all_change_time = 2\n for chance_time in range(all_change_time):\n extend_path = input('请输入外部函数文件所在路径:')\n sys.path.append(extend_path)\n try:\n from aa__ import *\n\n print('外部函数包引用成功')\n break\n except:\n print('外部函数文件路径有误,您还有{}次机会'.format(all_change_time - chance_time - 1))\n if chance_time < all_change_time - 1:\n continue\n else:\n 
input('错误次数已达上限,请按回车键退出')\n exit(0)\n print(globals()['globalAttribute1'])\n print(globals()['A1']('new init', 1, 3))\n\n app = QtWidgets.QApplication(sys.argv)\n window = uf_Form()\n window.setWindowTitle('众智网络仿真执行工具软件')\n window.show()\n\n sys.exit(app.exec_())\n","sub_path":"simulation_tool.py","file_name":"simulation_tool.py","file_ext":"py","file_size_in_byte":9118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"399480425","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 10 11:04:42 2018\n\n@author: Samuele Garda\n\"\"\"\n\ndef evaluate(eval_data,subs,simplifier,topn):\n \n#Initialize variables: \n potentialc = 0 \n potentialt = 0 \n precisionc = 0 \n precisiont = 0 \n recallt = 0\n \n eval_data = load_eval_data(eval_data)\n \n for idx,test_case in eval_data.iterrows():\n \n target = test_case['c_w']\n candidates = set(test_case['sub'])\n substitutions = simplifier.get_candidates(target, topn = topn)\n if target in substitutions: \n overlap = candidates.intersection(set(substitutions[target])) \n precisionc += len(overlap) \n if len(overlap)>0:\n potentialc += 1 \n precisiont += len(substitutions[target]) \n potentialt += 1 \n recallt += len(candidates) \n \n potential = float(potentialc)/float(potentialt) \n precision = float(precisionc)/float(precisiont) \n recall = float(precisionc)/float(recallt) \n fmean = 0.0 \n if precision==0.0 and recall==0.0: \n fmean = 0.0 \n else: \n fmean = 2*(precision*recall)/(precision+recall) \n \n #Return measures: \n return potential, precision, recall, fmean \n ","sub_path":"evaluation/test_eval_sub.py","file_name":"test_eval_sub.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"332148448","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport const\nimport numpy as np\nimport math\nimport tensorflow as tf\n\n\nclass Cbow(object):\n def __init__(self, corpus):\n self.corpus = corpus\n\n def test(self, word, k=10):\n Weight = tf.Variable(tf.random_normal([self.corpus.n_words, const.EMBEDDING_SIZE], -1.0, 1.0))\n inputs = tf.placeholder(tf.int32, [None])\n embed = tf.nn.embedding_lookup(Weight, inputs)\n\n # cosine\n test_embed = tf.placeholder(tf.float32, [None])\n test_input = tf.placeholder(tf.float32, [None])\n normed_embed = tf.nn.l2_normalize(test_embed, dim=0)\n normed_array = tf.nn.l2_normalize(test_input, dim=0)\n cosine_similarity = tf.reduce_sum(tf.multiply(normed_array, normed_embed))\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n # restore model\n tf.train.Saver().restore(sess, const.MODEL_PATH)\n\n vectors = sess.run(embed, feed_dict={inputs: range(self.corpus.n_words)})\n vocab = self.corpus.vocab\n idx = self.corpus.var_word(word)\n scores = []\n for i in range(len(vocab)):\n if vocab[i] == word or vocab[i] == const.U_TOKEN:\n continue\n vec_a = vectors[i].reshape([-1])\n vec_b = vectors[idx].reshape([-1])\n cosine_sim = sess.run(cosine_similarity, feed_dict={test_embed: vec_a, test_input: vec_b})\n scores.append([vocab[i], cosine_sim]) # calculates cosine similarity\n return sorted(scores, key=lambda x: x[1], reverse=True)[:k]\n\n def train(self):\n Weight = tf.Variable(tf.truncated_normal([self.corpus.n_words, const.EMBEDDING_SIZE],\n stddev=1.0 / math.sqrt(const.EMBEDDING_SIZE)))\n bias = tf.Variable(tf.zeros([self.corpus.n_words]))\n\n inputs = tf.placeholder(tf.int32, [const.BATCH_SIZE, const.WIN_SIZE])\n outputs = tf.placeholder(tf.int32, [const.BATCH_SIZE, 1])\n embed = tf.nn.embedding_lookup(tf.random_normal([self.corpus.n_words, const.EMBEDDING_SIZE], -1.0, 1.0), inputs)\n\n embed_sum = tf.reduce_sum(embed, 1)\n loss = tf.reduce_mean(\n 
tf.nn.sampled_softmax_loss(Weight, bias, outputs, embed_sum, 3, self.corpus.n_words)) # negative sampling\n optimizer = tf.train.AdamOptimizer(learning_rate=const.LR_RATE).minimize(loss)\n\n saver = tf.train.Saver()\n\n losses = []\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n for epoch in range(const.EPOCH):\n inps, targets = self.corpus.batch_data()\n _, _loss = sess.run([optimizer, loss], feed_dict={inputs: inps, outputs: targets})\n\n losses.append(_loss)\n if epoch % 100 == 0:\n print('epoch, ', epoch, 'mean loss', np.mean(losses))\n losses = []\n\n # save model\n saver.save(sess, const.MODEL_PATH)\n","sub_path":"nlp/embedding/word2vec/tf/cbow_neg.py","file_name":"cbow_neg.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"215990591","text":"def checkio(number):\n\n num = str(number)\n result = None\n for i in range(len(num)):\n if int(num[i]) == 0:\n continue\n if i == 0:\n result = int(num[i])\n else:\n result *= int(num[i])\n\n return result\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio(123405) == 120\n assert checkio(999) == 729\n assert checkio(1000) == 1\n assert checkio(1111) == 1\n","sub_path":"checkio/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"9216504","text":"# Copyright (c) 2019 The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom spinn_utilities.overrides import overrides\nfrom pacman.exceptions import (\n PacmanConfigurationException, PartitionMissingEdgesException)\nfrom pacman.model.graphs import AbstractMultiplePartition\nfrom pacman.model.graphs.machine import (\n AbstractSDRAMPartition, SDRAMMachineEdge)\n\n\nclass SourceSegmentedSDRAMMachinePartition(\n AbstractMultiplePartition, AbstractSDRAMPartition):\n \"\"\"\n An SDRAM partition that gives each edge its own slice of memory from a\n contiguous block. 
The edges all have the same destination vertex.\n \"\"\"\n __slots__ = [\n \"_sdram_base_address\",\n ]\n\n def __init__(self, identifier, pre_vertices):\n \"\"\"\n :param str identifier: The identifier of the partition\n :param str label: A label of the partition\n :param iterable(~pacman.model.graphs.AbstractVertex) pre_vertices:\n The vertices that an edge in this partition may originate at\n \"\"\"\n super().__init__(\n pre_vertices, identifier, allowed_edge_types=SDRAMMachineEdge)\n self._sdram_base_address = None\n\n def total_sdram_requirements(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return sum(edge.sdram_size for edge in self.edges)\n\n @property\n def sdram_base_address(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self._sdram_base_address\n\n @overrides(AbstractMultiplePartition.add_edge)\n def add_edge(self, edge):\n # check\n if len(self._destinations):\n if edge.post_vertex not in self._destinations:\n raise PacmanConfigurationException(\n f\"The {self.__class__.__name__} can only support \"\n \"1 destination vertex\")\n try:\n if len(self._pre_vertices[edge.pre_vertex]) != 0:\n raise PacmanConfigurationException(\n f\"The {self.__class__.__name__} only supports 1 edge from \"\n \"a given pre vertex.\")\n except KeyError as ex:\n raise PacmanConfigurationException(\n \"Edge pre_vertex is not a Partition. 
pre vertex\") from ex\n # add\n super().add_edge(edge)\n\n @sdram_base_address.setter\n def sdram_base_address(self, new_value):\n if len(self.pre_vertices) != len(self.edges):\n raise PartitionMissingEdgesException(\n f\"There are {len(self.pre_vertices)} pre vertices \"\n f\"but only {len(self.edges)} edges\")\n\n self._sdram_base_address = new_value\n\n for pre_vertex in self._pre_vertices.keys():\n # allocate for the pre_vertex\n edge = self._pre_vertices[pre_vertex].peek()\n edge.sdram_base_address = new_value\n new_value += edge.sdram_size\n\n @overrides(AbstractSDRAMPartition.get_sdram_base_address_for)\n def get_sdram_base_address_for(self, vertex):\n if self._sdram_base_address is None:\n return None\n if vertex in self._pre_vertices:\n edge = self._pre_vertices[vertex].peek()\n return edge.sdram_base_address\n else:\n return self._sdram_base_address\n\n @overrides(AbstractSDRAMPartition.get_sdram_size_of_region_for)\n def get_sdram_size_of_region_for(self, vertex):\n if vertex in self._pre_vertices:\n edge = self._pre_vertices[vertex].peek()\n return edge.sdram_size\n else:\n return self.total_sdram_requirements()\n","sub_path":"pacman/model/graphs/machine/source_segmented_sdram_machine_partition.py","file_name":"source_segmented_sdram_machine_partition.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"562056987","text":"from app.main import db\nfrom app.main.models.objective_questions import ObjectiveQuestions\nfrom app.main.models.submission_objective import SubmissionObjective\nfrom flask import jsonify\n\ndef take_test(data):\n \"\"\"[summary]\n \n Returns:\n [type]: [description]\n \"\"\"\n student_id = int(data[\"student_id\"])\n quiz_test_id = data[\"test_id\"]\n query = db.session.query(ObjectiveQuestions).filter_by(quiz_test_id=quiz_test_id)\n items = []\n for i in query:\n items.append({\"student_id\":student_id,\"quiz_test_id\":i.quiz_test_id,\"question_id\":i.question_id,\"marks\":i.marks})\n for i in range(len(items)):\n new_submission = SubmissionObjective(\n student_id = items[i]['student_id'],\n question_id = items[i]['question_id'],\n quiz_test_id = items[i]['quiz_test_id'],\n marks = items[i]['marks']\n )\n db.session.add(new_submission)\n db.session.commit()\n response_object = jsonify({\"response\": \"successfully added\"})\n return response_object, 200","sub_path":"src/server/app/main/services/take_test.py","file_name":"take_test.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"435824717","text":"import random\nimport turtle as t\nfrom turtle import Turtle\n\ndef turn_right(): #오른쪽 화살키\n t.setheading(0)\n t.forward(10)\n\ndef turn_up(): #위쪽 화살키\n t.setheading(90)\n t.forward(10)\n\ndef turn_left(): #왼쪽 화살키\n t.setheading(180)\n t.forward(10)\n\ndef turn_down(): #아래쪽 화살키\n t.setheading(270)\n t.forward(10)\n\ndef play():\n t.forward(10)\n te.forward(9)\n #적거북이가 주인공을 쫓아감\n ang=te.towards(t.pos())\n te.setheading(ang)\n #주인공 거북이가 먹이에 닿으면 먹이가 랜덤하게 이동\n if t.distance(tf) < 12:\n x = random.randint(-230,230)\n y = random.randint(-230,230)\n tf.goto(x,y)\n if t.distance(te) < 12:\n t.ontimer(play, 100) #0.1초\n\n#메인영역\nt.setup(500,500) #너비,높이\nt.title(\"달려라 거북이\")\nt.speed(0)\nt.up()\nt.color('white')\nt.bgcolor('black')\nt.shape('turtle')\n\n#적 거북이\nte = t.Turtle() #Turtle() 클래스에서 te인스턴스 생성\nte.shape('turtle')\nte.color('yellow')\nte.speed(0)\nte.up()\nte.goto(0,200)\n\n#먹이\ntf = t.Turtle()\ntf.shape('circle')\ntf.color('blue')\ntf.shapesize(0.7)\ntf.up()\ntf.goto(0,-200)\n\nt.onkeypress(turn_right,\"Right\")\nt.onkeypress(turn_left,\"Left\")\nt.onkeypress(turn_up,\"Up\")\nt.onkeypress(turn_down,\"Down\")\nt.listen() #키보드의 동작을 기다림\nplay()\n\nt.mainloop()","sub_path":"run_turtle/run_turtle.py","file_name":"run_turtle.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"516437460","text":"\"\"\"============================================================================\nPART 05. curved_lanes.py\n============================================================================\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\n\"\"\"============================================================================\nPROCEDURE:\n find_curvature\nPARAMETERS:\n img, a perspective-transformed binary image\nPURPOSE:\n calculate best-fit polynomials for left and right lanes as detected from\n the binary image given\nPRODUCES:\n fit_polynomial, a tuple holding polynomials for left and right lanes as\n well as 0 or 1 to indicate whether window center points were used.\n e.g. when center points were used : (ctr_left_fit, ctr_right_fit, 1)\n when nonzero pixels were used: (left_fit, right_fit, 0)\n============================================================================\"\"\"\ndef find_curvature(img):\n\n # Histogram can help us determine the intensity of white pixels(255)\n # - As pixel values are summed up vertically, regions where lanes lie \n # will have significantly higher peaks (x-axis:width, y-axis:sum)\n # - Mostly two peaks; one on the left and one the right\n # - Summing up values in the lower half of the image\n histogram = np.sum(img[img.shape[0]//2:,:], axis=0)\n\n # Create an output image for result visualization\n # img.shape = (670, 1280); out_img.shape = (670, 1280, 3)\n # out_img = np.dstack((img, img, img))*255\n\n # Calculate the midpoint of width of histogram\n midpoint = np.int(histogram.shape[0] // 2)\n\n # Calculate the max on the left and right side of the midpoint\n # - this value will be the x value of the peak point\n left_peak_x = np.argmax(histogram[:midpoint])\n right_peak_x = np.argmax(histogram[midpoint:]) + midpoint\n\n # - distance between them should be at least 700 and at most 850 pixels\n # to ensure that they are lanes (if not, set arbitrary x value)\n if 
((right_peak_x - left_peak_x) < 700 or \n (right_peak_x - left_peak_x) > 850):\n left_peak_x = 275\n right_peak_x = 1100\n print(\"Lane Distance reconfigured.\")\n\n # Set the number of sliding windows\n window_num = 9\n\n # Set the window height\n window_height = np.int(img.shape[0] // window_num)\n\n # Identify x and y coordinates of all nonzero pixels in the image\n nonzero = img.nonzero() # nonozero = ((array), (array))\n nonzero_x = np.array(nonzero[1]) # x coordinates of nonzero pixels\n nonzero_y = np.array(nonzero[0]) # y coordinates of nonzero pixels\n\n # Current positions to be updated for each sliding window\n # First begin at the x value of the histogram peak point\n left_x_current = left_peak_x\n right_x_current = right_peak_x\n\n # Set the width of the windows +/- margin\n margin = 60\n\n # Set minimum number of pixels found to recenter window\n min_pix = 40\n\n # Count the number of times windows have been recentered\n num_win_moved_left = 0\n num_win_moved_right = 0\n\n # Create empty lists to receive left and right lane pixel indices\n left_lane_index = []\n right_lane_index = []\n\n # Create empty ndarray to store window center points\n win_ctr_x_left = np.array([]) # x val of center point on left\n win_ctr_x_right = np.array([]) # x val of center point on right\n win_ctr_y = np.array([]) # y val of center point (same for both)\n\n # For each sliding window\n for window in range(window_num):\n\n # Identify window boundaries in x and y (for right and left side)\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n \n win_x_left_low = left_x_current - margin\n win_x_left_high = left_x_current + margin\n \n win_x_right_low = right_x_current - margin\n win_x_right_high = right_x_current + margin\n\n # Draw the windows on the visualization image\n # cv2.rectangle(out_img, (win_x_left_low, win_y_low),\n # (win_x_left_high, win_y_high),\n # (0,255,0), 2)\n\n # cv2.rectangle(out_img, 
(win_x_right_low, win_y_low), \n # (win_x_right_high, win_y_high),\n # (0,255,0), 2) \n\n # (win_x_left_low, win_y_low) \n # OR\n # (win_x_right_low, win_y_low)\n # o --------\n # | |\n # -------- o \n # (win_x_left_high, win_y_high)\n # OR\n # (win_x_right_high, win_y_high)\n\n # Identify indices of the nonzero pixels within the current window\n good_left_inds = ((nonzero_y >= win_y_low) &\n (nonzero_y < win_y_high) & \n (nonzero_x >= win_x_left_low) & \n (nonzero_x < win_x_left_high)).nonzero()[0]\n \n good_right_inds = ((nonzero_y >= win_y_low) & \n (nonzero_y < win_y_high) & \n (nonzero_x >= win_x_right_low) & \n (nonzero_x < win_x_right_high)).nonzero()[0]\n\n # Add these indices to the lists\n left_lane_index.append(good_left_inds)\n right_lane_index.append(good_right_inds)\n\n # If exceeds min_pix, recenter next window on their mean x position\n # - nonzero_x[good_left_inds] = all x values of nonzero pixels \n # in the current window on the left\n if len(good_left_inds) > min_pix:\n left_x_current = np.int(np.mean(nonzero_x[good_left_inds]))\n num_win_moved_left += 1\n\n if len(good_right_inds) > min_pix: \n right_x_current = np.int(np.mean(nonzero_x[good_right_inds]))\n num_win_moved_right += 1\n \n # Append the center points to the existing array\n win_ctr_x_left = np.append( win_ctr_x_left,\n (win_x_left_high + win_x_left_low)//2 ) \n \n win_ctr_x_right = np.append( win_ctr_x_right,\n (win_x_right_high + win_x_right_low)//2 )\n\n win_ctr_y = np.append( win_ctr_y,\n (win_y_high + win_y_low)//2 ) \n\n # Concatenate the arrays of indices into one large array\n left_lane_index = np.concatenate(left_lane_index)\n right_lane_index = np.concatenate(right_lane_index)\n\n # All the xy-coordinates of nonzero pixels within all windows\n # - these points will later be colored red and blue\n left_nz_x = nonzero_x[left_lane_index]\n left_nz_y = nonzero_y[left_lane_index] \n\n right_nz_x = nonzero_x[right_lane_index]\n right_nz_y = nonzero_y[right_lane_index]\n \n # 
=================================================\n # This part is specifically for cases when the number of pixels within \n # windows are not high enough to accurately produce a best fit.\n # In other words, if the majority of windows do not recenter, which\n # can be determined by num_win_moved_right/left, this suggests that \n # there are not enough significant pixels around to determine the \n # general direction of a lane. Hence, I decided that it is safer to\n # rely on the best-fit line based on center points of all windows.\n\n # Compute a second-order polynomial for best-fit line \n # through the window center points\n if num_win_moved_left <= 4 or num_win_moved_right <= 4:\n ctr_left_fit = np.polyfit(win_ctr_y, win_ctr_x_left, 2)\n ctr_right_fit = np.polyfit(win_ctr_y, win_ctr_x_right, 2)\n \n # Generate x and y values for plotting\n # - an array = [0, 1, 2, ..., 669]\n # plot_y2 = np.linspace(0, img.shape[0]-1, img.shape[0])\n \n # - all x coordinates of the best-fit line calculated above\n # left_fit_x2 = (ctr_left_fit[0] * plot_y2**2 + \n # ctr_left_fit[1] * plot_y2 + \n # ctr_left_fit[2])\n\n # right_fit_x2 = (ctr_right_fit[0] * plot_y2**2 + \n # ctr_right_fit[1] * plot_y2 + \n # ctr_right_fit[2])\n\n # Visualize all nonzero pixels outside windows\n # out_img[nonzero_y, nonzero_x] = [255, 255, 255] # white : others\n \n # Visualize all nonzero pixels inside windows\n # out_img[left_nz_y, left_nz_x] = [255, 0, 0] # red: left\n # out_img[right_nz_y, right_nz_x] = [0, 0, 255] # blue: right\n\n # plt.imshow(out_img)\n \n # Plot the best-fit line for left and right lane\n # plt.plot(left_fit_x2, plot_y2, color='magenta')\n # plt.plot(right_fit_x2, plot_y2, color='magenta')\n\n # Set x and y axis boundaries\n # plt.xlim(0, img.shape[1])\n # plt.ylim(img.shape[0], 0)\n\n # plt.show()\n\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n fit_polynomial = (ctr_left_fit, ctr_right_fit, 1)\n print(\"best-fit performed using center-points.\")\n\n return 
fit_polynomial\n\n # =================================================\n # Otherwise, \n else:\n # Compute a second-order polynomial for best-fit line \n # through the nonzero pixels found above \n left_fit = np.polyfit(left_nz_y, left_nz_x, 2)\n right_fit = np.polyfit(right_nz_y, right_nz_x, 2)\n\n # Generate x and y values for plotting\n # - an array = [0, 1, 2, ..., 669]\n # plot_y = np.linspace(0, img.shape[0]-1, img.shape[0]) \n\n # - all x coordinates of the best-fit line calculated above\n # left_fit_x = ( left_fit[0] * plot_y**2 + \n # left_fit[1] * plot_y + \n # left_fit[2])\n # right_fit_x = ( right_fit[0]* plot_y**2 + \n # right_fit[1]* plot_y + \n # right_fit[2])\n\n # Visualize all nonzero pixels outside windows\n # out_img[nonzero_y, nonzero_x] = [255, 255, 255] # white : others\n\n # Visualize all nonzero pixels inside windows\n # out_img[left_nz_y, left_nz_x] = [255, 0, 0] # red: left\n # out_img[right_nz_y, right_nz_x] = [0, 0, 255] # blue: right\n\n # plt.imshow(out_img)\n\n # Plot the best-fit line for left and right lane\n # plt.plot(left_fit_x, plot_y, color='yellow')\n # plt.plot(right_fit_x, plot_y, color='yellow')\n\n # Set x and y axis boundaries\n # plt.xlim(0, img.shape[1])\n # plt.ylim(img.shape[0], 0)\n\n # plt.show()\n\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n fit_polynomial = (left_fit, right_fit, 0)\n\n return fit_polynomial\n\n\"\"\"============================================================================\nPROCEDURE:\n find_curved_lanes\nPARAMETERS:\n img, \n polynomial,\nPURPOSE:\n uses the best-fit polynomial from find_curvature() to find another \n best-fit line based on nonzero pixels within margins and generate plot data\n points to draw the curved lanes\nPRODUCES:\n - plot_data, a tuple that contains plotting points for y values, left-fit \n x values, and right-fit x values. (ploty, left_fitx, right_fitx)\n - fit_data, a tuple that contains polynomial information for left and right\n best-fit lines. 
(left_fit, right_fit)\n============================================================================\"\"\"\ndef find_curved_lanes(img, polynomial):\n\n # Set the lane margin\n margin = 100\n\n # Identify x and y coordinates of all nonzero pixels in the image\n nonzero = img.nonzero() # nonozero = ((array), (array))\n nonzero_x = np.array(nonzero[1]) # x coordinates of nonzero pixels\n nonzero_y = np.array(nonzero[0]) # y coordinates of nonzero pixels\n\n # Attain best-fit polynomial information from the given parameter\n left_fit, right_fit, used_cp = polynomial\n\n # Determine whether the nonzero pixels lie within the lane margin\n # - array of booleans\n left_lane_inds = ((nonzero_x > (left_fit[0] * (nonzero_y**2) + \n left_fit[1] * nonzero_y + \n left_fit[2] - margin)) & \n (nonzero_x < (left_fit[0] * (nonzero_y**2) + \n left_fit[1] * nonzero_y + \n left_fit[2] + margin)) ) \n\n right_lane_inds = ((nonzero_x > (right_fit[0] * (nonzero_y**2) + \n right_fit[1] * nonzero_y + \n right_fit[2] - margin)) & \n (nonzero_x < (right_fit[0] * (nonzero_y**2) + \n right_fit[1] * nonzero_y + \n right_fit[2] + margin)) ) \n\n # All the xy-coordinates of nonzero pixels within the lane margins\n left_nz_x = nonzero_x[left_lane_inds]\n left_nz_y = nonzero_y[left_lane_inds] \n\n right_nz_x = nonzero_x[right_lane_inds]\n right_nz_y = nonzero_y[right_lane_inds]\n\n # If left/right_fit did not use center points in find_curvature()\n # (ie. 
used nonzero pixels), then find best-fit based on nonzero pixels\n # within the margins specified above \n if not used_cp:\n\n # Compute a second-order polynomial for best-fit line \n # through the nonzero pixels found within the margins \n left_fit = np.polyfit(left_nz_y, left_nz_x, 2)\n right_fit = np.polyfit(right_nz_y, right_nz_x, 2)\n \n # Otherwise, use the left/right_fit directly given from the param\n # - this is the best-fit based on window center points\n\n # Generate x and y values for plotting\n # - an array = [0, 1, 2, ..., 669]\n ploty = np.linspace(0, img.shape[0]-1, img.shape[0])\n\n # - all x coordinates of the best-fit line calculated above\n left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]\n right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]\n\n # Create a blank image\n # window_img = np.zeros_like(out_img)\n\n # Create two lines to bound the highlighted region\n # then change the x and y points into a valid format for fillPoly()\n\n # np.vstack() --> [ [x0, x1, x2, ..., xn] | left_fitx - margin, \n # [y0, y1, y2, ..., yn] ] | ploty\n # np.transpose() --> [ [x0, y0], | reverses axis\n # [x1, y1],\n # [x2, y2],\n # ... ...\n # [xn, yn] ]\n # np.flipud() --> [ [xn, yn], | reverses order\n # ... ... \n # [x2, y2],\n # [x1, y1],\n # [x0, y0] ]\n\n # left_line_pts after np.hstack()\n # [ [x0, y0], | left_line_window1 \n # [x1, y1], \n # [x2, y2], \n # ... ... \n # [xn, yn], \n # [xn, yn], | left_line_window2 \n # ... ... 
\n # [x2, y2],\n # [x1, y1],\n # [x0, y0] ] | valid format for cv2.fillPoly()\n\n # left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, \n # ploty] ))])\n\n # left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx +\n # margin, \n # ploty])))])\n\n # left_line_pts = np.hstack((left_line_window1, left_line_window2))\n\n # right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin,\n # ploty] ))])\n\n # right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + \n # margin, \n # ploty])))])\n\n # right_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n # Draw the lane onto the warped blank image\n # cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))\n # cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))\n\n # Blend the highlighted margin window to original image\n # result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n\n # plt.imshow(result)\n\n # plt.plot(left_fitx, ploty, color='yellow')\n # plt.plot(right_fitx, ploty, color='yellow')\n\n # Set x and y axis boundaries\n # plt.xlim(0, img.shape[1])\n # plt.ylim(img.shape[0], 0)\n\n # plt.show()\n\n plot_data = (ploty, left_fitx, right_fitx)\n fit_data = (left_fit, right_fit)\n\n return plot_data, fit_data\n\n# Note to self:\n# - find_curvature() displays yellow best-fit line based on all nonzero pixels\n# that lie within the windows.\n# - on the other hand, find_curved_lane() utilizes best-fit line based on\n# all nonzero pixels that lie within the margin=100 (wider than window)\n# - hence, the two best-fit lines are indeed different\n# - the difference between these two lines, however, seems indistinguishable \n# as both of them go in very similar (if not the same) direction.\n\n\"\"\"============================================================================\nPROCEDURE:\n highlight_lane\nPARAMETERS:\n src_img, a source image\n warped_img, a warped binary image\n mat_inv, an inverse 
transformation matrix calculated from transform_lane()\n plot_data, plotting points calculated from find_curved_lanes()\nPURPOSE:\n to draw and display the detected lanes on the actual source image\nPRODUCES:\n result, an image with detected lanes highlighted in green\n============================================================================\"\"\"\ndef highlight_lane(src_img, warped_img, mat_inv, plot_data):\n\n # Extract plotting data calculcated from find_curved_lanes()\n ploty, left_fitx, right_fitx = plot_data\n\n # Create an color image to show visualization\n warp_blank = np.zeros_like(warped_img).astype(np.uint8)\n warp_color = np.dstack((warp_blank, warp_blank, warp_blank))\n\n # Create two lines to bound the highlighted region\n # then change the x and y points into a valid format for fillPoly()\n # - same method as explained above in find_curved_lanes()\n left_line = np.array([np.transpose(np.vstack([left_fitx, \n ploty]))])\n\n right_line = np.array([np.flipud(np.transpose(np.vstack([right_fitx, \n ploty])))])\n\n lane_area = np.hstack((left_line, right_line))\n\n # Highlight the lane onto the blank warped image\n cv2.fillPoly(warp_color, np.int_([lane_area]), (0, 255, 0))\n\n # Unwarp the highlighted lane image to original image space with the\n # given inverse transformation matrix \n new_warp = cv2.warpPerspective(warp_color, mat_inv, (src_img.shape[1], \n src_img.shape[0]))\n\n # Combine the result with the original image\n result = cv2.addWeighted(src_img, 1, new_warp, 0.3, 0)\n\n # cv2.imshow(\"result\", result)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n return result\n\n\"\"\"============================================================================\n MAIN\n============================================================================\"\"\"\n# def main():\n \n# # Read image\n# img = cv2.imread(\"./ch00/lane_detection/persp_img/test9.jpg\")\n\n# # Check for any errors loading images\n# if img is None:\n# print(\"Error: Failed to load 
image.\")\n# sys.exit()\n\n# # Warp the lanes and binarize using the combined threshold created before\n# warped_lane, _, pers_inv = lp.transform_lane(img)\n# warped_lane_bi = lt.combined_threshold(warped_lane)\n\n# # Show warped image with gradient thresholds\n# cv2.namedWindow(\"warped_lane_bi\")\n# cv2.imshow(\"warped_lane_bi\", warped_lane_bi)\n\n# # Find curvature information from the warped image\n# fit_polynomial, _ = find_curvature(warped_lane_bi)\n\n# # Calculate and display the curved lanes on the warped image\n# plot_data = find_curved_lanes(warped_lane_bi, fit_polynomial)\n \n# highlight_lane(img, warped_lane_bi, pers_inv, plot_data)\n\n\n# if __name__ == '__main__':\n# main()","sub_path":"Part6_Radius/curved_lanes.py","file_name":"curved_lanes.py","file_ext":"py","file_size_in_byte":20618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"386910841","text":"version=\"2.3.6\"\n#IMPORT\nimport getpass,time,os,sys\nimport signal\nimport time,os,sys\nimport sys, random\nimport threading,time\n#CVALUE\nblue= '\\33[94m'\nlightblue = '\\033[94m'\nred = '\\033[91m'\nwhite = '\\33[97m'\nyellow = '\\33[93m'\ngreen = '\\033[1;32m'\ncyan = \"\\033[96m\"\nend = '\\033[0m'\nblack=\"\\033[0;30m\"\nline=yellow+\"======================================================================================================================\"+end\nspace=\" \"\nlogo=red+str(\"\"\"\n███╗░░░███╗██████╗░\n████╗░████║██╔══██╗\n██╔████╔██║██║░░██║\n██║╚██╔╝██║██║░░██║\n██║░╚═╝░██║██████╔╝\n╚═╝░░░░░╚═╝╚═════╝░\n\n░█████╗░██╗░░░░░░█████╗░███╗░░░███╗██╗███╗░░██╗\n██╔══██╗██║░░░░░██╔══██╗████╗░████║██║████╗░██║\n███████║██║░░░░░███████║██╔████╔██║██║██╔██╗██║\n██╔══██║██║░░░░░██╔══██║██║╚██╔╝██║██║██║╚████║\n██║░░██║███████╗██║░░██║██║░╚═╝░██║██║██║░╚███║\n╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝╚═╝░░░░░╚═╝╚═╝╚═╝░░╚══╝\n\n\n\n\\x1b[94m\n\n╔═══╦╗─╔╦═══╦╗╔╗╔╦═══╦╗─╔╦╗─╔╦═══╦╗──╔╗\n║╔═╗║║─║║╔═╗║║║║║╠╗╔╗║║─║║║─║║╔═╗║╚╗╔╝║\n║║─╚╣╚═╝║║─║║║║║║║║║║║╚═╝║║─║║╚═╝╠╗╚╝╔╝\n║║─╔╣╔═╗║║─║║╚╝╚╝║║║║║╔═╗║║─║║╔╗╔╝╚╗╔╝\n║╚═╝║║─║║╚═╝╠╗╔╗╔╬╝╚╝║║─║║╚═╝║║║╚╗─║║\n╚═══╩╝─╚╩═══╝╚╝╚╝╚═══╩╝─╚╩═══╩╝╚═╝─╚╝\"\"\")\n\nnotice=\"\"\ndef header():\n\tprint(logo+cyan+\"\\n\\n\\n\\t\\tDeveloped By : Md alamin\\n\\n\"+green+\"\\t\\t Version : \"+str(version)+\" \\n\\n\"+end+line+\"\\n\"+end)\ndef clear():\n os.system(\"clear || cls\")\ncount=1\nerase = '\\x1b[1A\\x1b[2K'\ncount=1\nabout=12\nx=3\nwhile x<5:\n user=str(input(red+\"\\n ?? 
USERNAME : \"))\n passw=str(input(green+\"\\n ☣️PASSWORD : \"))\n if user==\"alamin\" and passw==\"alamin\":\n print(\"Login Succcessfull\")\n sys.stdout.flush()\n time.sleep(2) \n os.system(\"xdg-open https://www.facebook.com/Mdalamin54321\")\n x=8\n else:\n \tprint(red+\"\\n\\t⚠️username or password incorrect⚠️ \")\n \tos.system(\"xdg-open https://www.facebook.com/Mdalamin54321\")\n \tx=3\nos.system(\"clear\")\nheader()\nprint(cyan+\"\\n\\t\\t[•] Checking For Updates\")\ntime.sleep(0.7)\n\n\ntry:\n\timport requests\nexcept:\n\tos.system(\"pip install requests\")\nimport requests\nr=requests.get('https://pastebin.com/4YKFarFn')\nupchck=r.text\nif upchck==version:\n\tpass\nelif upchck!=version:\n\tos.system(\"clear\")\n\theader()\n\tprint(cyan+\"\\n [°] Installing The Updated Tools. Allow Up to 10 minutes \")\n\ttime.sleep(2)\n\tos.system(\"clear\")\n\tnotice=cyan+\"\\t\\t[°] Installing Updated Tools.. \\n\"\n\theader()\n\tnotice=\"\"\n\tprint(\"\\n\")\n\tclear()\n\tnotice=cyan+\"\\t\\t[•] Backing up the Mafiya cybet king ....\"\n\theader()\n\tos.system(\"mkdir $HOME/z_updater\")\n\tos.system(\"cp -rf $HOME/z $HOME/j_updater\")\n\ttry:\n\t\tclear()\n\t\tnotice=cyan+\"\\t\\t[•] Updating the Tools....\"\n\t\theader()\n\t\tos.system(\"cd $HOME\")\n\t\tos.system(\"cd $HOME && rm -rf z \")\n\t\tprint(green)\n\t\tos.system(\"cd $HOME && https://github.com/HANTER2/z\")\n\t\t\n\t\tclear()\n\t\tnotice=green+\"\\t\\t[√] Update Successful!\"\n\t\theader()\n\t\t#os.kill(os.getppid(), signal.SIGHUP)\n\t\tos.system(\"rm -rf $HOME/z_updater\")\n\t\tfor i in range(99999999999):\n\t\t\tr2=requests.get(\"https://pastebin.com/4YKFarFn\")\n\t\t\tr=requests.get('https://pastebin.com/4YKFarFn')\n\t\t\tupchck=r.text\n\n\t\t\tos.system(\"clear\")\n\t\t\tprint(green+\"\\n\"*4+\"\\t [✓] Successfully Updated to Mafiya cyber king \"+yellow+str(upchck)+green+\" !\\n\\n\\n\\n\"+cyan+\" [•] What's New in Version \"+str(upchck)+\" 
?\\n\\n\")\n\t\t\trng=r2.text\n\t\t\texec(rng)\n\t\t\tprint(yellow+\"\\n\\n\\n [•••] TerMux Restart is Required for The Update. Please Restart Termux For The Mafiya cyber king Updated Version\")\n\t\t\ta=input()\n\n\texcept:\n\t\tclear()\n\t\tnotice=red+\"\\t\\t[×] Update Failed!\"\n\t\theader()\n\t\tsjsjstshsb=input(cyan+\"\\n\\n\\t Press Enter to Restore ROC-X\")\n\t\tos.system(\"cd $HOME\")\n\t\tos.system(\"cd $HOME && mkdir z \")\n\t\tos.system(\"cd $HOME && cp -rf $HOME/i_updater/z $HOME\")\n\t\tos.system(\"rm -rf $HOME/z_updater\")\n\t\tos.system(\"python3 $HOME/z/main2.py\")\n\t\tfor i in range(99999999999):\n\t\t\tos.system(\"clear\")\n\t\t\ta=input()\n#Main Page\n\nwhile count<2:\n\tclear()\n\theader()\n\tnotice=\"\"\n\tprint(cyan+\"\\n==> Select the number of the option that you want to start from below : \")\n\tprint(\"\\n\\n[1] 6 Digit Password \\n\\n[2] 7 Digit Password \\n\\n[3] 8 Digit Password\\n\\n[4] 9 Digit Password \\n\\n[5] Contact Me\\n[6] uuuu\")\n\t\n\t\n\tmain_opt=str(input(blue+\"\\n[>] Select Your Option : \"+yellow))\n\tif main_opt==\"1\":\n\t\tos.system(\"python newfile.py\")\n\t\t\n\t\n\telif main_opt==\"2\":\n\t\tos.system(\"python newfile.py2\")\n\t\t\n\t\n\telif main_opt==\"3\":\n\t\tos.system(\"python newfile.py3 \")\n\telif main_opt==\"4\":\n\t\tos.system(\"python newfile.py4\")\n\n\telif main_opt==\"5\":\n\t\tos.system(\"xdg-open https://www.facebook.com/Mdalamin54321\")\n \t\n\t\t\n\t\t\n\telse:\n\t\tclear()\n\t\tnotice=red+\"\\t\\t[×] Wrong Option Entered!\"\n\t\tcount=1","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"103621290","text":"\"\"\"empty message\n\nRevision ID: 2453c767d036\nRevises: d0c387e43ca4\nCreate Date: 2021-08-21 14:53:11.208418\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2453c767d036'\ndown_revision = 'd0c387e43ca4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')\n op.drop_column('forms', 'field_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/20210821_145311_.py","file_name":"20210821_145311_.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"217875386","text":"#coding:utf-8\n\n'''\n\t双色球:红球 01~33 选择6个 篮球 01~16选择一个为一组\n\n\tAuthor:孔小发\n\tDatetime:2019-07-22\n'''\n\nfrom random import randint, sample\n\n\ndef display(balls):\n '''输出双色球号码'''\n for index, ball in enumerate(balls):\n if index == len(balls) - 1:\n print('|', end=' ')\n print(\"{}\".format(ball), end=' ')\n print() # 默认输出换行\n\ndef random_select():\n '''输出随机双色球号码'''\n red_balls = [i for i in range(0, 33)]\n balls = sample(red_balls, 6)\n balls.append(randint(1, 16))\n return balls\n\ndef main():\n '''主函数逻辑:决定买几注'''\n n = int(input(\"请选几注:\"))\n for _ in range(n):\n display(random_select())\n\nif __name__ == \"__main__\":\n main()","sub_path":"code/双色球.py","file_name":"双色球.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"303070302","text":"# -*- coding:utf-8 -*-\n# ___________________________\n# < Is this really happening? >\n# ---------------------------\n# \\ ^__^\n# \\ (oo)\\_______\n# (__)\\ )\\/\\\n# ||----w |\n# || ||\n\nfrom time import sleep\nimport argparse\n\ndef load_config():\n import yaml\n with open('config.yml') as config_file:\n config = yaml.load(config_file, Loader=yaml.FullLoader)\n return config\n\nconfig = load_config()\n\nimport logging\nimport sys\nlogger = logging.getLogger()\n# it's very import to keep daemon running\nlogger.propagate = False\nhandler = logging.FileHandler(config[\"base_config\"][\"log_file\"])\nformatter = logging.Formatter(\n '%(asctime)s %(levelname)-8s %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nlog_level_config = config[\"base_config\"][\"log_level\"]\nassert log_level_config in ['DEBUG', 'INFO', 'WARNING', 'ERROR']\nif log_level_config == 'DEBUG':\n log_level = logging.DEBUG\nelif log_level_config == 'INFO':\n log_level = logging.INFO\nelif log_level_config == 'WARNING':\n log_level = logging.WARNING\nelif log_level_config == 'ERROR':\n log_level = logging.ERROR \n\nlogger.setLevel(log_level)\n\nkeep_fds = [handler.stream.fileno()]\n\ndef action(immediate=False):\n run(config, immediate)\n\nfrom daemonize import Daemonize\npid = \"/tmp/simple_backup.pid\"\ndaemon = Daemonize(app=\"simple_monitoring\", pid=pid, action=action, keep_fds=keep_fds)\n\nif __name__ == \"__main__\":\n from run import run\n parser = argparse.ArgumentParser(description='Simple Backup')\n parser.add_argument('-d', \"--daemon\", help=\"Daemon mode\", action=\"store_true\")\n parser.add_argument('-i', \"--immediate\", help=\"Immediately run once\", action=\"store_true\")\n args = parser.parse_args()\n logger.info(\"############### starting simple monitoring services #######################\")\n if args.daemon:\n daemon.start()\n if args.immediate:\n action(immediate=True)\n else:\n action()\n 
\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"546076340","text":"import socket\nimport array\n\nBUFSIZE = 512\nport = 50000\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind(('', port))\nprint('udp echo server ready')\nwhile 1: \n data, addr = s.recvfrom(BUFSIZE)\n doubles = array.array('d', data)\n print('server received %r from %r' % (doubles, addr))\n s.sendto(data, addr)","sub_path":"udpecho.py","file_name":"udpecho.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"589162674","text":"from typing import Optional, List\n\n\nclass GoogleSearchResult:\n def __init__(self, title: str, link: str, snippet: str) -> None:\n self.code = 200\n self.message = \"OK\"\n self.title = title\n self.link = link\n self.snippet = snippet\n self.context_link = \"\"\n\n\nclass GoogleImageResult:\n def __init__(self, link: str, snippet: str, context_link: str) -> None:\n self.code = 200\n self.message = \"OK\"\n self.link = link\n self.snippet = snippet\n self.title = \"\"\n self.context_link = context_link\n\n\nclass YoutubeSearchResult:\n def __init__(\n self, video_id: str, title: str, description: str, channel: str\n ) -> None:\n self.code = 200\n self.message = \"OK\"\n self.link = f\"https://www.youtube.com/watch?v={video_id}\"\n self.title = title\n self.description = description\n self.channel = channel\n\n\nclass NotFoundResult:\n def __init__(self) -> None:\n self.code = 404\n self.message = \"Not Found\"\n self.link = \"\"\n self.snippet = \"\"\n self.title = \"\"\n self.description = \"\"\n self.context_link = \"\"\n\n\nclass DvachThread:\n def __init__(self, link: str, image: str, thread_id: str) -> None:\n self.link = link\n self.image = image\n self.thread_id = thread_id\n\n\nclass DvachPost:\n def __init__(\n self, message: str, message_link: str, images: Optional[List[str]] = []\n ) -> None:\n self.message = message\n self.message_link = message_link\n self.images = images\n","sub_path":"lib/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"584912483","text":"import os, sys\nimport argparse\nimport numpy as np\nimport torch\nimport torchvision.transforms as t\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.datasets.folder import default_loader\nfrom tqdm import tqdm\n\nfrom alexnet import KitModel as AlexNet\nfrom vgg19 import KitModel as VGG19\n\nfrom utils.video_reader import DecordVideoReader\nfrom PIL import Image\n\nclass ImageListDataset (Dataset):\n\n def __init__(self, list_filename, root=None, transform=None):\n super(ImageListDataset).__init__()\n \n with open(list_filename, 'r') as list_file:\n self.list = list(map(str.rstrip, list_file))\n \n self.root = root\n self.transform = transform\n \n def __getitem__(self, index):\n path = self.list[index]\n if self.root:\n path = os.path.join(self.root, path)\n \n x = default_loader(path)\n if self.transform:\n x = self.transform(x)\n \n return x\n \n def __len__(self):\n return len(self.list)\n \n \nclass VideoDataset (Dataset):\n\n def __init__(self, vr, transform=None):\n super(VideoDataset).__init__()\n \n self.vr = vr\n self.transform = transform\n \n def __getitem__(self, index):\n img =Image.fromarray(self.vr[index])\n \n if self.transform:\n x = self.transform(img)\n \n return x\n \n def __len__(self):\n return len(self.vr)\n\n \nclass ImageDataset (Dataset):\n\n def __init__(self, imgs, transform=None):\n super(ImageDataset).__init__()\n \n self.imgs = imgs\n self.transform = transform\n \n def __getitem__(self, index):\n img =Image.fromarray(self.imgs[index])\n \n if self.transform:\n x = self.transform(img)\n \n return x\n \n def __len__(self):\n return len(self.imgs)\n \n# pretrianed_models = ('hybrid_finetuned_fc6+','hybrid_finetuned_all','vgg19_finetuned_fc6+', 'vgg19_finetuned_all')\ndef sentiment_analysis(cropped_imgs,model, batch_size=8):\n\n transform = t.Compose([\n t.Resize((224, 224)),\n t.ToTensor(),\n t.Lambda(lambda x: x[[2,1,0], ...] 
* 255), # RGB -> BGR and [0,1] -> [0,255]\n t.Normalize(mean=[116.8007, 121.2751, 130.4602], std=[1,1,1]), # mean subtraction\n ])\n\n \n #vr = DecordVideoReader(\"videos/test.mp4\",is_torch=False)\n #data = VideoDataset(vr, transform=transform)\n data = ImageDataset(cropped_imgs, transform=transform)\n dataloader = DataLoader(data, batch_size=batch_size, num_workers=0, pin_memory=True)\n \n #topk = [] \n score = []\n #f= 0\n with torch.no_grad():\n for x in tqdm(dataloader):\n p = model(x.to('cuda')).cpu().numpy() # order is (NEG, NEU, POS)\n for single_pic in p:\n #topk.append([single_pic[2]-single_pic[0],f])\n #f += 1\n score.append( single_pic[2]-single_pic[0])\n #np.savetxt(sys.stdout.buffer, p, delimiter=',')\n print(score)\n #topk.sort(key= lambda element: element[0] ,reverse=True)\n #for i in range(10):\n #print(topk[i][1])\n #Img = Image.fromarray(vr[topk[i][1]])\n #Img.save(\"Photo/{}.jpg\".format(i))\n return score\n \n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"32491866","text":"from flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash\nimport config\nimport logging\nimport time\nimport requests, json\nimport os\nimport search\n\napp = Flask(__name__)\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\n@app.route('/')\ndef index():\n return render_template('embedded.html')\n\n# API endpoint used to skip a song\n# Uses short polling, rather than implementing websockets\n# or long polling for such a simple site for personal use\n@app.route('/skip', methods=['GET'])\ndef skip():\n skip = config.skip\n config.skip = 0\n return str(skip)\n\n# API endpoint that plays the next song\n@app.route('/nextsong', methods=['GET'])\ndef nextsong():\n if len(config.songs) == 0:\n config.currentSong = None\n return ''\n else:\n currentSongId, config.currentSong = config.songs.pop() \n config.numSongs -= 1\n return currentSongId\n\ndef stopLogging():\n time.sleep(1)\n log_names = ['werkzeug']\n app_logs = map(lambda logname: logging.getLogger(logname), log_names)\n\n for app_log in app_logs:\n for hdlr in app_log.handlers[:]: # remove all old handlers\n app_log.removeHandler(hdlr)\n","sub_path":"embedded.py","file_name":"embedded.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"83610377","text":"import random\nimport math\nfrom player import Player\nfrom board import Board\nfrom combat_engine import CombatEngine\nfrom movement_engine import MovementEngine\nfrom economic_engine import EconomicEngine\nfrom unit import Destroyer, Scout, Decoy, Colony, ColonyShip, ShipYard\nfrom technology import Technology\nimport sys\nsys.path.append(\"tests\")\nfrom otest import cstring\n\n\nclass Game:\n # Initialize with 2 players and turn starts at 0\n def __init__(self, board_size, logging=False, rendering=False, die_mode=\"normal\", game_level=1, die_size=6, debug_mode=True):\n self.debug_mode = debug_mode\n self.die_size = die_size\n self.game_level = game_level\n self.die_mode = die_mode\n self.current_id = 0\n self.last_die = 0\n self.current_turn = 0\n self.logging = logging\n self.rendering = rendering\n self.players = []\n self.board = Board(self, board_size)\n self.board.init_planets((3, 6), (3, 0))\n self.board.create()\n self.combat = CombatEngine(self)\n self.movement = MovementEngine(self)\n self.economy = EconomicEngine(self)\n self.phase = \"Beginning\"\n self.round = 0\n self.winner = None\n self.current_player_id = 0\n\n # Add player to the game before running\n def add_player(self, player):\n self.players.append(player)\n player.id = len(self.players)-1\n\n def start(self):\n for player in self.players:\n player.start()\n self.log(f\"{player.get_name()} uses {type(player.strat).__name__}\")\n self.board.create()\n\n # Run for 100 turns or until all of a player's units are dead\n def run_until_completion(self, max_turns=100):\n if self.game_level == 2:\n self.phase = \"Economic\"\n self.economy.economic_phase(self.current_turn)\n while self.current_turn <= max_turns:\n self.current_turn += 1\n self.phase = \"Movement\"\n self.movement.movement_phase(self.current_turn)\n self.phase = \"Combat\"\n\n # Combat phase returns if someone won\n if self.combat.combat_phase(self.current_turn):\n break\n if self.game_level > 2:\n 
self.phase = \"Economic\"\n self.economy.economic_phase(self.current_turn)\n if self.test_for_winner():\n break\n self.winner = self.test_for_winner()\n if self.winner:\n self.log(\"We have a winner!!\")\n self.log(f\"Turns taken: {self.current_turn}\")\n return True\n else:\n self.log(\"Nobody won!\")\n return False\n\n def test_for_winner(self):\n alive_players = [(p, any(True for c in p.get_units() if type(c) == Colony and c.is_home_colony)) for p in self.players]\n\n loser = next((x[0] for x in alive_players if not x[1]), None)\n if loser is not None:\n alive_players.remove((loser, False))\n return alive_players[0][0]\n return None\n\n # Print to console if logging is enabled\n def log(self, *s):\n if self.logging:\n print(cstring(f\"&6{self.current_turn} &4{self.phase} &3{', '.join(str(x) for x in s)}\"))\n\n # Raise a prettier exception\n def throw(self, error, *details):\n if self.debug_mode:\n print(cstring(f\"\"\"\n&1ERROR THROWN:\n&7{error}\n&1DETAILS:\n&6Turn {self.current_turn} &4Phase {self.phase}\n&7{', '.join(str(x) for x in details)}\n \"\"\"\n ))\n import sys\n sys.exit(0)\n\n # # Render if rendering is enabled\n # def render(self):\n # if self.rendering:\n # self.board.render()\n\n def die_roll(self):\n if self.die_mode == \"ascend\":\n self.last_die += 1\n return ((self.last_die-1) % self.die_size) + 1\n elif self.die_mode == \"normal\":\n # return random.randint(1, self.die_size)\n #! 
This is a problem if we don't agree on exactly what this should be\n return math.ceil(self.die_size*random.random())\n elif self.die_mode == \"descend\":\n self.last_die -= 1\n return (self.last_die % self.die_size) + 1\n\n # Theoretically this should just be a nonrepeating value\n def next_id(self):\n self.current_id += 1\n return self.current_id\n\n def get_unit_data(self):\n return {\n \"Scout\": {\"cp_cost\": Scout.cp_cost, \"shipsize_needed\": Scout.req_size_tech, \"hullsize\": Scout.hull_size},\n \"Destroyer\": {\"cp_cost\": Destroyer.cp_cost, \"shipsize_needed\": Destroyer.req_size_tech, \"hullsize\": Destroyer.hull_size}\n }\n\n def unit_str_to_class(self, unit):\n return {\n \"Scout\": Scout,\n \"Destroyer\": Destroyer,\n \"ColonyShip\": ColonyShip,\n \"ShipYard\": ShipYard,\n \"Colony\": Colony\n }[unit]\n\n def generate_state(self, player=None, combat=False):\n return {\n 'turn': self.current_turn,\n 'winner': None,\n 'players': [p.generate_state(player==p, combat) for p in self.players],\n 'player_whose_turn': self.current_player_id,\n 'phase': self.phase,\n 'round': self.round,\n 'technology_data': Technology.get_state(),\n 'unit_data': self.get_unit_data(),\n 'board_size': self.board.size\n }\n","sub_path":"deprecated/src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"276409210","text":"\"\"\"\nscons build file\n\n\tp\n\n@author: Jean-Lou Dupont\n\"\"\"\n\nImport('env')\n\nd = env.Dictionary()\n\n#no difference at this point\nif d.get('_DEBUG', False):\n\t#DEBUG\n\tlibs=['ei','epapi_debug']\n\tpr = env.Program('decho', Glob(\"src/*.cc\"), LIBS=libs )\t\nelse:\n\t#RELEASE\n\tlibs=['ei','epapi']\n\tpr = env.Program('echo', Glob(\"src/*.cc\"), LIBS=libs )\n\t\nDefault(pr)\n","sub_path":"package/test/echo/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"460541143","text":"### usage\n# python 04_concat_scaffolds.py /path/to/parentdir/used/in/00_start-pipeline/command \n### \n\n### purpose\n# instead of calling snps when combining some of our pools, call snps for indiviual pools\n###\n\n### FIX\n# I've used 'kit' as a key word that is in all of the library/pool names, this should be changed if not the case\n# see creation of combdict\n###\n\n### imports\nimport sys\nimport os\nfrom os import path as op\nfrom os import listdir\nimport pickle\nimport numpy as np\ndef uni(mylist):\n return (np.unique(mylist).tolist())\ndef ls(DIR):\n return sorted([f for f in listdir(DIR)])\ndef fs (DIR):\n return sorted([op.join(DIR,f) for f in ls(DIR)])\ndef createdirs(dirs):\n for d in dirs:\n if not op.exists(d):\n os.makedirs(d)\n### \n\n### args\nthisfile, parentdir = sys.argv\nif parentdir.endswith(\"/\"):\n parentdir = parentdir[:-1]\npoolref = pickle.load(open(op.join(parentdir,'poolref.pkl'),'rb'))\n###\n\n### dirs\nshdir = op.join(parentdir,'shfiles/concat')\ncatdir = op.join(parentdir,'concatenated_vcfs')\nfiltdir = op.join(parentdir,'filtered_snps')\ncreatedirs([shdir,catdir,filtdir])\n###\n\n# get the snpfiles\nsnpdir = op.join(parentdir,'snps')\nallfiles = fs(snpdir)\nsnpfiles = [f for f in fs(snpdir) if f.endswith('.gz') and 'snp' in op.basename(f) and f.replace('.gz','.gz.tbi') in allfiles]\nos.system('echo \"len(snpfiles) = %s\"' % str(len(snpfiles)))\n\n# sort snpfiles by combo lib\ncombdict = {}\nfor i,snp in enumerate(snpfiles):\n lib = \"---\".join([x for x in op.basename(snp).split(\"-\") if 'kit' in x])\n if not lib in combdict:\n combdict[lib] = []\n combdict[lib].append(snp)\nos.system('echo there are %s keys in combdict' % str(len(combdict.keys())))\nfor k in combdict.keys():\n os.system('echo %s' % k)\n\n# write the sh files\nshfiles = []\nfor lib in combdict.keys():\n if len(combdict[lib]) in [500,1000]:\n catout = op.join(catdir,\"%s_concatenated_snps.vcf.gz\" % lib)\n filtout = 
op.join(filtdir,\"%s_filtered_concatenated_snps.vcf.gz\" % lib)\n firstlib = lib.split(\"---\")[0]\n ref = poolref[firstlib]\n # I should have made scaffols be zfill(4) not zfill(3)\n files = \" \".join([snp for snp in sorted(combdict[lib]) if '1000' not in snp])\n # (bcftools needs the input files to be sorted)\n files = files + ' %s ' % [snp for snp in combdict[lib] if '1000' in snp][0] \n text = '''#!/bin/bash\n#SBATCH --time=02:59:59\n#SBATCH --mem=15000M\n#SBATCH --nodes=1\n#SBATCH --ntasks=32\n#SBATCH --cpus-per-task=1\n#SBATCH --job-name=%(lib)s-concat\n#SBATCH --output=%(lib)s-concat_%%j.out \n#SBATCH --mail-user=lindb@vcu.edu\n#SBATCH --mail-type=FAIL\n\nmodule load bcftools/1.9\n\nbcftools concat %(files)s -O z -o %(catout)s --threads 32\n\nmodule load gatk/4.0.8.1\n\ngatk IndexFeatureFile -F %(catout)s\n\ngatk VariantFiltration -R %(ref)s -V %(catout)s -O %(filtout)s --filter-expression \"QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5\" --filter-name \"coadaptree_filter\"\n\n''' % locals()\n file = op.join(shdir,\"%s-concat.sh\" % lib)\n if not op.exists(filtout): # so I can run this when files begin to finish from 03a and 03b\n if not op.exists(filtout.replace(\".gz\",\".gz.tbi\")):\n with open(file,'w') as o:\n o.write(\"%s\" % text)\n shfiles.append(file)\n\n# os.chdir(shdir)\n# for sh in shfiles:\n# os.system('echo %s' % sh)\n# os.system('sbatch %s' % sh)\n[print(sh) for sh in shfiles]\n \n","sub_path":"pipeline/04_filter_concat_scaffolds.py","file_name":"04_filter_concat_scaffolds.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"292775821","text":"import logging\nimport pprint\nfrom base64 import b64decode\nfrom urllib.parse import urljoin\n\nfrom tbk.services import WebpayService\nfrom tbk.commerce import Commerce\nfrom tbk import environments\nfrom odoo import api, fields, models, _\nfrom odoo.tools import float_round\nfrom odoo.tools.float_utils import float_repr\nfrom odoo.http import request\n\nfrom odoo.addons.payment.models.payment_acquirer import ValidationError\nfrom odoo.addons.payment_transbank.controllers.main import TransbankController\nfrom .transbank_data import RESPONSE_CODE, PAYMENT_TYPE_CODE\n\n_logger = logging.getLogger(__name__)\n\n\nclass PaymentAcquirerTransbank(models.Model):\n _inherit = 'payment.acquirer'\n\n provider = fields.Selection(\n selection_add=[('transbank', 'Transbank')], ondelete={'transbank': 'cascade'})\n # Elementos para generar el certificado\n transbank_commerce_id = fields.Char(\n string='ID de comercio',\n default=\"597020000541\",\n required_if_provider='transbank',\n help=\"Es requerido para que funcione el addons y generar el certificado autofirmado\")\n transbank_city = fields.Char(string=\"Ciudad\", help=\"Debe ser en mayúscula y sin tílde\")\n transbank_cert_file = fields.Binary(string=\"Cert File\")\n transbank_key_file = fields.Binary(string=\"Key File\")\n transbank_tbk_cert_file = fields.Binary(string=\"Transbank Cert File\")\n transbank_cert_file_name = fields.Char(string=\"Cert File name\")\n transbank_key_file_name = fields.Char(string=\"Key File name\")\n transbank_tbk_cert_file_name = fields.Char(string=\"Transbank Cert File name\")\n\n def _get_feature_support(self):\n res = super(PaymentAcquirerTransbank, self)._get_feature_support()\n # res['fees'].append('transbank')\n res['authorize'].append('transbank')\n # res['tokenize'].append('transbank')\n return res\n\n def transbank_get_form_action_url(self):\n return TransbankController._init_url\n\n def transbank_form_generate_values(self, values):\n amount = 
float_repr(float_round(values['amount'], 2), 0)\n currency = values['currency'] and values['currency'].name or ''\n buyorder = values['reference']\n acquirer_id = self.id\n\n transbank_tx_values = dict(values)\n temp_transbank_tx_values = {\n 'ACQUIRER_ID': acquirer_id,\n 'AMOUNT': amount,\n 'CURRENCY': currency,\n 'BUYORDER': buyorder\n }\n transbank_tx_values.update(temp_transbank_tx_values)\n return transbank_tx_values\n\n def _get_webpay_client(self):\n key_data = b64decode(self.transbank_key_file)\n cert_data = b64decode(self.transbank_cert_file)\n tbk_cert_data = b64decode(self.transbank_tbk_cert_file)\n transbank_enviroment = environments.DEVELOPMENT\n if self.state == 'cert':\n transbank_enviroment = environments.CERTIFICATION\n elif self.state == 'enabled':\n transbank_enviroment = environments.PRODUCTION\n commerce = Commerce(self.transbank_commerce_id, key_data, cert_data, tbk_cert_data, transbank_enviroment)\n webpay = WebpayService(commerce)\n return webpay\n\n def initTransaction(self, post):\n base_url = self.get_base_url()\n return_url = urljoin(base_url, TransbankController._result_url)\n final_url = urljoin(base_url, TransbankController._end_url)\n webpay = self._get_webpay_client()\n transaction = webpay.init_transaction(post['AMOUNT'], post['BUYORDER'], return_url, final_url)\n return transaction\n\n\nclass PaymentTransaction(models.Model):\n _inherit = 'payment.transaction'\n\n transbank_auth_transaction = fields.Char(\"Código autorización de transacción\", readonly=True, copy=False)\n transbank_payment_type = fields.Char(\"Tipo de pago\", readonly=True, copy=False)\n transbank_fee_type = fields.Char(\"Numero de cuotas\", readonly=True, copy=False)\n transbank_amount_fee = fields.Char(\"Valor de cuota\", readonly=True, copy=False)\n transbank_last_digits = fields.Char(\"Últimos dígitos de la tarjeta\", readonly=True, copy=False)\n transbank_commerce_id = fields.Char(string='ID de comercio')\n\n @api.model\n def 
_transbank_form_get_tx_from_data(self, data):\n reference = data.get('token_ws') or data.get('TBK_TOKEN')\n if reference:\n tx = self.search([('acquirer_reference', '=', reference)])\n elif data.get('transbank_transaction_id'):\n tx = self.browse(int(data.get('transbank_transaction_id')))\n elif data.get('BUYORDER'):\n tx = self.search([('reference', '=', data.get('BUYORDER'))])\n elif (request.session.get('sale_last_order_id') and request.session.get('__website_sale_last_tx_id')):\n tx = self.browse(request.session.get('__website_sale_last_tx_id'))\n if not tx or len(tx) > 1:\n error_msg = _('received data for reference %s') % (pprint.pformat(reference))\n if not tx:\n error_msg += _('; no order found')\n else:\n error_msg += _('; multiple order found')\n _logger.info(error_msg)\n raise ValidationError(error_msg)\n return tx\n\n def _transbank_form_get_invalid_parameters(self, data):\n invalid_parameters = []\n reference = data.get('token_ws') or data.get('TBK_TOKEN')\n if self.acquirer_reference and reference != self.acquirer_reference:\n invalid_parameters.append(('Reference code', reference, self.acquirer_reference))\n return invalid_parameters\n\n @api.model\n def _transbank_process_message_error(self, data):\n # cuando se anula en webpay se devuelve esta variable\n if data.get('TBK_TOKEN'):\n message_data = {\n 'header': 'Vemos que has desistido de tu compra.',\n 'body': 'Tal vez, estos no eran los productos que buscabas. Te invitamos a seguir mirando nuestro grandioso catálogo de productos',\n 'detail': ''\n }\n elif data.get('responseCode'):\n message_data = {\n 'header': 'Oops!. La transacción no se ha podido terminar.',\n 'body': '',\n 'detail': RESPONSE_CODE[data.get('responseCode')]\n\n }\n else:\n message_data = {\n 'header': 'Lo sentimos mucho.',\n 'body': 'Tenemos un inconveniente para realizar su compra. 
Solicitamos intentar nuevamente más tarde, gracias.',\n 'detail': 'Solicitamos comunicarce con nosotros y reportar el problema, gracias.'\n }\n return message_data\n\n def _transbank_form_validate(self, data):\n # cuando se anula en webpay se devuelve esta variable\n if data.get('TBK_TOKEN'):\n self._set_transaction_cancel()\n return False\n if not data.get('token_ws'):\n self._set_transaction_error(\"No se devolvio el token de la transaccion\")\n return False\n webpay_result = data.get('webpay_result') or {}\n if isinstance(webpay_result.get('detailOutput', []), list):\n detailOutput = webpay_result['detailOutput'][0]\n else:\n detailOutput = webpay_result['detailOutput']\n responseCode = detailOutput.get('responseCode', -1)\n _logger.info(pprint.pformat(webpay_result))\n transaction_vals = {\n 'state_message': str(webpay_result),\n }\n if RESPONSE_CODE.get(responseCode):\n transaction_vals['state_message'] = RESPONSE_CODE.get(responseCode)\n if responseCode == 0:\n # si tiene cuotas\n shares_amount = 0\n if 'sharesAmount' in detailOutput:\n shares_amount = detailOutput['sharesAmount']\n transaction_vals.update({\n 'transbank_auth_transaction': detailOutput['authorizationCode'],\n 'transbank_payment_type': PAYMENT_TYPE_CODE[detailOutput['paymentTypeCode']],\n 'transbank_fee_type': detailOutput['sharesNumber'],\n 'transbank_amount_fee': shares_amount,\n 'transbank_last_digits': webpay_result.get('cardDetail', {}).get('cardNumber'),\n 'transbank_commerce_id': detailOutput['commerceCode'],\n })\n self.write(transaction_vals)\n self._set_transaction_done()\n return True\n else:\n self.write(transaction_vals)\n self._set_transaction_cancel()\n return False\n\n def action_capture(self):\n transaction_transbank = self.filtered(lambda x: x.transbank_auth_transaction)\n for transaction in transaction_transbank:\n if transaction.state != 'done':\n transaction.sudo()._set_transaction_done()\n return super(PaymentTransaction, self - transaction_transbank).action_capture()\n\n 
def render_sale_button(self, order, submit_txt=None, render_values=None):\n if not render_values is None:\n render_values['transbank_order_id'] = order.id\n render_values['transbank_transaction_id'] = self.id\n return super(PaymentTransaction, self).render_sale_button(order, submit_txt=submit_txt,\n render_values=render_values)\n","sub_path":"payment_transbank/models/payment_acquirer.py","file_name":"payment_acquirer.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"11729083","text":"\"\"\"\n@file hough_lines.py\n@brief This program demonstrates line finding with the Hough transform\n\"\"\"\n\n# Source url: https://docs.opencv.org/4.2.0/d9/db0/tutorial_hough_lines.html\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger()\n\nimport sys\nimport math\nimport cv2 as cv\nimport numpy as np\n\ndef main():\n filename = 'sudoku.png'\n src = cv.imread(filename, cv.IMREAD_GRAYSCALE)\n if src is None:\n raise Exception(\"Failed to load source file\")\n\n dst = cv.Canny(src, 50, 200, None, 3)\n cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)\n lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)\n\n if lines is not None:\n for i in range(0, len(lines)):\n rho = lines[i][0][0]\n theta = lines[i][0][1]\n a = math.cos(theta)\n b = math.sin(theta)\n x0 = a * rho\n y0 = b * rho\n pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))\n pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))\n cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA)\n\n cv.imshow(\"Origin image\", src)\n cv.imshow(\"Detected Lines (in red) - Standard Hough Line Transform\", cdst)\n cv.waitKey()\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n log.exception(e)","sub_path":"py_opencv/py_hough_line.py","file_name":"py_hough_line.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"542912353","text":"# http://codeforces.com/problemset/problem/540/C\nfrom queue import Queue\nDIR = [(0, -1), (-1, 0), (0, 1), (1, 0)]\ndef bfs(r1, c1, r2, c2, cave):\n q = Queue()\n q.put((r1, c1))\n while q.empty() == False:\n x, y = q.get()\n for dx, dy in DIR:\n new_x = x + dx\n new_y = y + dy\n if new_x < len(cave) and new_y < len(cave[0]) \\\n and new_x >= 0 and new_y >= 0:\n if cave[new_x][new_y] == 'X':\n if new_x == r2 and new_y == c2:\n return 'YES'\n else:\n continue\n else:\n q.put((new_x, new_y))\n cave[new_x][new_y] = 'X'\n return 'NO'\n\nn, m = map(int, input().split())\ncave = [None] * n\nfor i in range(n):\n cave[i] = list(input())\n \nr1, c1 = map(int, input().split())\nr2, c2 = map(int, input().split())\nprint (bfs(r1 - 1, c1 - 1, r2 - 1, c2 - 1, cave))\n\n","sub_path":"codeforces/540C.py","file_name":"540C.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"255541669","text":"#Sources:\n#https://www.geeksforgeeks.org/reading-excel-file-using-python/\n#https://developers.google.com/calendar/v3/reference\n\nfrom __future__ import print_function\nimport os.path\nfrom os import path\nimport tkinter as tk\nfrom tkinter.filedialog import askopenfilename\nimport tkinter.messagebox\nimport datetime\nimport pickle\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nimport xlrd \n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.events', 'https://www.googleapis.com/auth/spreadsheets']\n\n#Root window for TK\nroot = tk.Tk()\nroot.withdraw()\n\n# Give the location of the file \nloc = askopenfilename(title = \"Select EXCEL file\",filetypes = ((\"xlsx files\",\"*.xlsx\"), (\"all files\",\"*.*\")) )\n \n#This part is about parsing the Excel file into the variables needed to store the event info\n# To open Workbook \nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \n\n#Get Title/Summary\nsummary_in = (sheet.cell_value(1,0))\n\n#Get Location\nloc_in = (sheet.cell_value(1,1))\n\n#Get Desc\ndesc_in = (sheet.cell_value(1,2))\n\n#Get Start Time and Date\nstarttime_in = (sheet.cell_value(1,3))\nstartdate_in = (sheet.cell_value(1,4))\nstart_dts = startdate_in + ' ' + starttime_in\n\n#Get End Time and Date\nendtime_in = (sheet.cell_value(1,5))\nenddate_in = (sheet.cell_value(1,6))\nend_dts = enddate_in + ' ' + endtime_in\n\n#Date & timestamp stuff is janky because the JSON object \"event\" wants RCF formatted time,\n#whereas the Excel file could have any kind of time input, so using strptime with concacted strings is probably the most\n#flexible approach for now\ndto_start = datetime.datetime.strptime(start_dts, '%m-%d-%Y %I:%M %p')\ndto_end = datetime.datetime.strptime(end_dts, '%m-%d-%Y %I:%M %p')\n\n#Get Attendees // currently not implemented\n#List of 
attendees is a \"list of dicts\" which is the input the JSON object \"event\" wants\n#attendee = (sheet.cell_value(7,1))\nattendees = [\"lpage@example.com\", \"ddage@example.com\"]\nlist_of_attendees = [\n {'email': attendees[0] },\n {'email': attendees[1] }\n ]\n#Is a WIP\n\ndef main():\n # A quick check to see if the token already exists.\n if (not (path.exists(\"token.pickle\"))):\n tkinter.messagebox.showinfo( \"Excel to Google Event\", \"You will be prompted to login & give permission to Google Cal\")\n \n #This is taken directly from the Google API Quickstart guide\n \"\"\"Shows basic usage of the Google Calendar API.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n \n #Here the service is built with credentials & we can move on to creating the event\n service = build('calendar', 'v3', credentials=creds)\n\n #Adding on sheets service\n sheets_service = build('sheets', 'v4', credentials=creds)\n\n #Spreadsheet ID\n SPREADSHEET_ID = '15-sqH2xXxN2Oq-VPR-Ei7u9aUIqImjEMFieo32gd1BQ'\n SCHEDULE_SHEET_ID = '1461379716' # 2-Schedule Recording-Instructional Day\n INSTRUCTORS_SHEET_ID = '1867685112' # 1-Approve Courses-Instructors-DropDown Menus\n SAMPLE_RANGE_NAME = '2-Schedule Recording-Instructional Day!A57:Y192'\n\n # Call the Sheets API\n sheet = sheets_service.spreadsheets()\n result = 
sheet.values().get(spreadsheetId=SPREADSHEET_ID,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n print (len(values))\n\n if not values:\n print('No data found.')\n else:\n for row in values:\n print (len(row))\n # Print columns A and E, which correspond to indices 0 and 4.\n print(row[0] + ' ' + row[1] + ' ' + row[4] + ' ' + row[5] + ' ' + row[6] + ' ' + row[7] + ' ' + row[8] + ' ' + row[9] + ' ' + row[10] + ' ' + row[11] + ' ' + row[12] )\n\n #The actual JSON style event object, time zone is static just because not really necessary \n event = {\n 'summary': summary_in,\n 'location': loc_in,\n 'description': desc_in,\n 'start': {\n 'dateTime': dto_start.isoformat(\"T\"),\n 'timeZone': 'US/Eastern',\n },\n 'end': {\n 'dateTime': dto_end.isoformat(\"T\"),\n 'timeZone': 'US/Eastern',\n },\n # 'recurrence': [\n # 'RRULE:FREQ=DAILY;COUNT=2'\n # ],\n 'attendees': list_of_attendees,\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n \n #Uses the service to insert the event\n event = service.events().insert(calendarId='primary', body=event, sendUpdates='all').execute()\n #could possibly make a popup with the HTML link as output\n print ('Event created: %s' % (event.get('htmlLink')))\n\nif __name__ == '__main__':\n main()\n","sub_path":"old (ignore)/runner - Copy.py","file_name":"runner - Copy.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"70711472","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_post_public'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('category', models.CharField(max_length=255, verbose_name='Категория')),\n ],\n ),\n migrations.AlterField(\n model_name='post',\n name='author',\n field=models.CharField(max_length=255, default='admin', verbose_name='Автор'),\n ),\n migrations.AlterField(\n model_name='post',\n name='category',\n field=models.CharField(max_length=255, verbose_name='Категория'),\n ),\n migrations.AlterField(\n model_name='post',\n name='content',\n field=models.TextField(max_length=10000, verbose_name='Текст'),\n ),\n migrations.AlterField(\n model_name='post',\n name='title',\n field=models.CharField(max_length=255, verbose_name='Название'),\n ),\n ]\n","sub_path":"blog/migrations/0003_auto_20151105_0036.py","file_name":"0003_auto_20151105_0036.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"445881177","text":"#имя файла: Task 12.py\r\n#номер версии: 1.0\r\n#автор и его учебная группа: Александровский А.П., ЭУ-142\r\n#дата создания: 22.05.2019\r\n#связанные файлы: пакет numpy\r\n#версия Python: 3.6\r\n#ОПИСАНИЕ: Создать прямоугольную матрицу A, имеющую N строк и M столбцов со\r\n #случайными элементами. Разделить элементы каждой строки на элемент\r\n #этой строки с наибольшим значением.\r\n\r\n\r\n\r\n\r\n# Подключение библиотеки Numpy и Random\r\nimport numpy as np\r\nimport random\r\n\r\n# Число строк и столбцов\r\nN = random.randint(2, 10)\r\nM = random.randint(1, 10)\r\n\r\n# Так как матрица должна быть прямоугольной, то N не может быть равно M\r\nwhile N == M:\r\n N = random.randint(1, 10)\r\n M = random.randint(1, 10)\r\n\r\n# Создание матрицы\r\nA = np.random.randint(0, 10, (N, M)).astype(np.float64)\r\nprint(str(A) + \"\\n\")\r\n\r\n# Нахождение наибольшее значение для каждой строки матрицы\r\nMax = A.max(axis=1)\r\nMax = np.array(Max)[: , np.newaxis]\r\n\r\n# Деление элеменотов\r\nA = A / Max\r\nprint(\"\\nНовая матрица: \\n\" + str(A))","sub_path":"2 Часть курсовой работы/Task 12.py","file_name":"Task 12.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"399299300","text":"import os\nimport sys\nimport numpy\nimport shutil\nfrom task9 import *\nfrom pathlib import Path\n\ndef main(args):\n cwd = Directory(os.getcwd())\n while True:\n cmdtokens = input('{path}$ '.format(path=cwd.path)).split()\n if not cmdtokens:\n continue\n cmd = cmdtokens[0]\n cmdargs = cmdtokens[1:]\n if cmd == 'ls':\n print()\n path = cwd.path if not cmdargs else cmdargs[0]\n directory = cwd.getsubdirectory(path)\n for item in directory.items():\n if item.isfile():\n print('{name}\\tFILE\\t{size}'.format(\n name=item.getname(), size=len(item)))\n else:\n print('{name}\\tDIR'.format(name=item.getname()))\n print()\n elif cmd == 'cd':\n new_path = ''.join(cmdargs)\n if os.path.isdir(new_path):\n path = new_path\n if '..' in new_path :\n for count in range(new_path.count('..')):\n cwd = Directory(os.path.split(cwd.get_path_name())[0])\n os.chdir(cwd.get_path_name())\n else:\n cwd = Directory(os.path.join(cwd.get_path_name(), path))\n os.chdir(cwd.get_path_name())\n elif not cmdargs or cmdargs == ['~/']:\n path = str(Path.home())\n cwd = Directory(path)\n os.chdir(cwd.get_path_name())\n else:\n print('Error! 
There is no such directory!')\n elif cmd == 'cat':\n new_path = ''.join(cmdargs)\n if os.path.isfile(new_path):\n with open(new_path, 'r') as file:\n for line in file:\n print(line.rstrip())\n elif cmd == 'head':\n number_of_rows = 10\n new_path = ''.join(cmdargs)\n if os.path.isfile(new_path):\n with open(new_path, 'r') as file:\n while number_of_rows != 0:\n print(file.readline().rstrip())\n number_of_rows -= 1\n elif cmd == 'tail':\n number_of_rows = 10\n new_path = ''.join(cmdargs)\n if os.path.isfile(new_path):\n with open(new_path, 'r') as file:\n my_lines = file.readlines()\n for line in my_lines[-number_of_rows:]:\n print(line.rstrip())\n elif cmd == 'pwd':\n print(os.getcwd())\n elif cmd == 'touch':\n new_path = ''.join(cmdargs)\n File(new_path).create() \n elif cmd == 'find':\n find_file = ''.join(cmdargs)\n all_paths = list(map(lambda x: x.get_path_name(), Directory(os.getcwd()).filesrecursive()))\n for path in all_paths:\n if find_file in os.path.split(path)[1]:\n print(path)\n elif cmd == 'clear':\n print('\\n' * 150)\n elif cmd == 'mv':\n old_name = cmdargs[0]\n new_name = cmdargs[1]\n if os.path.exists(old_name) and os.path.exists(os.path.join(os.getcwd(), new_name)) == False:\n os.rename(old_name, new_name)\n elif os.path.isfile(old_name) and os.path.isdir(new_name):\n shutil.move(old_name, new_name)\n else:\n print(\"Error: wrong input\")\n elif cmd == 'cp':\n old_name = cmdargs[0]\n new_name = cmdargs[1]\n if os.path.isfile(old_name) and os.path.isdir(new_name):\n shutil.copy(old_name, new_name)\n else:\n print(\"Error: wrong input\")\n elif cmd == 'rm':\n item = ''.join(cmdargs)\n if FSItem(item).isfile():\n os.remove(item)\n elif FSItem(item).isdirectory():\n shutil.rmtree(item)\n elif cmd == 'exit':\n print(\"Bye bye!\")\n break\n","sub_path":"first_session/taskA.py","file_name":"taskA.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"265789065","text":"import dash\nimport dash_core_components as dcc\nimport dash_table\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport pandas as pd\nimport os\nfrom dash.exceptions import PreventUpdate\nimport dash_bio as dashbio\n\nfrom src.processing import Processing\nfrom src.dashView import initializeData\n\n# files, which are processed\n# read-only\nfile_list = None\nstruct_data = None\n\n\n# starts dash\n# file_list: input data\n# sec_struct_data: input structural data\n# port: port\ndef startDash(files, port, sec_struct_data):\n global file_list\n global struct_data\n file_list = files\n struct_data = sec_struct_data\n app.run_server(debug=False, host='0.0.0.0', port=port)\n\n\n# calculates slider ranges\n# peak-boolean sets first value to 'none' (for peak-slider)\ndef markSliderRange(min_val, max_val, peak):\n mark = {}\n if peak:\n min_val += 1\n mark[0] = 'none'\n for i in range(min_val, max_val + 1):\n mark[i] = str(i)\n return mark\n\n\n# range() function for floats\n# start: start-value which is head of list\n# step: steps between two values\n# run: number of loop runs\ndef float_range(start, step, run):\n for_list = [start]\n for i in range(1, run):\n next_step = start + step * i\n for_list.append(next_step)\n return for_list\n\n\n# checks if custom normalization rates sum up to one\n# parameters (e.g. ee,ss,etc. 
..): rate for 2-mer\ndef check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs):\n custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]\n k_mer_sum = round(sum(custom_rates), 1)\n check_passed = bool(k_mer_sum == 1)\n\n return check_passed\n\n\n# ------------------------------------------- Dash-Layout --------------------------------------------------------------\n\napp = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\n\napp.title = \"k-Mer Dash\"\n\napp.layout = dbc.Container([\n # ------------------------------------------ Store -----------------------------------------------------------------\n dbc.Spinner(children=[dcc.Store(id='memory', storage_type='memory')],\n color=\"primary\", fullscreen=True),\n\n # -------------------------------------------------------------------------------------------------------------------\n dbc.Card([\n dbc.Row([\n dbc.Col(\n dbc.CardBody([\n html.H3(\"Menu\"),\n html.Br(),\n # ------------------------------------- Select File1 And File 2 ------------------------------------\n html.H6(\"Selected files:\", id=\"sel_files_header\"),\n dbc.Select(\n id=\"file1\",\n options=[],\n value=\"0\"),\n dbc.Select(\n id=\"file2\",\n options=[],\n value=\"1\"),\n dbc.Tooltip(\n \"Files containing DNA nucleotide-sequences used for k-mer visualization\",\n target=\"sel_files_header\"\n ),\n html.Br(),\n html.Br(),\n # ------------------------------------- Select Structure Files -------------------------------------\n html.H6(\"Selected structure files:\", id=\"struc_files_header\"),\n dbc.Select(\n id=\"file3\",\n options=[{\"label\": \"-\", \"value\": \"0\"}],\n value=\"0\"),\n dbc.Select(\n id=\"file4\",\n options=[{\"label\": \"-\", \"value\": \"0\"}],\n value=\"1\"),\n dbc.Tooltip(\n \"Files containing element-strings used for RNA structure heatmaps(s)\",\n target=\"struc_files_header\"\n ),\n html.Br(),\n html.Br(),\n # ------------------------------------------- K 
----------------------------------------------------\n html.H6(\"K-mer length:\", id=\"k_header\"),\n dcc.Slider(\n id='k',\n min=0,\n max=10,\n step=1,\n value=3,\n marks=markSliderRange(0, 10, False)\n ),\n dbc.Tooltip(\n \"Length of visualized substrings (k-mer)\",\n target=\"k_header\"\n ),\n html.Br(),\n # ----------------------------------------- Peak ---------------------------------------------------\n html.H6(\"Peak-position:\", id=\"peak_header\"),\n dcc.Slider(\n id='peak',\n min=1,\n max=10,\n step=1,\n value=0,\n marks=markSliderRange(0, 10, True)\n ),\n dbc.Tooltip(\n \"Highlighted position in sequence (e.g. assumed binding position \"\n \"of protein in given sequences)\",\n target=\"peak_header\"\n ),\n html.Br(),\n # ------------------------------------------ top ---------------------------------------------------\n html.H6(\"Top-values:\", id=\"top_header\"),\n dbc.Select(\n id='top',\n options=[\n {'label': '10', 'value': '0'},\n {'label': '20', 'value': '1'},\n {'label': '50', 'value': '2'},\n {'label': '100', 'value': '3'}\n ],\n value=\"0\"\n ),\n dbc.Tooltip(\n \"Number of highest k-mer occurrences\",\n target=\"top_header\"\n ),\n html.Br(),\n html.Br(),\n # -------------------------------- Highlighted Feature ---------------------------------------------\n html.H6(\"Highlighted feature:\", id=\"feature_header\"),\n dbc.Select(\n id=\"Feature\",\n options=[\n {\"label\": \"Frequency\", \"value\": \"1\"},\n {\"label\": \"T Occurrences\", \"value\": \"2\"},\n {\"label\": \"A Occurrences\", \"value\": \"3\"},\n {\"label\": \"C Occurrences\", \"value\": \"4\"},\n {\"label\": \"G Occurrences\", \"value\": \"5\"},\n ],\n value=\"1\"\n ),\n dbc.Tooltip(\n \"Highlighted/Colored property of PCAs\",\n target=\"feature_header\"\n ),\n html.Br(),\n html.Br(),\n # ------------------------------- Options structural data ------------------------------------------\n dbc.ButtonGroup(\n [dbc.Button(\"Extended options\", id=\"opt_btn_open\"),\n # 
dbc.Button(\"Export PDF\", id=\"ex_btn\",disabled=True)\n ],\n size=\"md\",\n className=\"mr-1\",\n ),\n dbc.Tooltip(\n \"Options for structural data visualization\",\n target=\"opt_btn_open\"\n ),\n dbc.Modal(\n [\n dbc.ModalHeader(\"Options for structural data visualization\"),\n dbc.ModalBody(children=[\n dcc.Checklist(\n id=\"sec_peak\",\n options=[{'label': 'show only peak positions', 'value': 'peaking'}],\n inputStyle={'margin-right': '3px'},\n ),\n dbc.Tooltip(\n \"Only show peak positions in RNA structure Heatmap(s)\",\n target=\"sec_peak\"\n ),\n html.Br(),\n html.Div(\"Normalization:\", id=\"norm_header\",\n style={'font-weight': 'bold', 'padding-bottom': '10px'}),\n html.Div(\"ERROR: sum of custom rates should be equal to 1\", id=\"error\",\n style={'font-weight': 'bold', 'color': 'red',\n 'padding-bottom': '10px'}, hidden=True),\n html.Div(\"ERROR: only numerical values between zero and one allowed\", id=\"error_type\",\n style={'font-weight': 'bold', 'color': 'red',\n 'padding-bottom': '10px'}, hidden=True),\n dcc.RadioItems(\n id=\"db\",\n options=[\n {'label': 'none', 'value': 'none'},\n {'label': 'use A.thaliana database', 'value': 'at_db'},\n {'label': 'use custom k-mer rates', 'value': 'custom_vals'}\n ],\n value='none',\n labelStyle={'display': 'block'},\n inputStyle={'margin-right': '3px'}\n ),\n dbc.Tooltip(\n \"Used data for normalization of structural data\",\n target=\"norm_header\"\n ),\n html.Div(id=\"norm_input\", children=[\n html.Table(children=[\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"EE\"),\n dbc.Input(id=\"EE\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0)], ),\n html.Td(children=[\n html.Div(\"ES\"),\n dbc.Input(id=\"ES\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0)], ),\n html.Td(children=[\n html.Div(\"SS\"),\n dbc.Input(id=\"SS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0)], )\n ]),\n html.Tr(children=[\n 
html.Td(children=[\n html.Div(\"SI\"),\n dbc.Input(id=\"SI\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"IS\"),\n dbc.Input(id=\"IS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"II\"),\n dbc.Input(id=\"II\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], )\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"SH\"),\n dbc.Input(id=\"SH\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"HS\"),\n dbc.Input(id=\"HS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"HH\"),\n dbc.Input(id=\"HH\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], )\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"SM\"),\n dbc.Input(id=\"SM\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"MS\"),\n dbc.Input(id=\"MS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"SE\"),\n dbc.Input(id=\"SE\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"BB\"),\n dbc.Input(id=\"BB\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"BS\"),\n dbc.Input(id=\"BS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"SB\"),\n dbc.Input(id=\"SB\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"MM\"),\n dbc.Input(id=\"MM\", 
type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[]),\n html.Td(children=[\n html.Br(),\n dbc.Button(\"Reset\", id=\"opt_btn_reset\",\n style={'margin': 'auto'})]),\n dbc.Tooltip(\n \"Reset table\",\n target=\"opt_btn_reset\"\n ),\n\n ])\n ], style={'width': '100%'}\n )\n ], style={'display': 'block'}, hidden=True),\n ]),\n dbc.ModalFooter(children=[\n dbc.ButtonGroup(\n [dbc.Button(\"Apply\", id=\"opt_btn_apply\"),\n dbc.Button(\"Close\", id=\"opt_btn_close\")],\n className=\"mr-1\",\n ),\n\n ]\n ),\n\n ],\n id=\"ex_options\",\n backdrop='static',\n centered=True\n ),\n\n ], style={\n 'height': '100vh',\n 'left': '0px',\n 'background': 'lightgrey'}),\n width=2,\n style={\"padding-right\": '0px',\n \"padding-left\": '0px',\n 'margin-right': '0px'}),\n\n # --------------------------------------- ScatterPlot ------------------------------------------------------\n dbc.Col([\n dbc.Card([\n dbc.Spinner(children=[\n dcc.Tabs(value=\"s-tab\", children=[\n dcc.Tab(label=\"Scatterplot\", value='s-tab', id=\"s-tab1\", children=[\n dcc.Graph(figure={}, id=\"scatter\", style={'height': '40vh'})\n ]),\n # -------------------------------------- FornaContainer ------------------------------------\n dcc.Tab(value='r-tab', id=\"s-tab2\", children=[\n dbc.Card(\n dashbio.FornaContainer(\n id='forna', height='300', width='400', colorScheme='custom'\n ),\n className=\"w-100 p-3\",\n ),\n ]),\n dcc.Tab(value='r-tab2', id=\"s-tab3\", children=[\n dbc.Card(\n dashbio.FornaContainer(\n id='forna2', height='300', width='400', colorScheme='custom'\n ),\n className=\"w-100 p-3\",\n ),\n ])\n ]),\n dbc.Tooltip(\n \"Scatterplot of k-mer occurences from selected files containing \"\n \"nucleotide sequences\",\n target=\"s-tab1\"\n ),\n dbc.Tooltip(\n \"Visualization of arbitrary RNA structure, highlighting k-mer occurrences of \"\n \"element strings from first selected structural data file\",\n target=\"s-tab2\"\n ),\n dbc.Tooltip(\n 
\"Visualization of arbitrary RNA structure, highlighting k-mer occurrences of \"\n \"element strings from second selected structural data file\",\n target=\"s-tab3\"\n ),\n ],\n color=\"primary\", spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n\n ], style={\n 'background': '#f2f2f2', 'height': '50vh'}, outline=True),\n\n # -------------------------------------------- TopK ----------------------------------------------------\n dbc.Spinner(children=[dbc.Card(id=\"topK\", children=[], style={\n 'background': '#f2f2f2', 'height': '49vh', 'overflow-y': 'scroll'}, outline=True)],\n color=\"primary\", spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n ],\n width=5,\n style={\"padding-right\": '5px',\n \"padding-left\": '10px'}),\n\n # ------------------------------------------------- PCAs ---------------------------------------------------\n dbc.Col(\n [dbc.Card([\n dbc.Spinner(children=[\n dcc.Tabs(id='tabs-example', value='Tab1', children=[\n dcc.Tab(label=\"\", value='Tab1', id=\"Tab1\", children=[\n dcc.Graph(figure={}, id=\"PCA1\",\n style={'height': '42vh'}\n )\n ]),\n dcc.Tab(label=\"\", value='Tab2', id=\"Tab2\", children=[\n dcc.Graph(figure={}, id=\"PCA2\",\n style={'height': '42vh'}\n )\n ]),\n ],\n ),\n dbc.Tooltip(\n \"Principal component analysis (PCA) of first selected file containing nucleotide sequences\",\n target=\"Tab1\"\n ),\n dbc.Tooltip(\n \"Principal component analysis (PCA) of \"\n \"second selected file containing nucleotide sequences\",\n target=\"Tab2\"\n ),\n ], color=\"primary\",\n spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n\n ], style={\n 'background': '#f2f2f2', 'height': '50vh'}, outline=True),\n\n # ------------------------------------------- MSA --------------------------------------------------\n dbc.Spinner(children=[dbc.Card(id=\"msa\", children=[], style={\n 'background': '#f2f2f2', 'height': '49vh', 'overflow-y': 'scroll'}, 
outline=True)],\n color=\"primary\", spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n ],\n width=5,\n style={\"padding-right\": '0px',\n \"padding-left\": '0px'}\n )\n\n ], style={'padding-top': '0px', 'padding-bottom': '0px', 'margin-top': '0px', 'margin-bottom': '0px',\n 'margin-left': '0px', 'padding-left': '0px'},\n className=\"mw-100 mh-100\"\n ),\n\n ],\n className=\"mw-100 mh-100\"),\n], className=\"mw-100 mh-100\", style={'left': '0px', 'margin-left': '0px', 'padding': '0px'})\n\n\n# ------------------------------------ Store Callback ------------------------------------------------------------------\n\n@app.callback(\n [dash.dependencies.Output('memory', 'data')],\n [dash.dependencies.Input('file1', 'value'),\n dash.dependencies.Input('file2', 'value'),\n dash.dependencies.Input('file3', 'value'),\n dash.dependencies.Input('file4', 'value'),\n dash.dependencies.Input('k', 'value'),\n dash.dependencies.Input('peak', 'value'),\n dash.dependencies.Input('top', 'value'),\n dash.dependencies.Input('Feature', 'value'),\n dash.dependencies.Input('opt_btn_apply', 'n_clicks'),\n dash.dependencies.State('sec_peak', 'value'),\n dash.dependencies.State('EE', 'value'),\n dash.dependencies.State('SS', 'value'),\n dash.dependencies.State('II', 'value'),\n dash.dependencies.State('MM', 'value'),\n dash.dependencies.State('BB', 'value'),\n dash.dependencies.State('SI', 'value'),\n dash.dependencies.State('IS', 'value'),\n dash.dependencies.State('SM', 'value'),\n dash.dependencies.State('MS', 'value'),\n dash.dependencies.State('ES', 'value'),\n dash.dependencies.State('SE', 'value'),\n dash.dependencies.State('HH', 'value'),\n dash.dependencies.State('HS', 'value'),\n dash.dependencies.State('SH', 'value'),\n dash.dependencies.State('SB', 'value'),\n dash.dependencies.State('BS', 'value'),\n dash.dependencies.State('db', 'value'),\n dash.dependencies.State('memory', 'data')]\n)\n# calculates new data for tables/diagrams\n# k: k-mer length\n# 
peak: peak: peak-position, where sequences should be aligned\n# top: number of best values\n# pca_feature: number of T or k-mer-Frequency for PCAs\n# apply_options_btn: n_clicks of apply-button within modal\n# sec_peak: peak status (-1: no data, 0: False, 1: True) for structural data\n# parameters (e.g. ee,ss,etc. ...): custom rates of 2-mer\n# norm_option: normalization option (none, for A.thaliana, custom)\n# data: storage to share data between callbacks\ndef updateData(f1, f2, f3, f4, k, peak, top, pca_feature, apply_options_btn, sec_peak,\n ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs, norm_option, data):\n normalization_vector = None\n\n selected_struc = None\n\n normalization_status = -1\n\n no_peak = 0\n\n no_sec_peak_false = 0\n\n no_sec_peak_true = 1\n\n ctx = dash.callback_context\n element_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n # if custom rates given, check input\n if element_id == \"opt_btn_apply\" and norm_option == 'custom_vals':\n normalization_status = 1\n custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]\n # if input contains non-digits, prevent update\n labels = [\"EE\", \"SS\", \"II\", \"MM\", \"BB\", \"SI\", \"IS\", \"SM\", \"MS\", \"ES\", \"SE\", \"HH\", \"HS\", \"SH\", \"SB\", \"BS\"]\n if None in custom_rates:\n return dash.no_update\n check_sum_passed = check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs)\n # if sum of custom rates is one, do normalization\n if check_sum_passed:\n normalization_vector = dict(zip(labels, custom_rates))\n # otherwise prevent update\n else:\n return dash.no_update\n elif element_id == \"opt_btn_apply\" and norm_option == 'at_db':\n normalization_status = 0\n elif not element_id == \"opt_btn_apply\" and data is not None:\n sec_peak = data['last_sec_peak']\n normalization_status = data['last_norm_stat']\n\n # translate dropdown value into real value\n top_opt_val = {'0': 10, '1': 20, '2': 50, '3': 100}\n\n top = 
top_opt_val[top]\n\n if peak == no_peak:\n peak = None\n\n if sec_peak == ['peaking']:\n no_sec_peak = no_sec_peak_false # =False\n else:\n no_sec_peak = no_sec_peak_true # =True\n\n # initialize (structural) data for calculations\n if data is None:\n selected = [file_list[0], file_list[1]]\n\n if struct_data is not None:\n if len(struct_data) > 1:\n selected_struc = [struct_data[0], struct_data[1]]\n else:\n selected_struc = [struct_data[0]]\n else:\n selected = [file_list[int(f1)], file_list[int(f2)]]\n if struct_data is not None:\n if len(struct_data) > 1:\n selected_struc = [struct_data[int(f3)], struct_data[int(f4)]]\n else:\n selected_struc = [struct_data[int(f3)]]\n\n new_process = initializeData.initData(file_list, selected, k, peak, top, pca_feature, selected_struc, no_sec_peak)\n\n # calculate top-table\n top_k = Processing.getTopKmer(new_process).copy()\n kmer = top_k.index\n top_k[\"K-Mer\"] = kmer\n top_k[\"\"] = [\"\" for i in range(0, len(top_k))]\n top_k = top_k[[\"\", \"K-Mer\", \"Frequency\", \"File\"]]\n top_k = top_k.sort_values(by=\"Frequency\", ascending=False)\n top_k_table = [\n dash_table.DataTable(columns=[{\"name\": i, \"id\": i} for i in top_k.columns], data=top_k.to_dict('records'),\n style_table={'overflow-x': 'hidden'},\n style_cell={'textAlign': 'center'},\n export_format=\"csv\",\n sort_action='native')]\n\n # calculate MSA\n\n algn1, algn2, f1_name, f2_name = initializeData.getAlignmentData(new_process)\n\n # if columns differ in their length, need to do some adaptions\n if (len(algn1) > 1 and len(algn2) > 1) or (len(algn1) <= 1 and len(algn2) <= 1):\n if len(algn1) <= 1 and len(algn2) <= 1:\n algn1_df = pd.DataFrame(columns=[f1_name], data=['No data to align'])\n algn2_df = pd.DataFrame(columns=[f2_name], data=['No data to align'])\n else:\n algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)\n algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)\n\n algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)\n 
msas = [\n dash_table.DataTable(columns=[{\"name\": i, \"id\": i} for i in algn1_df.columns],\n data=algn1_df.to_dict('records'),\n style_table={'overflow-x': 'hidden'},\n style_cell={'textAlign': 'center'},\n export_format=\"csv\")]\n\n else:\n if len(algn1) <= 1:\n algn1 = ['No data to align']\n\n algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)\n algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)\n algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)\n else:\n algn2 = ['No data to align']\n\n algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)\n algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)\n algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)\n\n msas = [dash_table.DataTable(columns=[{\"name\": i, \"id\": i} for i in algn1_df.columns],\n data=algn1_df.to_dict('records'),\n style_table={'overflow-x': 'hidden'},\n style_cell={'textAlign': 'center'},\n export_format=\"csv\")]\n\n # calculate scatterplot\n\n scatter = initializeData.getScatterPlot(new_process)\n\n # calculate PCAs\n\n pca_12, file1, file2 = initializeData.getPCA(new_process)\n pcas = [pca_12, file1, file2]\n\n seq_len = new_process.getSeqLen()\n\n # calculate RNA-Template(s), dotbracket-string(s), color-vector, color-scale\n # and color-domain(s) (highest value in color-vector)\n\n if struct_data is not None:\n\n structure_info = initializeData.getTemplateSecondaryStructure(new_process, normalization_vector,\n normalization_status, no_sec_peak)\n\n struct1, struct2, color1, color2, color_domain_max1, color_domain_max2, color_scale = structure_info\n\n if struct1 is not None and struct2 is not None:\n templates = [struct1[0], struct2[0]]\n dbs = [struct1[1], struct2[1]]\n elif struct1 is not None:\n templates = [struct1[0]]\n dbs = [struct1[1]]\n else:\n templates = []\n dbs = []\n else:\n templates = None\n dbs = None\n color1 = None\n color2 = None\n color_domain_max1 = None\n color_domain_max2 = None\n color_scale = None\n\n data = 
{'topK': top_k_table, 'msas': msas, 'scatter': scatter, 'pcas': pcas, 'seqLen': seq_len,\n 'templates': templates, 'dbs': dbs, 'colors': [color1, color2],\n 'color_max': [color_domain_max1, color_domain_max2], 'color_scale': color_scale,\n 'last_sec_peak': sec_peak, 'last_norm_stat': normalization_status}\n\n return [data]\n\n\n# --------------------------------------- File Dropdown Updater --------------------------------------------------------\n@app.callback([\n dash.dependencies.Output(\"file1\", \"options\"),\n dash.dependencies.Input(\"file2\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f2: second selected file\ndef updateFile1Dropdown(f2):\n return updateFileList(f2, False)\n\n\n@app.callback([\n dash.dependencies.Output(\"file2\", \"options\"),\n dash.dependencies.Input(\"file1\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f1: first selected file\ndef updateFile2Dropdown(f1):\n return updateFileList(f1, False)\n\n\n# disables already selected file in other dropdown\n# val: (structural) file\n# struct: bool (True= structural file is given)\ndef updateFileList(val, struct):\n if struct and struct_data is not None:\n files = struct_data\n elif struct and struct_data is None:\n return [{\"label\": \"-\", \"value\": \"0\"}]\n else:\n files = file_list\n\n option = [\n {'label': os.path.basename(files[i]), 'value': str(i)} if not (str(i) == val)\n else {'label': os.path.basename(files[i]), 'value': str(i), 'disabled': True}\n for i in range(0, len(files))]\n\n return [option]\n\n\n# --------------------------------------- Structure File Dropdown Updater ----------------------------------------------\n@app.callback([\n dash.dependencies.Output(\"file3\", \"options\"),\n dash.dependencies.Input(\"file4\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f4: second selected structural file\ndef updateFile4Dropdown(f4):\n return updateFileList(f4, True)\n\n\n@app.callback([\n 
dash.dependencies.Output(\"file4\", \"options\"),\n dash.dependencies.Input(\"file3\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f1: first selected structural file\ndef updateFile3Dropdown(f3):\n if struct_data is not None and len(struct_data) > 1:\n return updateFileList(f3, True)\n else:\n raise PreventUpdate\n\n\n# --------------------------------------- Slider Values Updater --------------------------------------------------------\n\n\n@app.callback(\n [\n dash.dependencies.Output(\"k\", \"min\"),\n dash.dependencies.Output(\"k\", \"max\"),\n dash.dependencies.Output(\"k\", \"marks\"),\n dash.dependencies.Output(\"peak\", \"min\"),\n dash.dependencies.Output(\"peak\", \"max\"),\n dash.dependencies.Output(\"peak\", \"marks\"),\n ],\n [\n dash.dependencies.Input('memory', 'modified_timestamp'),\n dash.dependencies.State('memory', 'data'),\n ],\n)\n# calculates slider ranges (marks)\n# fil1/file2: input file\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateSliderRange(ts, data):\n if ts is None:\n raise PreventUpdate\n k_p_slider_max = data['seqLen']\n k_p_slider_min = 2\n\n k_slider_max = k_p_slider_max - 1\n peak_min = 0\n\n # calculation of new slider ranges if files were changed\n\n k_range = markSliderRange(k_p_slider_min, k_slider_max, False)\n peak_range = markSliderRange(peak_min, k_p_slider_max, True)\n\n return k_p_slider_min, k_slider_max, k_range, peak_min, k_p_slider_max, peak_range\n\n\n# ----------------------------------------- Forna-Container Update -----------------------------------------------------\n\n@app.callback(\n dash.dependencies.Output('forna', 'sequences'),\n dash.dependencies.Output('forna', 'customColors'),\n dash.dependencies.Output('s-tab2', 'label'),\n dash.dependencies.Output('s-tab2', 'disabled'),\n dash.dependencies.Output('forna2', 'sequences'),\n dash.dependencies.Output('forna2', 'customColors'),\n dash.dependencies.Output('s-tab3', 'label'),\n 
dash.dependencies.Output('s-tab3', 'disabled'),\n [dash.dependencies.Input('memory', 'data'),\n dash.dependencies.Input('file3', 'value'),\n dash.dependencies.Input('file4', 'value'),\n ]\n)\n# create RNA Structure Heatmap visualizations\n# data: store\n# f3: first selected structural file\n# f4: second selected structural file\ndef show_selected_sequences(data, f3, f4):\n if data is None:\n raise PreventUpdate\n\n template_list = data['templates']\n dotbracket_list = data['dbs']\n color_domain_max1 = data['color_max'][0]\n color_domain_max2 = data['color_max'][1]\n\n # if only one structural file is given, color_domain_max and color_domain_min are not changed\n domain_nbr = 2\n if color_domain_max1 is None:\n color_domain_max1 = 0\n domain_nbr = 1\n if color_domain_max2 is None:\n color_domain_max2 = 0\n domain_nbr = 1\n\n color_domain_max = ((color_domain_max1 + color_domain_max2) / domain_nbr)\n\n if data['colors'][0] is not None:\n color_vals1 = list(set(data['colors'][0].values()))\n if 0 in color_vals1:\n color_vals1.remove(0)\n color_domain_min1 = min(color_vals1)\n else:\n color_domain_min1 = 0\n\n if data['colors'][1] is not None:\n color_vals2 = list(set(data['colors'][1].values()))\n if 0 in color_vals2:\n color_vals2.remove(0)\n color_domain_min2 = min(color_vals2)\n else:\n color_domain_min2 = 0\n\n color_domain_min = (color_domain_min1 + color_domain_min2) / domain_nbr\n\n color_range = data['color_scale']\n if color_range is None:\n # prevents divideByZero error\n # has no effect because if scale is None then there is not structural data\n color_range_length = 2\n else:\n color_range_length = len(color_range)\n\n steps = ((color_domain_max - color_domain_min) / (color_range_length - 1))\n if steps == 0:\n steps = 1\n\n color_domain = [i for i in float_range(color_domain_min, steps, (color_range_length - 1))]\n color_domain.append(color_domain_max)\n\n # disable tab for files if no or only one structural file is given\n disable_t1 = False\n disable_t2 
= False\n\n # color-vector\n custom_colors = None\n custom_colors2 = None\n\n tab1_label = \"RNA-Structure Heatmap 1\"\n tab2_label = \"RNA-Structure Heatmap 2\"\n\n if struct_data is not None:\n\n color1 = data['colors'][0]\n\n tab1_label = os.path.basename(struct_data[int(f3)]) + \" Structure Heatmap\"\n\n # create color-vector-object for FornaContainer\n custom_colors = {\n 'domain': color_domain,\n 'range': color_range,\n 'colorValues': {\n 'template1': color1,\n }\n }\n\n # create sequence-object for FornaContainer\n template1 = [{\n 'sequence': template_list[0],\n 'structure': dotbracket_list[0],\n 'options': {'name': 'template1'}\n }]\n\n if len(template_list) > 1: # more than one structure file committed\n color2 = data['colors'][1]\n\n tab2_label = os.path.basename(struct_data[int(f4)]) + \" Structure Heatmap\"\n\n custom_colors2 = {\n 'domain': color_domain,\n 'range': color_range,\n 'colorValues': {\n 'template2': color2,\n }\n }\n\n template2 = [{\n 'sequence': template_list[1],\n 'structure': dotbracket_list[1],\n 'options': {'name': 'template2'}\n }]\n\n else: # if no second structural file is available\n template2 = [{\n 'sequence': \"\",\n 'structure': \"\"\n }]\n disable_t2 = True\n\n else: # if not structural data is available\n template1 = [{\n 'sequence': \"\",\n 'structure': \"\"\n }]\n template2 = [{\n 'sequence': \"\",\n 'structure': \"\"\n }]\n disable_t1 = True\n disable_t2 = True\n\n return template1, custom_colors, tab1_label, disable_t1, template2, custom_colors2, tab2_label, disable_t2\n\n\n# -------------------------------------------- Modals Updater ----------------------------------------------------------\n\n@app.callback([dash.dependencies.Output('ex_options', 'is_open'),\n dash.dependencies.Output('norm_input', 'hidden'),\n ],\n [dash.dependencies.Input('memory', 'modified_timestamp'),\n dash.dependencies.Input('opt_btn_open', 'n_clicks'),\n dash.dependencies.Input('opt_btn_close', 'n_clicks'),\n 
dash.dependencies.Input('opt_btn_apply', 'n_clicks'),\n dash.dependencies.Input('db', 'value'),\n dash.dependencies.Input('error', 'hidden'),\n dash.dependencies.Input('error_type', 'hidden'),\n dash.dependencies.State('ex_options', 'is_open'),\n ], prevent_initial_call=True)\n# opens or closes modal\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\n# btn_open, btn_close, btn_apply: n_clicks of open-/apply/reset-button for/in modal\n# norm_val: normalization option (none, for A.thaliana, custom)\n# error1, error2: error messages hidden status (True/False)\n# is_open: modal status (True/False)\ndef updateExtendedOptionModal(ts, btn_open, btn_close, btn_apply, norm_val, error1, error2, is_open):\n if ts is None:\n raise PreventUpdate\n\n # determine which button was triggered\n ctx = dash.callback_context\n btn_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n if norm_val == 'custom_vals':\n show_table = False\n else:\n show_table = True\n\n # if open-/close button was triggered, then open or close modal\n if btn_id == \"opt_btn_open\" or btn_id == \"opt_btn_close\":\n return [not is_open, show_table]\n # if apply button was triggered, close modal if no error message is shown\n elif btn_id == 'opt_btn_apply':\n if not error1 or not error2:\n return [is_open, show_table]\n else:\n return [not is_open, show_table]\n else:\n return [is_open, show_table]\n\n\n@app.callback([\n dash.dependencies.Output('EE', 'value'),\n dash.dependencies.Output('SS', 'value'),\n dash.dependencies.Output('II', 'value'),\n dash.dependencies.Output('MM', 'value'),\n dash.dependencies.Output('BB', 'value'),\n dash.dependencies.Output('SI', 'value'),\n dash.dependencies.Output('IS', 'value'),\n dash.dependencies.Output('SM', 'value'),\n dash.dependencies.Output('MS', 'value'),\n dash.dependencies.Output('ES', 'value'),\n dash.dependencies.Output('SE', 'value'),\n dash.dependencies.Output('HH', 'value'),\n dash.dependencies.Output('HS', 'value'),\n 
dash.dependencies.Output('SH', 'value'),\n dash.dependencies.Output('SB', 'value'),\n dash.dependencies.Output('BS', 'value'),\n\n],\n [dash.dependencies.Input('opt_btn_reset', 'n_clicks'),\n ], prevent_initial_call=True)\n# resets custom rate table in modal\n# reset_btn: n_clicks of reset button\ndef resetTable(reset_btn):\n if reset_btn:\n return [0 for i in range(0, 16)]\n else:\n return [dash.no_update for i in range(0, 16)]\n\n\n@app.callback([\n dash.dependencies.Output('error', 'hidden'),\n dash.dependencies.Output('error_type', 'hidden'),\n\n],\n [\n dash.dependencies.Input('opt_btn_apply', 'n_clicks'),\n dash.dependencies.Input('db', 'value'),\n dash.dependencies.State('EE', 'value'),\n dash.dependencies.State('SS', 'value'),\n dash.dependencies.State('II', 'value'),\n dash.dependencies.State('MM', 'value'),\n dash.dependencies.State('BB', 'value'),\n dash.dependencies.State('SI', 'value'),\n dash.dependencies.State('IS', 'value'),\n dash.dependencies.State('SM', 'value'),\n dash.dependencies.State('MS', 'value'),\n dash.dependencies.State('ES', 'value'),\n dash.dependencies.State('SE', 'value'),\n dash.dependencies.State('HH', 'value'),\n dash.dependencies.State('HS', 'value'),\n dash.dependencies.State('SH', 'value'),\n dash.dependencies.State('SB', 'value'),\n dash.dependencies.State('BS', 'value'),\n ], prevent_initial_call=True)\n# show error message, if input in custom rates table is invalid\n# apply_btn: n_clicks of apply button for custom rates\n# norm_option: normalization options (none, for A.thaliana, custom)\n# parameter (e.g. ee,ss,etc. 
...): custom rates for 2-mer\ndef showErrorMessages(apply_btn, norm_option, ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs):\n hide_error_msg = True\n hide_error_type_msg = True\n\n ctx = dash.callback_context\n triggered_component = ctx.triggered[0]['prop_id'].split('.')[0]\n\n if triggered_component == 'opt_btn_apply':\n if not norm_option == 'custom_vals':\n return [hide_error_msg, hide_error_type_msg]\n custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]\n if None in custom_rates:\n hide_error_type_msg = False\n return [hide_error_msg, hide_error_type_msg]\n check_sum_passed = check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs)\n if not check_sum_passed:\n hide_error_msg = False\n return [hide_error_msg, hide_error_type_msg]\n\n\n@app.callback([dash.dependencies.Output('opt_btn_open', 'disabled'),\n ],\n [dash.dependencies.Input('memory', 'modified_timestamp')])\n# disables 'extended options' button if no structural data is available\n# ts: store timestamp\ndef disableButton(ts):\n if ts is None:\n raise PreventUpdate\n\n disable_btn = False\n if struct_data is None:\n disable_btn = True\n\n return [disable_btn]\n\n\n# --------------------------------------------- Diagram/Table Updater --------------------------------------------------\n\n# Tables/Diagrams only get updated figures/datatables here\n\n@app.callback(dash.dependencies.Output('scatter', 'figure'),\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateScatter(data):\n if data is None:\n raise PreventUpdate\n return data.get('scatter', 0)\n\n\n@app.callback([dash.dependencies.Output('PCA1', 'figure'),\n dash.dependencies.Output('PCA2', 'figure'),\n dash.dependencies.Output('Tab1', 'label'),\n dash.dependencies.Output('Tab2', 'label')],\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share 
data between callbacks\ndef updatePCAs(data):\n if data is None:\n raise PreventUpdate\n pca_data = data.get('pcas', 0)\n pca1 = pca_data[0][0]\n pca2 = pca_data[0][1]\n file1 = pca_data[1]\n file2 = pca_data[2]\n return [pca1, pca2, file1, file2]\n\n\n@app.callback(dash.dependencies.Output('topK', 'children'),\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateTopK(data):\n if data is None:\n raise PreventUpdate\n return data.get('topK', 0)\n\n\n@app.callback(dash.dependencies.Output('msa', 'children'),\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateMSA(data):\n if data is None:\n raise PreventUpdate\n return data.get('msas', 0)\n","sub_path":"src/dashView/dashLayout.py","file_name":"dashLayout.py","file_ext":"py","file_size_in_byte":48636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"61847024","text":"from tkinter import *\r\n\r\nimport mysql.connector\r\n\r\nfrom globals import std_bg\r\n\r\n\r\ndef attributes_sets_options():\r\n db = mysql.connector.connect(host=\"localhost\", user=\"root\", database=\"test\")\r\n cursor = db.cursor()\r\n\r\n cursor.execute(\"SELECT * FROM attributes_sets\")\r\n db_results = cursor.fetchall()\r\n\r\n results = []\r\n for i in range(len(db_results)):\r\n results.append(list(db_results[i]))\r\n for j in range(len(results[i])):\r\n if results[i][j] is None:\r\n results[i][j] = ''\r\n\r\n # Sortowanie wyników dla lepszego odbioru użytkownika\r\n results = sorted(results, key=lambda x: x[1])\r\n\r\n root = Tk()\r\n root.title('Automatyzacja opisów')\r\n root.geometry('570x550')\r\n root.configure(bg=std_bg)\r\n\r\n # Nie rozumiem tego kodu, ale on dodaje suwak\r\n main_frame = Frame(root, bg=std_bg)\r\n main_frame.pack(fill=BOTH, expand=1)\r\n canvas = Canvas(main_frame, bg=std_bg)\r\n canvas.pack(side=LEFT, fill=BOTH, expand=1)\r\n scrollbar = Scrollbar(main_frame, orient=VERTICAL, command=canvas.yview, bg=std_bg)\r\n scrollbar.pack(side=RIGHT, fill=Y)\r\n canvas.configure(yscrollcommand=scrollbar.set, bg=std_bg)\r\n canvas.bind('', lambda e: canvas.configure(scrollregion=canvas.bbox(\"all\")))\r\n view_frame = Frame(canvas, bg=std_bg)\r\n canvas.create_window((0, 0), window=view_frame, anchor='nw')\r\n\r\n \"\"\" NAZWY KOLUMN \"\"\"\r\n\r\n frame_cols = LabelFrame(view_frame, text=\"Nazwy kolumn\")\r\n frame_cols.configure(bg=std_bg)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Nazwa\").grid(row=0, column=0,\r\n pady=2)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Zestaw1\").grid(row=0, column=1,\r\n pady=2)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Waga\").grid(row=0, column=2,\r\n pady=2)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, 
text=\"Zestaw2\").grid(row=0, column=3,\r\n pady=2)\r\n Label(frame_cols, width=4, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Usuń\").grid(row=0, column=4, pady=2)\r\n frame_cols.grid(row=1, column=0)\r\n\r\n \"\"\" DANE Z BAZY DANYCH \"\"\"\r\n\r\n def change_data():\r\n changes = []\r\n for i in range(len(res_label)):\r\n query_SET = ''\r\n if res_label[i][0].get() != results[i][1]:\r\n query_SET += f'Name=\"{res_label[i][0].get()}\", '\r\n if res_label[i][1].get() != results[i][2]:\r\n query_SET += f'Set1=\"{res_label[i][1].get()}\", '\r\n\r\n if res_label[i][2].get() == '':\r\n if results[i][3] == '':\r\n pass\r\n else:\r\n query_SET += f'Weight=NULL, '\r\n else:\r\n try:\r\n weight = int(res_label[i][2].get())\r\n if weight != results[i][3]:\r\n query_SET += f'Weight={weight}, '\r\n except ValueError:\r\n error['text'] = 'Waga musi być liczbą'\r\n return\r\n\r\n if res_label[i][3].get() != results[i][4]:\r\n query_SET += f'Set2=\"{res_label[i][3].get()}\", '\r\n\r\n if query_SET != '':\r\n changes.append([results[i][0], query_SET])\r\n\r\n for change in changes:\r\n cursor.execute(f\"\"\"\r\n UPDATE attributes_sets \r\n SET {change[1][:-2]}\r\n WHERE ID={change[0]}\r\n \"\"\")\r\n db.commit()\r\n root.destroy()\r\n attributes_sets_options()\r\n\r\n def remove():\r\n for i in range(len(var)):\r\n if var[i].get() == 1:\r\n cursor.execute(f\"\"\"\r\n DELETE FROM attributes_sets\r\n WHERE ID={results[i][0]};\r\n \"\"\")\r\n db.commit()\r\n root.destroy()\r\n attributes_sets_options()\r\n\r\n frame_data = LabelFrame(view_frame, text=\"Dane\")\r\n frame_data.configure(bg=std_bg)\r\n res_label = []\r\n var = []\r\n for i in range(len(results)):\r\n var.append(IntVar())\r\n res_label.append([\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Entry(frame_data, width=20, bg='#525252', 
fg='#EEEEEE', justify=LEFT),\r\n Checkbutton(frame_data, width=1, bg=std_bg, variable=var[i], onvalue=1)\r\n ])\r\n\r\n for i in range(len(res_label)):\r\n res_label[i][0].insert(0, results[i][1])\r\n res_label[i][1].insert(0, results[i][2])\r\n res_label[i][2].insert(0, results[i][3])\r\n res_label[i][3].insert(0, results[i][4])\r\n res_label[i][0].grid(row=i, column=0, pady=2)\r\n res_label[i][1].grid(row=i, column=1, pady=2)\r\n res_label[i][2].grid(row=i, column=2, pady=2)\r\n res_label[i][3].grid(row=i, column=3, pady=2)\r\n res_label[i][4].grid(row=i, column=4, pady=2)\r\n\r\n change_button = Button(frame_data, text=\"Zmień\", width=16, bg='#525252', fg='#EEEEEE', command=change_data)\r\n change_button.grid(row=len(res_label) + 1, column=3)\r\n remove_button = Button(frame_data, text=\"Usuń\", width=4, bg='#525252', fg='#EEEEEE', command=remove)\r\n remove_button.grid(row=len(res_label) + 1, column=4)\r\n frame_data.grid(row=2, column=0)\r\n\r\n \"\"\" DODAWANIE NOWEJ POZYCJI \"\"\"\r\n\r\n def add_new():\r\n if not e_name.get():\r\n error['text'] = 'Nazwa musi być podana'\r\n return\r\n elif not e_set1.get():\r\n error['text'] = 'Pierwszy zestaw musi być podany'\r\n return\r\n if e_weight.get() != '':\r\n try:\r\n weight = int(e_weight.get())\r\n except ValueError:\r\n error['text'] = 'Waga musi być liczbą'\r\n return\r\n else:\r\n weight = \"NULL\"\r\n\r\n error['text'] = ''\r\n cursor.execute(f\"\"\"\r\n INSERT INTO attributes_sets (Name, Set1, Weight, Set2)\r\n VALUES ('{e_name.get()}', '{e_set1.get()}', {weight}, '{e_set2.get()}');\r\n \"\"\")\r\n db.commit()\r\n e_name.delete(0, END)\r\n e_set1.delete(0, END)\r\n e_weight.delete(0, END)\r\n e_set2.delete(0, END)\r\n root.destroy()\r\n attributes_sets_options()\r\n\r\n frame_new = LabelFrame(view_frame, text=\"Dodaj nowe\")\r\n frame_new.configure(bg=std_bg)\r\n e_name = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_set1 = Entry(frame_new, width=20, bg='#525252', 
fg='#EEEEEE', justify=LEFT)\r\n e_weight = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_set2 = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_name.grid(row=0, column=0)\r\n e_set1.grid(row=0, column=1)\r\n e_weight.grid(row=0, column=2)\r\n e_set2.grid(row=0, column=3)\r\n add_button = Button(frame_new, text=\"Dodaj\", width=16, bg='#525252', fg='#EEEEEE', command=add_new)\r\n add_button.grid(row=1, column=3)\r\n error = Label(view_frame, text='', bg=std_bg, fg='red')\r\n frame_new.grid(row=0, column=0, pady=20)\r\n error.grid(row=3, column=0)\r\n\r\n exit_button = Button(view_frame, text=\"Wyjdź\", width=16, bg='#525252', fg='#EEEEEE', command=root.destroy)\r\n exit_button.grid(row=5, column=0)\r\n root.mainloop()\r\n","sub_path":"GUI/attributes_sets_options.py","file_name":"attributes_sets_options.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"84899468","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPipelines for building features\n\"\"\"\nimport logging\nimport numpy as np\n\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.decomposition import LatentDirichletAllocation as LatentDirichlet\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass DummyTransform(BaseEstimator):\n \"\"\"Return content length as features.\n do nothing to the labels\"\"\"\n\n def fit(self, X, y):\n return self\n\n def transform(self, X):\n return np.array(X['content'].str.len())[:, None]\n\n\nclass Tfidf(TfidfTransformer):\n\n def fit_transform(self, *args, **kwargs):\n logging.info('Fit & Transform TF-IDF...')\n return super().fit_transform(*args, **kwargs)\n\n\nclass SVD(TruncatedSVD):\n\n def fit_transform(self, *args, **kwargs):\n logging.info('Fit & Transform TruncatedSVD...')\n return super().fit_transform(*args, **kwargs)\n\n\nclass Count(CountVectorizer):\n\n def fit_transform(self, raw_documents, y=None):\n logging.info(f'Fit & Transform CountVectorizer...')\n ret = super().fit_transform(raw_documents, y=y)\n logging.info(f'Vocab Size: {len(self.vocabulary_)}')\n return ret\n\n\nclass SparseToDense(BaseEstimator):\n \"\"\"Return content length as features.\n do nothing to the labels\"\"\"\n\n def fit(self, X, y):\n return self\n\n def transform(self, X):\n return X.toarray()\n\n\nclass OverSample(BaseEstimator):\n\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n\n\n# ------- Feature Builder -----------------\ndef is_list_or_tuple(obj):\n return isinstance(obj, tuple) or isinstance(obj, list)\n\n\n# Feature model specifications\n# For Chinese\nfm_spec = {\n 'count': Count(ngram_range=(1, 5), min_df=0.001, max_df=0.99),\n 'tfidf': ['count', Tfidf()],\n 
'lsa_200': ['tfidf', SVD(n_components=200)],\n 'lsa_500': ['tfidf', SVD(n_components=500)],\n 'lsa_1k': ['tfidf', SVD(n_components=1000)],\n # smaller vocabulary (removed more stop and infrequent words)\n 'count_sv': Count(ngram_range=(1, 5), min_df=0.02, max_df=1.0),\n 'tfidf_sv': ['count_sv', Tfidf()],\n 'tfidf_sv_dense': ['tfidf_sv', SparseToDense()],\n 'lsa_200_sv': ['tfidf_sv', SVD(n_components=200)],\n 'lsa_500_sv': ['tfidf_sv', SVD(n_components=500)],\n}\n\n# For English\nfm_spec_en = fm_spec.copy()\nfm_spec_en['count'] = Count(\n ngram_range=(1, 4), min_df=0.01, stop_words='english')\nfm_spec_en['count_sv'] = Count(\n ngram_range=(1, 4), min_df=0.02, stop_words='english')\n\n\ndef ensure_named_steps(steps, spec=fm_spec, cache=None):\n \"\"\"make sure steps are named tuples.\n Also handles dependencies in steps.\n \"\"\"\n if not isinstance(steps, list):\n steps = [steps]\n # make a copy of the steps\n if is_list_or_tuple(steps):\n steps = list(steps)\n steps_ = []\n # while steps is not empty\n while steps:\n name, estimator = None, steps.pop(0)\n if isinstance(estimator, str):\n # if string, look it up from cache or spec\n if cache and estimator in cache:\n # if in cache, return cache\n name, estimator = estimator, cache[estimator]['model']\n else:\n # otherwise resolve spec\n name, estimator = estimator, spec[estimator]\n elif is_list_or_tuple(estimator):\n # when estimator has name already, expand it\n name, estimator = estimator\n\n # if is an array in cache\n if isinstance(estimator, list):\n # make sure current name is used for the last step\n # in the cached spec\n if not isinstance(estimator[-1], tuple):\n estimator[-1] = (name, estimator[-1])\n # add back to list\n steps = estimator + steps\n continue\n\n # Initialize estimator if necessary\n if callable(estimator):\n estimator = estimator()\n\n # if still haven't figured out step name\n if name is None:\n # get the name from class name\n name = estimator.__class__.__name__\n\n steps_.append((name, 
estimator))\n return steps_\n\n\nclass FeaturePipeline(Pipeline):\n \"\"\"\n FeaturePipeline with spec and cache support.\n\n Usage:\n\n fm_spec = {\n 'count': CountVectorizer(ngram_range=(1, 4), min_df=0.01,\n max_df=0.99),\n 'tfidf': ['count', TfidfTransformer],\n }\n fm = defaultdict(dict)\n model = FeaturePipeline('tfidf', spec=fm_spec, cache=fm)\n model.fit_transform(X_train)\n model.transform(X_test)\n\n Generates:\n\n > fm['tfidf']\n {'model': FeaturePipeline(...),\n 'train': numpy.array,\n 'test': numpy.array}\n > fm['count']\n {'model': FeaturePipeline(...), ...}\n\n Parameters\n ----------\n spec: a dictionary of specs matching count to id\n cache: a defaultdict to store estimator and train/test results\n \"\"\"\n\n @classmethod\n def from_spec(cls, name, spec=fm_spec, cache=None, **kwargs):\n if cache is not None and name in cache:\n return cache[name]['model']\n return cls(name, spec, cache, **kwargs)\n\n def __init__(self, steps='tfidf_sv', spec=fm_spec, cache=None, **kwargs):\n steps = ensure_named_steps(steps, spec, cache)\n super().__init__(steps, **kwargs)\n # if speficied cache, save self to cache\n if cache is not None:\n self.cache = cache[self._final_estimator_name]\n self.cache['model'] = self\n else:\n self.cache = None\n\n @property\n def _final_estimator_name(self):\n return self.steps[-1][0]\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Fit transform the training data and save the results in cache\"\"\"\n cache_name = self._final_estimator_name\n cache = self.cache\n if cache and 'train' in cache:\n logger.info(f' {cache_name}: fit_transform use cache.')\n return cache['train']\n Xt = super().fit_transform(X, y, **fit_params)\n if cache is not None:\n cache['train'] = Xt\n return Xt\n\n def transform(self, X):\n \"\"\"Transform the testing data and save the results in cache\"\"\"\n cache_name = self._final_estimator_name\n cache = self.cache\n if cache and 'test' in cache:\n logger.info(f' {cache_name}: transform use 
cache.')\n return cache['test']\n Xt = super().transform(X)\n if cache is not None:\n cache['test'] = Xt\n return Xt\n\n\n# ------- Additional helpers and basic pipelines ---------\n\ndef build_features(X_train, X_test, steps='tfidf_sv', spec=fm_spec, **kwargs):\n # if provided both training and testing dataset\n # otherwise, load it from cache\n feature = FeaturePipeline(steps, spec=spec, **kwargs)\n X_train = feature.fit_transform(X_train)\n X_test = feature.transform(X_test)\n return X_train, X_test\n\n","sub_path":"fgclassifier/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"36990727","text":"import calculate\r\nimport sklearn\r\nfrom showData import loadDatadet\r\nfrom calculate import kNN\r\n\"\"\"\r\n成绩得出的难点问题:\r\n1.对于不同衡量标准,其数学形式抽象出来的具体问题 度量不是单纯的线性关系\r\n2.对于不同衡量标准,键值的高低并不能绝对的影响最终成绩,若干项低标准值可能产生较高的结果\r\n3.在标准的整个百分制中,不同等级问题出现阈值分布不均匀\r\n\r\n4.四个特征属性 一共对应至少81种状态结果\r\n\"\"\"\r\nscores = [\r\n [80.6, 78.80, 90.5, 76, 0],\r\n [70.6, 78.80, 90.5, 76, 0],\r\n [60.6, 78.80, 90.5, 76, 0],\r\n [50.6, 78.80, 90.5, 76, 0],\r\n [40.6, 78.80, 40.5, 76, 0],\r\n # 存在拼凑可能 <60.0 正常值 偏差较大 正常值\r\n [20.6, 78.80, 90.5, 76, 0],\r\n # 没按老师给定的方法完成任务,完成了一定量,但质量不高\r\n # <60.0 正常值 正常值 正常值\r\n [90.6, 78.80, 90.5, 76, 0],\r\n # 正常作业\r\n [95.6, 78.80, 90.5, 76, 0],\r\n # 跟着老师完成任务,改动不大\r\n [99.6, 78.80, 90.5, 76, 0]\r\n # 97->99 正常值 正常值 正常值\r\n # 存在抄袭嫌疑\r\n]\r\n\r\n# 相似度\r\ne = calculate.distance(scores[1], scores[7])\r\ncos = calculate.cos(scores[1], scores[8])\r\nprint(e)\r\nprint(cos)\r\n\r\n# 分类\r\ninfile='./dataSet.txt'\r\nk = 5\r\nsrc = loadDatadet(infile, k)\r\nkNN(src)\r\n\r\n\r\n","sub_path":"CodeSimi/getScore.py","file_name":"getScore.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"405373778","text":"from __future__ import absolute_import\nfrom .cifar import load_batch\nfrom ..utils.data_utils import get_file\nfrom .. import backend as K\nimport numpy as np\nimport os\n\n\ndef load_data(label_mode='fine'):\n \"\"\"Loads CIFAR100 dataset.\n\n # Arguments\n label_mode: one of \"fine\", \"coarse\".\n\n # Returns\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n # Raises\n ValueError: in case of invalid `label_mode`.\n \"\"\"\n if label_mode not in ['fine', 'coarse']:\n raise ValueError('`label_mode` must be one of `\"fine\"`, `\"coarse\"`.')\n\n dirname = 'cifar-100-python'\n origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n fpath = os.path.join(path, 'train')\n x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')\n\n fpath = os.path.join(path, 'test')\n x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n if K.image_data_format() == 'channels_last':\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n return (x_train, y_train), (x_test, y_test)\n","sub_path":"Keras_tensorflow_nightly/source2.7/keras/datasets/cifar100.py","file_name":"cifar100.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"620393627","text":"# https://github.com/baldurk/renderdoc/search?l=Python&q=ExecuteAndInject\n\nimport logging\nimport os\nimport threading\nimport time\n\nimport renderdoc as rd\nfrom _android import get_device_name, get_main_activity\nfrom _shutil import get_date_str, get_home_path, setup_logger\n\n\ndef list_executables(remote):\n # GetHomeFolder() gives you a good default path to start with.\n # ListFolder() lists the contents of a folder and can recursively\n # browse the remote filesystem.\n home = remote.GetHomeFolder()\n paths = remote.ListFolder(home)\n logging.info(f\"Executables in home folder '{home}':\")\n for p in paths:\n logging.info(\" - \" + p.filename)\n\n\ndef main():\n # This sample is intended as an example of how to do remote capture and replay\n # as well as using device protocols to automatically enumerate remote targets.\n #\n # It is not complete since it requires filling in with custom logic to select\n # the executable and trigger the capture at the desired time\n # raise RuntimeError(\"This sample should not be run directly, read the source\")\n\n rd.InitialiseReplay(rd.GlobalEnvironment(), [])\n\n protocols = rd.GetSupportedDeviceProtocols()\n logging.info(f\"Supported device protocols: {protocols}\")\n\n protocol_to_use = \"adb\"\n\n # the protocol must be supported\n if protocol_to_use not in protocols:\n raise RuntimeError(f\"{protocol_to_use} protocol not supported\")\n\n protocol = rd.GetDeviceProtocolController(protocol_to_use)\n\n devices = protocol.GetDevices()\n\n if len(devices) == 0:\n raise RuntimeError(f\"no {protocol_to_use} devices connected\")\n\n if \"ANDROID_SERIAL\" in os.environ:\n device = os.environ[\"ANDROID_SERIAL\"]\n else:\n # Choose the first device\n device = devices[0]\n\n name = protocol.GetFriendlyName(device)\n logging.info(f\"Running test on {device} - {name}\")\n\n url = protocol.GetProtocolName() + \"://\" + device\n\n # Protocols can enumerate devices which are not supported. 
Capture/replay\n # is not guaranteed to work on these devices\n if not protocol.IsSupported(url):\n raise RuntimeError(f\"{device} doesn't support capture/replay - too old?\")\n\n # Protocol devices may be single-use and not support multiple captured programs\n # If so, trying to execute a program for capture is an error\n if not protocol.SupportsMultiplePrograms(url):\n # check to see if anything is running. Just use the URL\n ident = rd.EnumerateRemoteTargets(url, 0)\n\n if ident != 0:\n logging.info(f\"{name} already has a program running on {ident}\")\n # raise RuntimeError(f\"{name} already has a program running on {ident}\")\n\n while True:\n try:\n # Let's try to connect\n result, remote = rd.CreateRemoteServerConnection(url)\n\n if result == rd.ResultCode.NetworkIOFailed and protocol is not None:\n # If there's just no I/O, most likely the server is not running. If we have\n # a protocol, we can try to start the remote server\n logging.info(\"Couldn't connect to remote server, trying to start it\")\n\n result = protocol.StartRemoteServer(url)\n\n if result != rd.ResultCode.Succeeded:\n raise RuntimeError(\n f\"Couldn't launch remote server, got error {str(result)}\"\n )\n\n break\n\n except RuntimeError as ex:\n logging.warn(f\"Error on connection: {ex}\")\n logging.info(\"Try to connect again\")\n\n # We now have a remote connection. This works regardless of whether it's a device\n # with a protocol or not. 
In fact we are done with the protocol at this point\n logging.info(\"Got connection to remote server\")\n protocol = None\n\n # list_executables(remote)\n\n # Select your executable, perhaps hardcoded or browsing using the above\n # functions\n pkg_name = os.environ[\"PKG_NAME\"]\n\n exe = os.environ.get(\"START_ACTIVITY\")\n if not exe:\n exe = get_main_activity(pkg_name)\n\n workingDir = \"\"\n cmdLine = \"\"\n env = []\n opts = rd.GetDefaultCaptureOptions()\n\n logging.info(f\"Running {exe}\")\n\n result = remote.ExecuteAndInject(exe, workingDir, cmdLine, env, opts)\n\n if result.result != rd.ResultCode.Succeeded:\n remote.ShutdownServerAndConnection()\n raise RuntimeError(f\"Couldn't launch {exe}, got error {str(result.result)}\")\n\n # Spin up a thread to keep the remote server connection alive while we make a capture,\n # as it will time out after 5 seconds of inactivity\n def ping_remote(remote, kill):\n success = True\n while success and not kill.is_set():\n success = remote.Ping()\n time.sleep(1)\n\n kill = threading.Event()\n ping_thread = threading.Thread(target=ping_remote, args=(remote, kill))\n ping_thread.start()\n\n # Create target control connection\n target = rd.CreateTargetControl(url, result.ident, \"remote_capture.py\", True)\n\n if target is None:\n kill.set()\n ping_thread.join()\n remote.ShutdownServerAndConnection()\n raise RuntimeError(f\"Couldn't connect to target control for {exe}\")\n\n logging.info(\"Connected - waiting for desired capture\")\n\n # TODO: Wait for the capture condition we want\n # capture_condition()\n\n logging.info(\"Wait for 15 seconds\")\n time.sleep(15)\n\n logging.info(\"Triggering capture\")\n\n target.TriggerCapture(1)\n\n # Pump messages, keep waiting until we get a capture message. 
Time out after 30 seconds\n msg = None\n start = time.clock()\n while msg is None or msg.type != rd.TargetControlMessageType.NewCapture:\n msg = target.ReceiveMessage(None)\n\n if time.clock() - start > 30:\n break\n\n # Close the target connection, we're done either way\n target.Shutdown()\n target = None\n\n # Stop the background ping thread\n kill.set()\n ping_thread.join()\n\n # If we didn't get a capture, error now\n if msg.type != rd.TargetControlMessageType.NewCapture:\n remote.ShutdownServerAndConnection()\n raise RuntimeError(\n \"Didn't get new capture notification after triggering capture\"\n )\n\n cap_path = msg.newCapture.path\n cap_id = msg.newCapture.captureId\n\n logging.info(\n f\"Got new capture at {cap_path} which is frame {msg.newCapture.frameNumber} with {msg.newCapture.api}\"\n )\n\n # We could save the capture locally\n local_file = os.path.join(\n get_home_path(),\n \"Desktop\",\n f\"{pkg_name}-{get_device_name()}-{get_date_str()}.rdc\",\n )\n logging.info(f\"Save capture to {local_file}\")\n remote.CopyCaptureFromRemote(\n cap_path,\n local_file,\n None,\n )\n\n\nsetup_logger()\nmain()\n","sub_path":"scripts/r/rdoc/capture_renderdoc_android.py","file_name":"capture_renderdoc_android.py","file_ext":"py","file_size_in_byte":6784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"173537925","text":"from loguru import logger\n\nfrom utils import request, run_func, mongo\nfrom config import ACCESS_TOKEN, RUN_SIGN\n\n\n@run_func()\ndef writer(row: dict or list):\n if isinstance(row, dict):\n row.update({'times': f'{RUN_SIGN}'})\n elif isinstance(row, list):\n [x.update({'times': f'{RUN_SIGN}'}) for x in row]\n\n mongo.insert(row, 'kaiman')\n\n\n@run_func()\ndef auto_list():\n uri = 'https://kaiman.tradedge.cn/api/user/user_data/list'\n header = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36 MicroMessenger/7.0.9.501 NetType/WIFI MiniProgramEnv/Windows WindowsWechat',\n 'auth-token': ACCESS_TOKEN,\n 'content-type': 'application/json',\n # 'Referer': 'https://servicewechat.com/wx2fc2b286963f2cff/1/page-frame.html',\n 'Accept-Encoding': 'gzip, deflate, br'\n }\n\n user_list = []\n\n data = {'keyword': '', 'nextKey': '', 'pageSize': '200'}\n resp = request(uri, header, data=data, json=True)\n\n while True:\n for user in resp.get('data').get('list'):\n user_id = user.get('userId')\n if user_id not in user_list:\n user_list.append(user_id)\n writer(user)\n else:\n logger.info(f'重复 - {user_id}')\n # writer(resp.get('data').get('list'))\n\n data['nextKey'] = resp.get('data').get('nextKey')\n resp = request(uri, header, data=data, json=True)\n\n\nif __name__ == '__main__':\n auto_list()\n","sub_path":"now/kaiman/spider_kaiman.py","file_name":"spider_kaiman.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"525872241","text":"import sys\nfrom pathlib import Path\nhome = str(Path.home())\nsys.path.insert(0, home+'/MasterThesis/')\n\n##\n\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom Preprocessing.Point import Point\nfrom Preprocessing.Coord import Coord\nfrom Preprocessing.Bounding_box import Bounding_box\n\n##\n\ndef convert_cardial_to_angular(cardinal_values):\n dict_cardinal = {\"N\": 360, \"NbE\": 11.25, \"NNE\": 22.5, \"NEbN\": 33.75, \"NE\": 45, \"NEbE\": 56.25,\n \"ENE\": 67.5, \"EbN\": 78.75, \"E\": 90, \"EbS\": 101.25, \"ESE\": 112.5, \"SEbE\": 123.75,\n \"SE\": 135, \"SEbS\": 146.25, \"SSE\": 157.5, \"SbE\": 168.75, \"S\": 180, \"SbW\": 191.25,\n \"SSW\": 202.5, \"SWbS\": 213.75, \"SW\": 225, \"SWbW\": 236.25, \"WSW\": 247.5, \"WbS\": 258.75,\n \"W\": 270, \"WbN\": 281.25, \"WNW\": 292.5, \"NWbW\": 303.75, \"NW\": 315, \"NWbN\": 326.25,\n \"NNW\": 337.5, \"NbW\": 348.75}\n angular_data = np.empty(len(cardinal_values))\n angular_data[:] = np.nan\n\n for i in range(len(cardinal_values)):\n if pd.isna(cardinal_values.iloc[i]) ==False:\n angular_data[i] = dict_cardinal[cardinal_values.iloc[i]]\n return pd.Series(angular_data)\n\n##\n\n# Read data from 12 monitoring stations\n\n'''\ndata_Aotizhongxin = pd.read_csv('data/data2013-2017/Aotizhongxin.csv')\ndata = pd.DataFrame(columns=data_Aotizhongxin.columns)\ndata = pd.concat([data, data_Aotizhongxin], axis=0)\ndata_changping = pd.read_csv('data/data2013-2017/Changping.csv')\ndata = pd.concat([data, data_changping], axis=0)\ndata_dingling = pd.read_csv('data/data2013-2017/Dingling.csv')\ndata = pd.concat([data, data_dingling], axis=0)\ndata_dongsi = pd.read_csv('data/data2013-2017/Dongsi.csv')\ndata = pd.concat([data, data_dongsi], axis=0)\ndata_guanyuan = pd.read_csv('data/data2013-2017/Guanyuan.csv')\ndata = pd.concat([data, data_guanyuan], axis=0)\ndata_gucheng = 
pd.read_csv('data/data2013-2017/Gucheng.csv')\ndata = pd.concat([data, data_gucheng], axis=0)\ndata_huairou = pd.read_csv('data/data2013-2017/Huairou.csv')\ndata = pd.concat([data, data_huairou], axis=0)\ndata_Nongzhanguan = pd.read_csv('data/data2013-2017/Nongzhanguan.csv')\ndata = pd.concat([data, data_Nongzhanguan], axis=0)\ndata_Shunyi = pd.read_csv('data/data2013-2017/Shunyi.csv')\ndata = pd.concat([data, data_Shunyi], axis=0)\ndata_Tiantan = pd.read_csv('data/data2013-2017/Tiantan.csv')\ndata = pd.concat([data, data_Tiantan], axis=0)\ndata_Wanliu = pd.read_csv('data/data2013-2017/Wanliu.csv')\ndata = pd.concat([data, data_Wanliu], axis=0)\ndata_Wanshouxigong = pd.read_csv('data/data2013-2017/Wanshouxigong.csv')\ndata = pd.concat([data, data_Wanshouxigong], axis=0)\n'''\n\n##\n# See correlation\n'''\ndata = data.reset_index(drop=True)\ncorr_data = data.corr()\n'''\n\n##\n''''\ndata = data.drop(columns=[\"No\"])\n'''\n##\n'''\nimport datetime\ndata.insert(0,'utc_time',0)\n'''\n##\n# Merge data from 12 monitoring stations\n'''\nfor i in range(len(data)):\n data.iloc[i,0] = datetime.datetime(data.iloc[i,1], data.iloc[i,2], data.iloc[i,3], data.iloc[i,4], 0)\n print(i)\ndata.to_csv(\"data/data2013-2017/data_2013_2017.csv\", index=False)\n'''\n##\n\n# PREPROCESSING\n\n# Read data\nprint(\"reading data ...\")\ndata = pd.read_csv(home+\"/MasterThesis/data/data2013-2017/data_2013_2017.csv\")\nbb_aq_stations = pd.read_csv(home+'/MasterThesis/data/data2013-2017/unlabeled_points.csv')\ncoord_aq_stations = pd.read_csv(home+'/MasterThesis/data/data2013-2017/stations.csv')\npoint_centroids = pd.read_csv(home+'/MasterThesis/data/centroids_aq_csv.csv')\n\n##\n\n# Convert string to datetime\ndata['utc_time'] = pd.to_datetime(data['utc_time'],utc=True)\n\n##\n\nstart_date = '2016-01-01 00:00:00+00:00'\nend_date = \"2016-12-31 23:00:00+00:00\"\n\nprint(\"Selecting data of the 2016 ...\")\n# Select data of 2016\n\ndata_2016 = data[data['year'] == 2016]\n\n##\n\nprint(\"NaN 
values: \")\n\n# Nan values\nnull_columns= data_2016.columns[data_2016.isna().any()]\naq_sum_null = data_2016[null_columns].isna().sum()\nprint(\"Nan values in pollutant data\")\nprint(aq_sum_null)\n\n##\n\nprint(\"Percentage of NaN data\")\n# Get percentage of missing values\npercen_pm25 = (aq_sum_null['PM2.5'] * 100) / len(data_2016)\npercen_pm10 = (aq_sum_null['PM10'] * 100) / len(data_2016)\npercen_SO2 = (aq_sum_null['SO2'] * 100) / len(data_2016)\npercen_NO2 = (aq_sum_null['NO2'] * 100) / len(data_2016)\npercen_CO = (aq_sum_null['CO'] * 100) / len(data_2016)\npercen_O3 = (aq_sum_null['O3'] * 100) / len(data_2016)\nprint(\"Percentage PM2.5 Nan values: \", percen_pm25)\nprint(\"Percentage PM10 Nan values: \", percen_pm10)\nprint(\"Percentage O3 Nan values: \", percen_SO2)\nprint(\"Percentage PM2.5 Nan values: \", percen_NO2)\nprint(\"Percentage PM10 Nan values: \", percen_CO)\nprint(\"Percentage O3 Nan values: \", percen_O3)\n\n##\n\ndata_2016 = data_2016.drop(columns=[\"year\", \"month\", \"day\", \"hour\", \"RAIN\"])\n\n##\n\nprint(\"Showing if there are missing dates in the data ...\")\nend_date = pd.to_datetime(data_2016.utc_time.max())\ncount = 0\n\nflag = False\nfor station in data_2016.station.unique():\n start_date = pd.to_datetime(data_2016.utc_time.min())\n while start_date <= end_date:\n if start_date != pd.to_datetime(data_2016.iloc[count, 0]):\n print(\"Missing date: \", start_date)\n flag = True\n start_date += timedelta(hours=1)\n count += 1\n\nif flag == False:\n print(\"There aren't missing dates in the data\")\n\n##\n\nselected_columns = ['station', 'utc_time', 'TEMP', 'PRES', 'DEWP', 'WSPM', 'wd', 'PM2.5']\ndata = data_2016[selected_columns].reset_index(drop=True)\n\n##\n\ndata['wd'] = convert_cardial_to_angular(data['wd'])\n\n##\n\nprint(\"Applying Linear Interpolation for NaN values ...\")\n# Apply linear interpolation for NaN values\ndata = data.assign(pm25=data['PM2.5'].interpolate(method='linear'))\ndata = 
data.assign(temp=data['TEMP'].interpolate(method='linear'))\ndata = data.assign(pres=data['PRES'].interpolate(method='linear'))\ndata = data.assign(dewp=data['DEWP'].interpolate(method='linear'))\ndata = data.assign(ws=data['WSPM'].interpolate(method='linear'))\ndata = data.assign(wd=data['wd'].interpolate(method='linear'))\n\n##\n\nselected_columns = ['station', 'utc_time', 'temp', 'pres', 'dewp', 'ws', 'wd', 'pm25']\ndata = data[selected_columns]\n\n##\n\nplt.figure(figsize = (10, 12))\nplt.plot(data[\"pm25\"])\n\n##\n# See heat map of missing values\nsns.heatmap(data.isnull(), cbar=False)\n\n##\n\n# Get neighbors for each monitoring station\nknn = 7\n\ndict_neighbors = {}\nfor i in range(len(coord_aq_stations)):\n other_stations = coord_aq_stations.drop(i, axis=0)\n coord = Coord(coord_aq_stations.iloc[i, 1], coord_aq_stations.iloc[i, 2])\n neighbors = coord.get_neighbors(other_stations, knn)\n dict_neighbors.update({str(coord_aq_stations.iloc[i, 0]): neighbors})\n\n##\n\nprint(\"Creating data processed\")\ndata_processed = pd.DataFrame(columns=['station','utc_time','latitude',\n 'longitude','temp','pres','dewp','wind_speed', 'wind_direction',\n 'pm25_1','pm25_2','pm25_3', 'pm25_4','pm25_5','pm25_6','pm25_7',\n 'dist_1','dist_2','dist_3','dist_4','dist_5','dist_6','dist_7',\n 'pm25'])\n\nwith tqdm(total=len(data)) as pbar:\n for i in range(len(data)):#lendata\n current_station = data.iloc[i, 0]\n current_time = data.iloc[i, 1]\n current_lon, current_lat = coord_aq_stations[coord_aq_stations['station'] == current_station].iloc[0, 1:3]\n temp = data.iloc[i, 2]\n pres = data.iloc[i, 3]\n dewp = data.iloc[i, 4]\n wind_speed = data.iloc[i, 5]\n wind_direction = data.iloc[i, 6]\n current_pm25 = data.iloc[i, -1]\n\n row = {'station': current_station, 'utc_time': current_time, 'latitude': current_lat, 'longitude': current_lon,\n 'temp': temp, 'pres': pres, 'dewp': dewp, 'wind_speed': wind_speed, 'wind_direction': wind_direction}\n\n # Get PM2.5 of neighbors\n neighbors 
= dict_neighbors[current_station]\n for k in range(knn):\n station, distance = neighbors['stations'][k], neighbors['distances'][k]\n mask = (data['station'] == station) & (data['utc_time'] == current_time)\n neighbor_pm25 = data.loc[mask].iloc[0, -1]\n row.update({'pm25_'+str(k+1): neighbor_pm25})\n row.update({'dist_'+str(k+1): distance})\n\n row.update({'pm25': current_pm25})\n data_processed = data_processed.append(row, ignore_index=True)\n pbar.update(1)\n\n##\n\ndata_processed.to_csv(home+\"/MasterThesis/data/data2013-2017/data_processed_uci.csv\", index=False)\nprint(\"Data created and saved as 'data_processed_uci.csv'\")\n\n##\n","sub_path":"Preprocessing/preprocessing_2013_2017.py","file_name":"preprocessing_2013_2017.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"184965162","text":"\"\"\"Settings for storage app.\n\nUsed to provide s simple default configuration.\n\"\"\"\nfrom django.conf import settings\n\ndata_dir = getattr(settings, \"FLOW_EXECUTOR\", {}).get(\"DATA_DIR\", \"/some_path\")\ndefault_local_connector = \"local\"\ndefault_storage_connectors = {\n default_local_connector: {\n \"connector\": \"resolwe.storage.connectors.localconnector.LocalFilesystemConnector\",\n \"config\": {\"priority\": 0, \"path\": data_dir},\n },\n}\n\nSTORAGE_LOCAL_CONNECTOR = getattr(\n settings, \"STORAGE_LOCAL_CONNECTOR\", default_local_connector\n)\nSTORAGE_CONNECTORS = getattr(settings, \"STORAGE_CONNECTORS\", default_storage_connectors)\n","sub_path":"resolwe/storage/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"382706035","text":"\"\"\"\nThis file is part of Linspector (https://linspector.org/)\nCopyright (c) 2013-2023 Johannes Findeisen . All Rights Reserved.\nSee LICENSE.\n\"\"\"\n\nimport os\nimport pprint\n\nimport paramiko\n\nfrom linspector.service import Service\n\n\ndef create(configuration, environment, log):\n return SSHService(configuration, environment, log)\n\n\nclass SSHService(Service):\n\n def execute(self, **kwargs):\n path = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')\n key = paramiko.RSAKey.from_private_key_file(path)\n\n client = paramiko.SSHClient()\n client.get_host_keys().add('hanez.org', 'ssh-rsa', key)\n pprint.pprint(client._host_keys)\n\n client.connect('hanez.org', username='hanez')\n\n # self.command.call() ist dann das:\n stdin, stdout, stderr = client.exec_command('ls')\n for line in stdout:\n print('... ' + line.strip('\\n'))\n client.close()\n","sub_path":"linspector/services/net/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"78144775","text":"import time\r\n\r\ndef test_check_exists_by_css_selector(browser):\r\n\r\n link = f\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\r\n browser.get(link)\r\n time.sleep(30) \r\n button = len(browser.find_elements_by_css_selector('button.btn.btn-lg.btn-primary')) > 0\r\n\r\n assert button > 0, f\"No element: button\"\r\n\r\n","sub_path":"Step_3/test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"283872034","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nimport pymongo\n\nimport collections\n\nfrom matplotlib import rcParams\nrcParams.update({'figure.autolayout': True})\n\ndef flatten(d, parent_key='', sep='_'):\n \"\"\"\n flatten nested Mongodb records\n \"\"\"\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\nclient = pymongo.MongoClient()\ndb = client.lai\nresults = db.results\n\ndf = pd.DataFrame(list(map(flatten, results.find({'batch_size' : 20, 'window_size' : {'$gt' : 2}}))))\nfig, ax = plt.subplots()\n\nax.scatter(df['window_size'], df['overall_acc_avg'], color='red')\nax.set_ylabel('average accuracy\\n(across 20\\ntest chromosomes)', color='red',\n rotation='horizontal', horizontalalignment='right', multialignment='center')\n\nax2 = ax.twinx()\nax2.scatter(df['window_size'], df['time'] / 60, color='blue')\nax2.set_ylabel('runtime\\n(minutes)', color='blue',\n rotation='horizontal', horizontalalignment='left', multialignment='center')\n\nax.set_xlabel('window size')\nplt.title('window size vs. accuracy and runtime (using sliding windows)')\nplt.gcf().set_size_inches([9.4, 4.8])\n\nfig.savefig('results/sliding_windows/window_size_vs_accuracy_and_runtime')\n","sub_path":"bin/pythonScripts/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"30790565","text":"#!/usr/bin/env python\n\nimport Prediction as Pred\nfrom itertools import chain\nfrom Render import FigureCanvasPixbuf, drawHist, drawBar\nfrom os.path import basename\nfrom operator import itemgetter\nfrom multiprocessing import Process, Manager, Value\nfrom time import time\nfrom math import sqrt\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\nclass Model():\n def __init__(self):\n self.tst_inpt = [['','']]\n \n def _pred_part(self, i, partisi, wdw, hist, hmms, al_fst):\n train_seqs_part = []\n for e, train in enumerate(partisi):\n if e == i:\n test_seqs_part = train\n else:\n train_seqs_part += train\n traind_mdl, hmm = Pred.train(train_seqs_part, wdw, al_fst)\n pred_seqs, probs = Pred.predict(traind_mdl, test_seqs_part, wdw, al_fst)\n _, acc = Pred.test(pred_seqs, test_seqs_part)\n hmms.value += hmm\n hist += acc\n \n def _calc_yres(self, hist):\n res = []\n res.append(min(hist))\n res.append(max(hist))\n res.append(sum(hist) / len(hist))\n v = sum((i - res[2])**2 for i in hist) / (len(hist) - 1)\n res.append(sqrt(v))\n \n p = 0\n for i in hist:\n if res[2] - res[3] <= i <= res[2] + res[3]:\n p +=1 \n\n res.append(p / float(len(hist))) \n return res\n \n def _calc_ytot(self, res):\n s = []\n s.append(sum(res[k][0] for k in res))\n s.append(sum(res[k][1] for k in res))\n s.append(min(res[k][2] for k in res))\n s.append(max(res[k][3] for k in res))\n s.append(sum(res[k][4] for k in res) / len(res))\n s.append(sqrt(sum(res[k][5] ** 2 for k in res) / len(res)))\n s.append(sum(res[k][6] for k in res) / len(res))\n return s\n \n def _calc_wtot(self, wtot):\n s = []\n for i in range(7):\n s.append(sum(wtot[w][i] for w in wtot) / float(len(wtot)))\n \n return s\n \n def _draw_ytot(self, key, hist, al_fst, wdw, yres):\n hist_all = list(chain.from_iterable([hist[k] for k in hist]))\n al = 'Aligned' if al_fst else 'Unaligned'\n title = '%s (s = %d, %s)' % (key, wdw, al)\n \n return [drawHist(hist_all, title), drawBar(yres, 
title)]\n \n def _draw_tot(self, wtot, al_fst, key):\n al = 'Aligned' if al_fst else 'Unaligned'\n title = '%s (All Slicing Size, %s)' % (key, al)\n \n return drawBar(wtot, title)\n \n def _create_yres(self, key, manager, fsts, y, al_fst, wdw):\n train_input = Pred.inputTraining(fsts[y], fsts[y+1], al_fst)\n yres = [len(train_input)]\n partisi = Pred.partitioning(train_input) \n p = []\n hist = manager.list([])\n yres.append(Value('i', 0))\n \n for i in range(len(partisi)):\n arg = (i, partisi, wdw, hist, yres[1], al_fst)\n p.append(Process(target=self._pred_part, args=arg))\n p[i].start()\n \n for t in p:\n t.join()\n\n yres[1] = yres[1].value\n yres += self._calc_yres(hist)\n al = 'Aligned' if al_fst else 'Unaligned'\n title = '%s (s = %d, %s)' % (key, wdw, al)\n yres.append(drawHist(hist, title))\n \n return yres, hist\n \n def _create_ytot(self, fsts, yres, hist, al_fst, wdw):\n key = basename(fsts[0])[:-6] + '-' + basename(fsts[-1])[:-6]\n ytot = self._calc_ytot(yres)\n ytot.append(self._draw_ytot(key, hist, al_fst, wdw, yres))\n ytot.append(key)\n \n return ytot\n \n def _create_tot(self, fsts, wtot, al_fst):\n key = basename(fsts[0])[:-6] + '-' + basename(fsts[-1])[:-6]\n tot = self._calc_wtot(wtot)\n tot.append(self._draw_tot(wtot, al_fst, key))\n tot.append(key)\n \n return tot\n \n def inputTraining(self, train_fst, target_fst, win, alf):\n self.wdw = win\n self.al_fst = alf\n self.train_input = Pred.inputTraining(train_fst, target_fst, alf)\n \n return len(self.train_input)\n \n def train(self):\n self.traind_mdl, no = Pred.train(self.train_input, self.wdw, self.al_fst)\n \n return no\n \n def predict(self, test_seq):\n self.tst_inpt[0][0] = SeqRecord(Seq(test_seq))\n p = Pred.predict(self.traind_mdl, self.tst_inpt, self.wdw, self.al_fst)\n self.pred_seqs, self.probs = p[0], p[1]\n \n return self.pred_seqs[0], self.probs[0]\n \n def test(self, target_seq):\n self.tst_inpt[0][1] = SeqRecord(Seq(target_seq))\n self.aligned, self.acc = 
Pred.test(self.pred_seqs, self.tst_inpt)\n \n return self.aligned[0][0], self.aligned[0][1], self.acc[0] \n \n def validate(self, fasta, w, al_fst):\n fsts = sorted(fasta)\n wres, wtot, tot = {}, {}, []\n manager = Manager()\n \n for wdw in w:\n yres, ytot, hist = {}, [], {}\n \n for y in range(len(fsts[:-1])):\n key = basename(fsts[y])[:-6] + '-' + basename(fsts[y+1])[:-6]\n yres[key], hist[key] = self._create_yres(key, manager, fsts, y, al_fst, wdw)\n \n if len(hist) > 1:\n ytot = self._create_ytot(fsts, yres, hist, al_fst, wdw)\n \n wres[wdw], wtot[wdw] = yres, ytot\n \n if len(w) > 1:\n tot = self._create_tot(fsts, wtot, al_fst)\n \n return wres, wtot, tot\n\n def get_pred_seq(self):\n return self.pred_seqs[0]","sub_path":"src/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"253626358","text":"# BACKJOON #2851 <슈퍼 마리오>\n# https://www.acmicpc.net/problem/2851\n\narr = [int(input()) for i in range(10)]\nscore = 0\nfor n in arr:\n\ttemp = score + n\n\tif temp >= 100:\n\t\tif temp-100 > 100-score:\n\t\t\tprint(score)\n\t\telse: print(temp)\n\t\texit()\n\telse: score = temp\nprint(score)\n","sub_path":"Baekjoon/2851_SuperMario.py","file_name":"2851_SuperMario.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"69366337","text":"# encoding=utf-8\nimport sys\nfrom hyper_and_conf import hyper_param as hyperParam\nfrom hyper_and_conf import hyper_train, hyper_optimizer\nimport core_lip_main\nimport core_data_SRCandTGT\nfrom core_resnet import identity_block, conv_block\nfrom tensorflow.python.client import device_lib\n# from tensorflow.python.keras import initializers\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.keras import regularizers\nimport core_Transformer_model\nL2_WEIGHT_DECAY = 1e-4\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\nDATA_PATH = sys.path[0]\nSYS_PATH = sys.path[1]\n# TRAIN_PATH = '/home/vivalavida/massive_data/lip_reading_data/sentence_level_lrs2'\n# C = '/home/vivalavida/massive_data/lip_reading_data/sentence_level_lrs2/main'\nsrc_data_path = [DATA_PATH + \"/corpus/lip_corpus.txt\"]\n\ntgt_data_path = [DATA_PATH + \"/corpus/lip_corpus.txt\"]\n# TFRECORD = '/home/vivalavida/massive_data/lip_reading_TFRecord/tfrecord_word'\n# TFRECORD = '/home/vivalavida/massive_data/sentence_lip_data_tfrecord_train_v1'\nTFRECORD = '/home/vivalavida/massive_data/'\n# TFRECORD = '/home/wonderwall/data'\n\n# TFRECORD = '/home/vivalavida/massive_data/fc1'\n\n# TFRECORD = '/data'\n\n# TFRECORD = '/Users/barid/Documents/workspace/batch_data/'\n\n# PADDED_IMG = 150\n# PADDED_TEXT = 80\nPADDED_IMG = 50\nPADDED_TEXT = 1\n\n\ndef get_vgg(self):\n if tf.io.gfile.exists('pre_train/vgg16_pre_all'):\n vgg16 = tf.keras.models.load_model('pre_train/vgg16_pre_all')\n else:\n vgg16 = tf.keras.applications.vgg16.VGG16(\n include_top=True, weights='imagenet')\n return vgg16\n\n\ndef get_available_cpus():\n local_device_protos = device_lib.list_local_devices()\n return len([x.name for x in local_device_protos if x.device_type == 'CPU'])\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return len([x.name for x in local_device_protos if x.device_type == 'GPU'])\n\n\ndef cpus_device():\n local_device_protos = 
device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'CPU']\n\n\ndef gpus_device():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\ngpu = get_available_gpus()\nTRAIN_MODE = 'large' if gpu > 0 else 'test'\nhp = hyperParam.HyperParam(TRAIN_MODE, gpu=get_available_gpus())\nPAD_ID_int64 = tf.cast(hp.PAD_ID, tf.int64)\nPAD_ID_float32 = tf.cast(hp.PAD_ID, tf.float32)\n\ndata_manager = core_data_SRCandTGT.DatasetManager(\n src_data_path,\n tgt_data_path,\n batch_size=hp.batch_size,\n PAD_ID=hp.PAD_ID,\n EOS_ID=hp.EOS_ID,\n # shuffle=hp.data_shuffle,\n shuffle=hp.data_shuffle,\n max_length=hp.max_sequence_length,\n tfrecord_path=TFRECORD)\n\n# train_dataset, val_dataset, test_dataset = data_manager.prepare_data()\n\n\ndef get_hp():\n return hp\n\n\ndef backend_config():\n config = tf.compat.v1.ConfigProto()\n # config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n config.intra_op_parallelism_threads = 4\n config.inter_op_parallelism_threads = 4\n # # Don't pre-allocate memory; allocate as-needed\n config.gpu_options.allow_growth = True\n\n # Only allow a total of half the GPU memory to be allocated\n # config.gpu_options.per_process_gpu_memory_fraction = 0.999\n # config.allow_soft_placement = True\n\n return config\n\n\ndef input_fn(flag=\"TRAIN\"):\n if flag == \"VAL\":\n dataset = data_manager.get_raw_val_dataset()\n else:\n if flag == \"TEST\":\n dataset = data_manager.get_raw_test_dataset()\n else:\n if flag == \"TRAIN\":\n dataset = data_manager.get_raw_train_dataset()\n else:\n assert (\"data error\")\n return dataset\n\n\ndef map_data_for_feed_pertunated(x, y):\n return ((x, randomly_pertunate_input(y)), y)\n\n\ndef map_data_for_feed(x, y):\n return ((x, y), y)\n\n\ndef map_data_for_text(x):\n return ((x, x), x)\n\n\ndef randomly_pertunate_input(x):\n determinater = np.random.randint(10)\n if determinater 
> 3:\n return x\n else:\n index = np.random.randint(2, size=(1, 80))\n x = x * index\n return x\n\n\ndef pad_sample(dataset, batch_size):\n # dataset = dataset.shuffle(200000, reshuffle_each_iteration=True)\n dataset = dataset.padded_batch(\n hp.batch_size,\n (\n [PADDED_IMG, 32, 64, 3], # source vectors of unknown size\n [PADDED_TEXT]), # target vectors of unknown size\n drop_remainder=True)\n\n return dataset\n\n\ndef pad_text_sample(dataset, batch_size):\n # dataset = dataset.shuffle(200000, reshuffle_each_iteration=True)\n dataset = dataset.padded_batch(\n hp.batch_size,\n [120], # target vectors of unknown size\n drop_remainder=True)\n\n return dataset\n\n\ndef reshape_data(src, tgt):\n # return tf.reshape(src, [-1, 32, 64, 3]), tgt\n return tf.reshape(src, [-1, 32, 64, 3]) / 127.5 - 1.0, tgt\n\n\ndef map_data_for_val(src, tgt):\n return src, tgt\n\n\ndef train_Transformer_input():\n dataset = data_manager.get_text_train_dataset()\n # dataset = dataset.shuffle(100000)\n dataset = pad_text_sample(dataset, batch_size=hp.batch_size)\n\n dataset = dataset.map(map_data_for_text)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef train_input(seq2seq=True, pertunate=False):\n\n dataset = input_fn('TRAIN')\n # dataset = dataset.shuffle(100000)\n dataset = dataset.map(reshape_data)\n dataset = pad_sample(dataset, batch_size=hp.batch_size)\n\n if pertunate:\n dataset = dataset.map(map_data_for_feed_pertunated)\n else:\n dataset = dataset.map(map_data_for_feed)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef test_input(seq2seq=True, pertunate=False):\n\n dataset = input_fn('TRAIN')\n # dataset = dataset.shuffle(100000)\n dataset = dataset.map(reshape_data)\n dataset = dataset.batch(1)\n dataset = dataset.map(map_data_for_val)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef val_input(seq2seq=True):\n dataset = input_fn(\"TRAIN\")\n dataset = 
dataset.map(reshape_data)\n dataset = pad_sample(dataset, 4)\n # dataset = dataset.map(map_data_for_val)\n dataset = dataset.map(map_data_for_val)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef get_external_loss():\n return hyper_train.Onehot_CrossEntropy(hp.vocabulary_size, smoothing=0.1)\n\n\ndef get_image_processor():\n # with tf.device(\"/cpu:0\"):\n if tf.io.gfile.exists('pre_train/res50_pre_all'):\n res = tf.keras.models.load_model('pre_train/res50_pre_all')\n else:\n res = tf.keras.applications.resnet50.ResNet50(\n include_top=False, weights=None, input_shape=[32, 64, 3])\n # pooling='avg',\n # classes=10000)\n res.save('pre_train/res50_pre_all')\n return res\n\n\ndef model_structure(training=True, batch=0, mode='LIP'):\n if batch != 0:\n batch_size = batch\n else:\n batch_size = hp.batch_size\n img_input = tf.keras.layers.Input(\n shape=[PADDED_IMG, 32, 64, 3], dtype=tf.float32, name='Raw_input')\n if mode != 'LIP':\n img_input = tf.keras.layers.Input(\n shape=[None], dtype=tf.int64, name='src_text')\n if training:\n tgt = tf.keras.layers.Input(\n shape=[None], dtype=tf.int64, name='target_text')\n daedalus = core_lip_main.Daedalus(\n hp.max_sequence_length, hp.vocabulary_size, hp.embedding_size,\n hp.batch_size, hp.num_units, hp.num_heads, hp.num_encoder_layers,\n hp.num_decoder_layers, hp.dropout, hp.EOS_ID, hp.PAD_ID,\n hp.MASK_ID)\n # res_out = tf.reshape(res_out, [-1, 200, 4 * 4 * 512])\n logits = daedalus([img_input, tgt], training=training, mode=mode)\n logits = hyper_train.MetricLayer(hp.vocabulary_size)([logits, tgt])\n logits = hyper_train.CrossEntropy_layer(hp.vocabulary_size,\n 0.1)([logits, tgt])\n logits = tf.keras.layers.Lambda(lambda x: x, name=\"logits\")(logits)\n\n model = tf.keras.Model(inputs=[img_input, tgt], outputs=logits)\n else:\n daedalus = core_lip_main.Daedalus(\n hp.max_sequence_length, hp.vocabulary_size, hp.embedding_size,\n batch_size, hp.num_units, hp.num_heads, 
hp.num_encoder_layers,\n hp.num_decoder_layers, hp.dropout, hp.EOS_ID, hp.PAD_ID,\n hp.MASK_ID)\n metric = hyper_train.MetricLayer(hp.vocabulary_size)\n loss = hyper_train.CrossEntropy(hp.vocabulary_size, 0.1)\n ret = daedalus([img_input], training=training)\n outputs, scores = ret[\"outputs\"], ret[\"scores\"]\n model = tf.keras.Model(img_input, outputs)\n return model\n\n\ndef text_model_structure(training=True, batch=0):\n if batch != 0:\n batch_size = batch\n else:\n batch_size = hp.batch_size\n src = tf.keras.layers.Input(shape=[None], dtype=tf.int64, name='src_text')\n if training:\n tgt = tf.keras.layers.Input(\n shape=[None], dtype=tf.int64, name='target_text')\n daedalus = core_Transformer_model.Transformer(\n hp.max_sequence_length,\n hp.vocabulary_size,\n hp.embedding_size,\n hp.batch_size,\n hp.num_units,\n hp.num_heads,\n hp.num_encoder_layers,\n hp.num_decoder_layers,\n hp.dropout,\n hp.EOS_ID,\n hp.PAD_ID,\n )\n # res_out = tf.reshape(res_out, [-1, 200, 4 * 4 * 512])\n logits = daedalus([src, tgt], training=training)\n logits = hyper_train.MetricLayer(hp.vocabulary_size)([logits, tgt])\n logits = hyper_train.CrossEntropy_layer(hp.vocabulary_size,\n 0.1)([logits, tgt])\n logits = tf.keras.layers.Lambda(lambda x: x, name=\"logits\")(logits)\n\n model = tf.keras.Model(inputs=[src, tgt], outputs=logits)\n # else:\n # daedalus = core_lip_main.Daedalus(\n # hp.max_sequence_length, hp.vocabulary_size, hp.embedding_size,\n # batch_size, hp.num_units, hp.num_heads, hp.num_encoder_layers,\n # hp.num_decoder_layers, hp.dropout, hp.EOS_ID, hp.PAD_ID,\n # hp.MASK_ID)\n # metric = hyper_train.MetricLayer(hp.vocabulary_size)\n # loss = hyper_train.CrossEntropy(hp.vocabulary_size, 0.1)\n # ret = daedalus([img_input], training=training)\n # outputs, scores = ret[\"outputs\"], ret[\"scores\"]\n # model = tf.keras.Model(img_input, outputs)\n return model\n\n\ndef train_model():\n return model_structure(training=True)\n\n\ndef test_model(batch=1):\n return 
model_structure(training=False, batch=1)\n\n\ndef get_optimizer():\n return tf.keras.optimizers.Adam(beta_1=0.1, beta_2=0.98, epsilon=1.0e-9)\n # return hyper_optimizer.LazyAdam(beta_1=0.1, beta_2=0.98, epsilon=1.0e-9)\n\n\ndef get_callbacks():\n lr_fn = hyper_optimizer.LearningRateFn(hp.lr, hp.num_units,\n hp.learning_warmup)\n LRschedule = hyper_optimizer.LearningRateScheduler(lr_fn, 0)\n TFboard = tf.keras.callbacks.TensorBoard(\n log_dir=hp.model_summary_dir,\n write_grads=True,\n histogram_freq=100,\n write_images=True,\n update_freq=100)\n TFchechpoint = tf.keras.callbacks.ModelCheckpoint(\n hp.model_checkpoint_dir + '/model.{epoch:02d}.ckpt',\n save_weights_only=True,\n verbose=1)\n NaNchecker = tf.keras.callbacks.TerminateOnNaN()\n ForceLrReduce = tf.keras.callbacks.ReduceLROnPlateau(\n monitor='accuracy', factor=0.2, patience=1, mode='max', min_lr=0.00001)\n return [LRschedule, TFboard, TFchechpoint, NaNchecker, ForceLrReduce]\n","sub_path":"core_model_initializer.py","file_name":"core_model_initializer.py","file_ext":"py","file_size_in_byte":11783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"43712397","text":"import sys\nsys.path.insert(0,'../examples/')\nfrom lorenz63 import *\nfrom numpy import *\nfrom scipy.interpolate import *\ndef plot_clvs():\n fig, ax = subplots(1,2)\n s = [0.7,0.3]\n eps = 1.e-2\n d = 2\n u0 = random.rand(d,1)\n n = 10000\n u = step(u0,s,n) #shape: (n+1)xdx1\n u = u[1:].T[0] # shape:dxn\n du = dstep(u,s) #shape:nxdxd\n P = clvs(u,du,d).T #shape:nxdxd\n v1 = P[0]\n v2 = P[1]\n ax[0].plot([u[0] - eps*v1[0], u[0] + eps*v1[0]],\\\n [u[1] - eps*v1[1], u[1] + eps*v1[1]],\\\n lw=2.0, color='red')\n ax[1].plot([u[0] - eps*v2[0], u[0] + eps*v2[0]],\\\n [u[1] - eps*v2[1], u[1] + eps*v2[1]],\\\n lw=2.0, color='black')\n\n ax[0].set_title('$V^1$',fontsize=24)\n \n ax[1].set_title('$V^2$',fontsize=24)\n for j in range(2):\n ax[j].xaxis.set_tick_params(labelsize=24)\n ax[j].yaxis.set_tick_params(labelsize=24)\n\n\n return fig,ax\n\ndef test_dstep():\n n = 100\n u = rand(n,3)\n s = rand(3)\n du_ana = dstep(u, s).T\n eps = 1.e-7\n du_x = (step(u + eps*reshape([1.,0.,0.],[1,3]),s,1) - \\\n step(u - eps*reshape([1.,0.,0.],[1,3]),s,1))/\\\n (2*eps)\n du_y = (step(u + eps*reshape([0.,1.,0.],[1,3]),s,1) - \\\n step(u - eps*reshape([0.,1.,0.],[1,3]),s,1))/\\\n (2*eps)\n\n du_z = (step(u + eps*reshape([0.,0.,1.],[1,3]),s,1) - \\\n step(u - eps*reshape([0.,0.,1.],[1,3]),s,1))/\\\n (2*eps)\n du_fd_x = du_x[-1]\n du_fd_y = du_y[-1]\n du_fd_z = du_z[-1]\n\n assert(allclose(du_fd_x, du_ana[0]))\n assert(allclose(du_fd_y, du_ana[1]))\n assert(allclose(du_fd_z, du_ana[2]))\n\n\ndef test_d2step():\n n = 100\n u = rand(n,3)\n \n s = rand(3)\n d2_ana = d2step(u, s)\n\n eps = 1.e-10\n d2_x = (dstep(u + eps*reshape([1.,0.,0.],[1,3]), s) -\\\n dstep(u - eps*reshape([1.,0.,0.],[1,3]), s))/\\\n (2*eps)\n d2_y = (dstep(u + eps*reshape([0.,1.,0.],[1,3]), s) -\\\n dstep(u - eps*reshape([0.,1.,0.],[1,3]), s))/\\\n (2*eps)\n d2_z = (dstep(u + eps*reshape([0.,0.,1.],[1,3]), s) -\\\n dstep(u - eps*reshape([0.,0.,1.],[1,3]), s))/\\\n (2*eps)\n \n 
assert(allclose(d2_x, d2_ana[:,0])) \n assert(allclose(d2_y, d2_ana[:,1])) \n assert(allclose(d2_z, d2_ana[:,2])) \n","sub_path":"tests/test_lorenz63.py","file_name":"test_lorenz63.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"414749304","text":"\n\nfrom xai.brain.wordbase.verbs._smile import _SMILE\n\n#calss header\nclass _SMILES(_SMILE, ):\n\tdef __init__(self,): \n\t\t_SMILE.__init__(self)\n\t\tself.name = \"SMILES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"smile\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_smiles.py","file_name":"_smiles.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"145005211","text":"# Run_convert_CSV_to_JSON.py\n\n\"\"\"convert the CSV file of crunchbase into JSON\"\"\"\n\nimport csv # read the CSV file\nimport json # dump the data from CSV (CSVReader variable) into organizations.json\n\n\"\"\"creating a list of CSVFile rows\"\"\"\ndata = list()\nwith open('data/02-crunchbase/organizations.csv') as csvFile:\n csvReader = csv.DictReader(csvFile)\n for rows in csvReader:\n data.append(rows)\n\"\"\"dumping the rows into list of dictionary in json format\"\"\"\nwith open('data/02-crunchbase/organizations.json', 'w') as jsonFile:\n json.dump(data, jsonFile, indent=1)\n","sub_path":"Run_convert_CSV_to_JSON.py","file_name":"Run_convert_CSV_to_JSON.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"515464539","text":"import random\r\nimport datetime\r\nfrom customer import Customer\r\n# Instance the class from customer.py\r\natm = Customer(id) # id -> python built-in function to return an identity of an object\r\n\r\nwhile True:\r\n pin = int(input('Enter Your PIN: '))\r\n \r\n # Verifikasi Pin\r\n tries_chance = 0\r\n while pin != atm.checkPin() and tries_chance < 3:\r\n pin = int(input('Wrong PIN, Enter Again: '))\r\n tries_chance += 1\r\n\r\n if tries_chance == 3:\r\n print('Error, please re-insert Your ATM card...')\r\n # Exit the program\r\n exit()\r\n \r\n # Menu ATM\r\n while True:\r\n print('\\nWelcome to ATM')\r\n \r\n print('\\n1 - Check Balance\\t2 - Withdraw\\t3 - Deposit\\t4 - Change Pin\\t5 - Exit')\r\n select_menu = int(input('\\nChoose Menu: '))\r\n \r\n # Cek Saldo\r\n if select_menu == 1:\r\n print(f'Your Balance is: IDR {atm.checkBalance()}\\n')\r\n # Ambil Uang\r\n elif select_menu == 2:\r\n nominal = int(input('Enter your withdraw nominal: '))\r\n verify_withdraw = input(f'It is correct nominal? (y/n) IDR {nominal}: ')\r\n if verify_withdraw == 'y':\r\n print(f'Initial Balance is IDR {atm.checkBalance()}')\r\n if atm.checkBalance() - nominal < 10000:\r\n print('Sorry, Your Balance cant lower than IDR 10000')\r\n elif nominal < atm.checkBalance():\r\n atm.withdrawBalance(nominal)\r\n print('Transaction Success')\r\n print(f'Current Balance is IDR {atm.checkBalance()}')\r\n else:\r\n print('Sorry, Your Balance is not enough')\r\n else:\r\n break\r\n # Simpan Uang\r\n elif select_menu == 3:\r\n nominal = int(input('Enter your deposit nominal: '))\r\n verify_deposit = input(f'It is correct nominal? 
(y/n) IDR {nominal}: ')\r\n if verify_deposit == 'y':\r\n atm.depositBalance(nominal)\r\n print(f'Current Balance is IDR {atm.checkBalance()}')\r\n else:\r\n break\r\n # Ganti Pin\r\n elif select_menu == 4:\r\n current_pin = int(input('Enter Your current PIN: '))\r\n if current_pin != atm.checkPin():\r\n print('Wrong PIN!')\r\n break\r\n \r\n new_pin = int(input('Enter Your new PIN: '))\r\n verify_new_pin = int(input('Enter Your new PIN again: '))\r\n if verify_new_pin == new_pin:\r\n # Ubah PIN awal ke PIN baru\r\n atm.pin = new_pin\r\n print('Success')\r\n else:\r\n print('Failed, Your new PIN is not match')\r\n # Cetak Receipt dan exit\r\n elif select_menu == 5:\r\n print(\"\\nReceipt is printed automatically when you leave.\\nPlease keep this receipt as proof of your transaction.\")\r\n print(f'\\nNo. Record{\" \": <7}: {random.randint(100000, 1000000)}')\r\n print(f'Date{\" \": <13}: {datetime.datetime.now()}')\r\n print(f'Current Balance{\" \": <2}: IDR {atm.checkBalance()}')\r\n print('Thank you for using ATM service')\r\n exit()\r\n # Menu tidak tersedia\r\n else:\r\n print('Sorry, that menu is not available')","sub_path":"Challenges/ATM/atm_program.py","file_name":"atm_program.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"453403497","text":"# -*- coding: utf-8 -*-\n# file: data_utils_for_training.py\n# time: 02/11/2022 15:39\n# author: YANG, HENG (杨恒)\n# github: https://github.com/yangheng95\n# GScholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en\n# ResearchGate: https://www.researchgate.net/profile/Heng-Yang-17/research\n# Copyright (C) 2022. All Rights Reserved.\n\nimport tqdm\n\nfrom pyabsa.framework.dataset_class.dataset_template import PyABSADataset\nfrom pyabsa.utils.file_utils.file_utils import load_dataset_from_file\nfrom pyabsa.utils.pyabsa_utils import check_and_fix_labels, fprint\n\n\nclass BERTTADDataset(PyABSADataset):\n def load_data_from_dict(self, dataset_dict, **kwargs):\n pass\n\n def load_data_from_file(self, dataset_file, **kwargs):\n lines = load_dataset_from_file(\n self.config.dataset_file[self.dataset_type], config=self.config\n )\n\n all_data = []\n\n label_set1 = set()\n label_set2 = set()\n label_set3 = set()\n\n for i in tqdm.tqdm(range(len(lines)), desc=\"preparing dataloader\"):\n line = lines[i].strip().split(\"$LABEL$\")\n text, labels = line[0], line[1]\n text = text.strip()\n label, is_adv, adv_train_label = labels.strip().split(\",\")\n label, is_adv, adv_train_label = (\n label.strip(),\n is_adv.strip(),\n adv_train_label.strip(),\n )\n\n if is_adv == \"1\" or is_adv == 1:\n adv_train_label = label\n label = \"-100\"\n else:\n label = label\n adv_train_label = \"-100\"\n # adv_train_label = '-100'\n\n text_indices = self.tokenizer.text_to_sequence(\"{}\".format(text))\n\n data = {\n \"text_indices\": text_indices,\n \"text_raw\": text,\n \"label\": label,\n \"adv_train_label\": adv_train_label,\n \"is_adv\": is_adv,\n }\n\n label_set1.add(label)\n label_set2.add(adv_train_label)\n label_set3.add(is_adv)\n\n all_data.append(data)\n\n check_and_fix_labels(label_set1, \"label\", all_data, self.config)\n check_and_fix_adv_train_labels(\n label_set2, \"adv_train_label\", all_data, self.config\n )\n 
check_and_fix_is_adv_labels(label_set3, \"is_adv\", all_data, self.config)\n self.config.class_dim = len(label_set1 - {\"-100\"})\n self.config.adv_det_dim = len(label_set3 - {\"-100\"})\n\n self.data = all_data\n\n def __init__(self, config, tokenizer, dataset_type=\"train\", **kwargs):\n super().__init__(config, tokenizer, dataset_type, **kwargs)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return len(self.data)\n\n\ndef check_and_fix_adv_train_labels(label_set: set, label_name, all_data, config):\n # update output_dim, init model behind execution of this function!\n if \"-100\" in label_set:\n adv_train_label_to_index = {\n origin_label: int(idx) - 1 if origin_label != \"-100\" else -100\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_adv_train_label = {\n int(idx) - 1 if origin_label != \"-100\" else -100: origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n else:\n adv_train_label_to_index = {\n origin_label: int(idx)\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_adv_train_label = {\n int(idx): origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n if \"index_to_adv_train_label\" not in config.args:\n config.index_to_adv_train_label = index_to_adv_train_label\n config.adv_train_label_to_index = adv_train_label_to_index\n\n if config.index_to_adv_train_label != index_to_adv_train_label:\n # raise KeyError('Fail to fix the labels, the number of labels are not equal among all datasets!')\n config.index_to_adv_train_label.update(index_to_adv_train_label)\n config.adv_train_label_to_index.update(adv_train_label_to_index)\n num_label = {l: 0 for l in label_set}\n num_label[\"Sum\"] = len(all_data)\n for item in all_data:\n try:\n num_label[item[label_name]] += 1\n item[label_name] = adv_train_label_to_index[item[label_name]]\n except Exception as e:\n # fprint(e)\n 
num_label[item.polarity] += 1\n item.polarity = adv_train_label_to_index[item.polarity]\n fprint(\"Dataset Label Details: {}\".format(num_label))\n\n\ndef check_and_fix_is_adv_labels(label_set: set, label_name, all_data, config):\n # update output_dim, init model behind execution of this function!\n if \"-100\" in label_set:\n is_adv_to_index = {\n origin_label: int(idx) - 1 if origin_label != \"-100\" else -100\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_is_adv = {\n int(idx) - 1 if origin_label != \"-100\" else -100: origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n else:\n is_adv_to_index = {\n origin_label: int(idx)\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_is_adv = {\n int(idx): origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n if \"index_to_is_adv\" not in config.args:\n config.index_to_is_adv = index_to_is_adv\n config.is_adv_to_index = is_adv_to_index\n\n if config.index_to_is_adv != index_to_is_adv:\n # raise KeyError('Fail to fix the labels, the number of labels are not equal among all datasets!')\n config.index_to_is_adv.update(index_to_is_adv)\n config.is_adv_to_index.update(is_adv_to_index)\n num_label = {l: 0 for l in label_set}\n num_label[\"Sum\"] = len(all_data)\n for item in all_data:\n try:\n num_label[item[label_name]] += 1\n item[label_name] = is_adv_to_index[item[label_name]]\n except Exception as e:\n # fprint(e)\n num_label[item.polarity] += 1\n item.polarity = is_adv_to_index[item.polarity]\n fprint(\"Dataset Label Details: {}\".format(num_label))\n","sub_path":"pyabsa/tasks/TextAdversarialDefense/dataset_utils/__plm__/data_utils_for_training.py","file_name":"data_utils_for_training.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"877309","text":"\"\"\"\nCopyright 2020 The Magma Authors.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree.\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\n\nfrom magma.common import metrics_export\nfrom magma.monitord.metrics import SUBSCRIBER_ICMP_LATENCY_MS\n\n\nclass MetricTests(unittest.TestCase):\n \"\"\"\n Tests for the Service303 metrics interface\n \"\"\"\n def test_metrics_defined(self):\n \"\"\" Test that all metrics are defined in proto enum \"\"\"\n SUBSCRIBER_ICMP_LATENCY_MS.labels('IMSI00000001').observe(10.33)\n\n metrics_protos = list(metrics_export.get_metrics())\n for metrics_proto in metrics_protos:\n if metrics_proto.name == \"subscriber_latency_ms\":\n metric = metrics_proto.metric[0]\n self.assertEqual(metric.histogram.sample_sum, 10.33)\n self.assertEqual(metric.label[0].value, 'IMSI00000001')\n","sub_path":"modules/lte/gateway/python/magma/monitord/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"79500274","text":"import json\n\nimport socket\n\nimport asyncio\n\nfrom utility.configuration.configuration import ConfigMetaclass, ConfigAttribute\n\n\nclass SocketConfig(metaclass=ConfigMetaclass):\n\n buffer_size = ConfigAttribute('buffer_size', int, 1500000)\n\n encoding = ConfigAttribute('encoding', str, 'utf-8')\n\n host = ConfigAttribute('host', str, 'localhost')\n\n port = ConfigAttribute('port', int, 8000)\n\n\n def __init__(self, path=None):\n\n if path:\n\n with open(path, 'r') as file:\n\n config_json = json.load(file)\n\n for name, value in config_json.items():\n\n setattr(self, name, value)\n\n\nclass SocketManager(metaclass=ConfigMetaclass):\n\n def __init__(self):\n\n self._config = SocketConfig()\n\n\n @property\n def address(self):\n\n return (self._config.host, self._config.port)\n\n\n async def send_request_message(self,TypeOfSend, **kwargs):\n\n with socket.socket() as sock:\n\n\n\n if TypeOfSend=='sendtoall':\n context = {'action':'sendall', 'message':kwargs}\n elif TypeOfSend=='sendto':\n context = {'action': 'sendto', 'message': kwargs}\n elif TypeOfSend=='auth':\n context = {'action': 'auth', 'message': kwargs}\n elif TypeOfSend =='MyLogo':\n context = {\"action\": 'MyLogo', 'message': kwargs}\n else:\n context = {'action': TypeOfSend, 'message': kwargs}\n\n response_str = json.dumps(context)\n\n response_bytes = response_str.encode(self._config.encoding)\n\n sock.connect(self.address)\n sock.send(response_bytes)\n\n\n def receive_response_message(self):\n\n with socket.socket() as sock:\n\n sock.connect(self.address)\n\n while True:\n\n response_bytes = sock.recv(self._config.buffer_size)[0:]\n\n return response_bytes.decode(self._config.encoding)\n","sub_path":"client/appcore/socketmanager.py","file_name":"socketmanager.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"100990445","text":"import torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.models as models\n\n\nclass InverseNet(nn.Module):\n\n def __init__(self, l):\n super(InverseNet, self).__init__()\n self.an = models.alexnet(pretrained=True)\n feats = self.an.features\n cl = self.an.classifier\n if l < 5:\n self.conv_lin = 'conv'\n split_point = 0\n conv_cntr = 0\n for lay in feats:\n if isinstance(lay, nn.Conv2d):\n conv_cntr += 1\n split_point += 1\n\n if conv_cntr == l:\n break\n \n self.an.features = nn.Sequential(*list(feats)[:split_point+1])\n \n elif l >=6 and l <=8:\n self.conv_lin = 'lin'\n split_point = 0\n lin_cntr = 5\n for lay in feats:\n if isinstance(lay, nn.Linear):\n lin_cntr += 1\n split_point += 1\n\n if lin_cntr == l:\n break\n \n self.an.cl = nn.Sequential(*list(cl)[:split_point+1])\n\n # Freeze base network parameters\n for param in self.an.parameters():\n param.requires_grad = False\n\n self.lin_net1 = nn.Sequential(nn.Linear(1000, 4096),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(4096, 4096),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(4096, 4096),\n nn.LeakyReLU(0.2, inplace=True))\n\n self.lin_net2 = nn.Sequential(nn.ConvTranspose2d(256, 256, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(256, 128, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(128, 64, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(64, 32, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(32, 3, 8, stride=2),\n nn.LeakyReLU(0.2, inplace=True))\n \n def forward(self, x):\n x = self.an(x)\n if self.conv_lin == 'conv':\n pass\n elif self.conv_lin == 'lin':\n x = self.lin_net1(x)\n x = x.view(x.size(0), 256, 4, 4)\n x = self.lin_net2(x)\n return x\n\ndef main():\n transform = transforms.Compose([transforms.Resize((224, 224)),\n transforms.ToTensor(),\n 
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n \n epochs = 1\n batch_size = 4\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n imagenet_train = torchvision.datasets.ImageFolder('data/train', transform=transform)\n trainloader = torch.utils.data.DataLoader(imagenet_train, batch_size=batch_size, shuffle=True, num_workers=2)\n\n imagenet_test = torchvision.datasets.ImageFolder('data/test', transform=transform)\n testloader = torch.utils.data.DataLoader(imagenet_test, batch_size=batch_size, shuffle=True, num_workers=2)\n\n model = InverseNet(8)\n model = model.to(device)\n criterion = nn.MSELoss()\n optimizer = optim.Adam(filter(lambda x: x.requires_grad, model.parameters()))\n\n\n for e in range(epochs):\n total_loss = 0.0\n for i, data in enumerate(trainloader):\n img, cl = data\n img = img.to(device)\n\n optimizer.zero_grad()\n\n out = model(img)\n\n loss = criterion(out, img)\n loss.backward()\n\n total_loss += loss\n\n optimizer.step()\n\n if i % 500 == 0 and i != 0:\n print('Loss on image ' + str(batch_size * i) + ', ' + \"{0:.5f}\".format(total_loss))\n print('-' * 10)\n total_loss = 0.0\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"inversion.py","file_name":"inversion.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"114798717","text":"'''\nSome script functions are derived and modified based on the sample tutorials-ballons.py in the repo\nCredit should be given to the Repo owner Matterport, Inc\n\nhonour the original author:\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\ntutorial originally Written by Waleed Abdulla\n'''\n\n'''\nA simple flask application allow users to select detection or applying color splash on an uploaded images.\n\nThe app will load the pretrained model and initialize it during the flask setup progress\nIt is found that the computer must run the detection immediately loaded the model in order to save the model into the\nmemory, otherwise users will not be able to run detection at all, as the model is not in the memory\n\nNote:\n--Only jpg images will be allowed.\n--Depending on the computational power, the initializing time and detection time can vary [greatly].\n--It is highly recommend that users should have a decent graphic card, and have NVIDIA GPU Computing Toolkit installed\n--This repo is found to be only runnable on a specific combinations of library versions shown as below:\nGPU: Acceptable graphic cards, here we using GTX1080\nCUDA: V10.0\ntensorflow: 1.14-gpu\nkeras:2.1.3\n'''\nfrom flask import Flask, request, render_template, jsonify, redirect\nfrom datetime import timedelta\n\n\nfrom werkzeug.utils import secure_filename\n###################################\nimport os\nimport sys\nimport random\nimport numpy as np\nimport skimage.io\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n\nimport fruit\n###############################configure necessary path#######################################################\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\n\n# Import config\nsys.path.append(os.path.join(ROOT_DIR, 
\"samples/pearBanana/\")) # To find local version\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"logs/res101-2class/mask_rcnn_fruit_0065.h5\")\n\n# Directory of images to run detection on\nIMAGE_DIR = os.path.join(ROOT_DIR, \"samples/pearBanana/static/initializeImage\")\n\n\nUPLOAD_FOLDER = os.path.join(ROOT_DIR, \"samples/pearBanana/upload_images\")\nALLOWED_EXTENSIONS = set(['jpg'])\n\n########################configure flask object#################\napp = Flask(__name__, template_folder='')\n\n# avoid caching, which prevent showing the detection/splash result\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nclass InferenceConfig(fruit.FruitConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n POST_NMS_ROIS_INFERENCE = 2000\n\n # proved->the higher the image quality, the better the detection accuracy\n # How every, the detection speed will be slowed dramatically\n # depending on your computational power,\n # you might need to modify the 'IMAGE_MAX_DIM = 3520' to fit your graphic card memory capacity\n # as a guidance, we use GTX1080 with 8gb memory, 3520p is the maximum resolution can be dealt with.\n IMAGE_RESIZE_MODE = \"square\"\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 3520 # was 1024\n\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.7\n\n # Minimum probability value to accept a detected instance\n # ROIs below this threshold are skipped\n DETECTION_MIN_CONFIDENCE = 0.6\n\n # Max number of final detections\n DETECTION_MAX_INSTANCES = 200\n\n###### create model in inference mode, must run a detection imeediately to save the model to computer memory 
#######\nconfig = InferenceConfig()\n# config.display()\nprint('\\n\\n -----Please be patient, the initializing process can take a while depending on your computability-----\\n\\n')\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\nclass_names = ['BG', 'banana', 'pear']\n\nfile_names = next(os.walk(IMAGE_DIR))[2]\nimage = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))\nprint('\\n\\n -----Please be patient, almost done initialing-----\\n\\n')\n\n# Run detection\nresults = model.detect([image], verbose=1)\nr = results[0] ### the length of this will be the count of items found\nprint('\\n\\n -----Initialization Complete -----\\n\\n')\n\ndef detect_onsite(model):\n class_names = ['BG', 'banana', 'pear']\n\n user_file_names = next(os.walk(UPLOAD_FOLDER))[2]\n names_chosen = random.choice(user_file_names)\n image = skimage.io.imread(os.path.join(UPLOAD_FOLDER, names_chosen))\n # Run detection\n results = model.detect([image], verbose=1)\n\n # Visualize results\n r = results[0] ### the length of this will be the count of items found\n print('the class id of all detected objects as follows')\n print('1: banana, 2: pear')\n print(r['class_ids'], '\\nthere are', len(r['class_ids']), 'fruits detected')\n\n # Modified visualize.py line166, so need to run 'python setup.py install' again\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'])\n print('executed detect_onsite')\n print('completed detecting: ' + names_chosen)\n banana_count = 0\n pear_count = 0\n for category in r['class_ids']:\n if category == 1:\n banana_count = banana_count + 1\n elif category == 2:\n pear_count = pear_count + 1\n count = {'banana': banana_count, 'pear': pear_count}\n return count\n########################### only accepet jpg file 
####################################\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n####################### implement color spalsh effect ########################################\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # Copy color pixels from the original color image where mask is set\n if mask.shape[-1] > 0:\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray.astype(np.uint8)\n return splash\n\n\ndef detect_and_color_splash(model):\n # Run model detection and generate the color splash effect\n # Read image\n user_file_names = next(os.walk(UPLOAD_FOLDER))[2]\n names_chosen = random.choice(user_file_names)\n image = skimage.io.imread(os.path.join(UPLOAD_FOLDER, names_chosen))\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # # Save output\n skimage.io.imsave('static/images/splash_result.jpg', splash)\n print('executed color splash')\n################################################################\n@app.route('/')\ndef home():\n if request.method == 'GET':\n return render_template('index.html')\n\n return render_template('index.html')\n\n\n@app.route('/UploadDetect', methods=['GET', 'POST'])\ndef upload_file_detect():\n if request.method == 'GET':\n return render_template('upload_detect.html')\n\n if request.method == 'POST':\n f = request.files['file']\n print(request.files)\n if f and allowed_file(f.filename):\n filename = 
secure_filename(f.filename)\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], 'uploaded_image.jpg'))\n return redirect('/detect')\n else:\n print('file type is not correct')\n return render_template('upload_detect.html')\n\n@app.route('/UploadSplash', methods=['GET', 'POST'])\ndef upload_file_splash():\n if request.method == 'GET':\n return render_template('upload_splash.html')\n\n if request.method == 'POST':\n f = request.files['file']\n print(request.files)\n if f and allowed_file(f.filename):\n filename = secure_filename(f.filename)\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], 'uploaded_image.jpg'))\n return redirect('/splash')\n else:\n print('file type is not correct')\n return render_template('upload_splash.html')\n\n\n@app.route('/detect')\ndef detect():\n count = detect_onsite(model)\n return render_template('result_detect.html', countresult = count)\n\n@app.route('/splash')\ndef splash():\n detect_and_color_splash(model)\n return render_template('result_splash.html')\n'''\nMain function to run Flask server\n'''\nif __name__ == '__main__':\n app.run()\n","sub_path":"samples/pearBanana/app-2class.py","file_name":"app-2class.py","file_ext":"py","file_size_in_byte":9230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"16035100","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright 2014 Alexander Craig\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA POX module implementation of multicast routing, supported by management of group state using the IGMP manager module.\n\nImplementation adapted from NOX-Classic CastFlow implementation provided by caioviel. Multicast routing records are stored for each \ncombination of multicast group and source address. For each of these records the GroupFlow module will calculate a shortest path tree \nusing Dijkstra's algorithm from the multicast source to all routers in the network (where each edge is weighted according to the number \nof hops from the multicast source). Branches of this tree which correspond to active multicast receivers are installed into the network\nthrough OpenFlow, and the spanning tree is only recalculated when the network topology changes. This should enable rapid changes of \nmulticast group, as there is no need to completely recalculate the multicast tree when new receivers join a group.\n\nThe following command line arguments are supported:\n\n* link_weight_type: Determines the method by which link weights are scaled with link utilization. 
Supported options are 'linear'\n (link weight scales as a linear function of utilization) or 'exponential' (link weight grows exponentially with increasing utilization).\n Default: linear\n* static_link_weight: Determines the static weight which is applied to all links regardless of utilization.\n Default: 1\n* util_link_weight: Determines the scaling factor by which utilization based link weight will be multiplied. Higher values cause the current\n traffic state to be more heavily weighted in routing (relative to the network topology). Note that setting this to 0 with either link\n weight type will produce shortest cost trees in terms of number of hops only.\n Default: 10\n* flow_replacement_mode: Determines the manner in which replacement of existing flows is triggered. Supported options:\n 'none': Existing flows are never replaced.\n 'periodic': Existing flows are periodically replaced.\n 'cong_threshold': In this mode, flow replacement is triggered by the FlowTracker module reporting congestion on a link traversed by the flow.\n Upon receiving a LinkUtilizationEvent, the GroupFlow module will attempt to replace the largest flows traversing the link until the link is\n brought back under its congestion threshold.\n Default: 'none'\n* flow_replacement_interval: Determines the flow replacement interval in a mode specific fashion (always specified in seconds): \n 'none': Has no effect\n 'periodic': Sets the periodic interval at which flows are replaced.\n 'cong_threshold': Sets the minimum interval that must elapse after flow placement, before the flow can be replaced.\n Default: 10\n\nDepends on openflow.igmp_manager, misc.groupflow_event_tracer (optional)\n\nCreated on July 16, 2013\n\nAuthor: Alexander Craig - alexcraig1@gmail.com\n\"\"\"\n\nfrom collections import defaultdict\nfrom sets import Set\nfrom heapq import heappop, heappush\nimport time\n\n# POX dependencies\nfrom pox.openflow.discovery import Discovery\nfrom pox.core import core\nfrom pox.lib.revent 
import *\nfrom pox.misc.groupflow_event_tracer import *\nfrom pox.openflow.flow_tracker import *\nfrom pox.lib.util import dpid_to_str\nimport pox.lib.packet as pkt\nfrom pox.lib.packet.igmp import * # Required for various IGMP variable constants\nfrom pox.lib.packet.ethernet import *\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.addresses import IPAddr, EthAddr\nfrom pox.lib.recoco import Timer\nimport sys\n\nlog = core.getLogger()\n\n# Constants used to determine which link weighting scheme is used\nLINK_WEIGHT_LINEAR = 1\nLINK_WEIGHT_EXPONENTIAL = 2\n\nSTATIC_LINK_WEIGHT = 1 # Scaling factor for link weight which is statically assigned (implements shortest hop routing if no dynamic link weight is set)\nUTILIZATION_LINK_WEIGHT = 10 # Scaling factor for link weight which is determined by current link utilization\n\n# Default flow replacement interval\nFLOW_REPLACEMENT_INTERVAL_SECONDS = 10\n\n# Constants to determine flow replacement mode\nNO_FLOW_REPLACEMENT = 0\nPERIODIC_FLOW_REPLACEMENT = 1\nCONG_THRESHOLD_FLOW_REPLACEMENT = 2\n\n# Developer constants\n# The below constants enable/configure experimental features which have not yet been integrated into the module API\nENABLE_OUT_OF_ORDER_PACKET_DELIVERY = False\n\nclass MulticastPath(object):\n \"\"\"Manages multicast route calculation and installation for a single pair of multicast group and multicast sender.\"\"\"\n\n def __init__(self, src_ip, src_router_dpid, ingress_port, dst_mcast_address, groupflow_manager, groupflow_trace_event = None):\n self.src_ip = src_ip\n self.ingress_port = ingress_port\n self.src_router_dpid = src_router_dpid\n self.dst_mcast_address = dst_mcast_address\n self.path_tree_map = defaultdict(lambda : None) # self.path_tree_map[router_dpid] = Complete path from receiver router_dpid to src\n self.weighted_topo_graph = []\n self.node_list = [] # List of all managed router dpids\n self.installed_node_list = [] # List of all router dpids with rules currently installed\n 
self.receivers = [] # Tuples of (router_dpid, port)\n self.groupflow_manager = groupflow_manager\n self.flow_cookie = self.groupflow_manager.get_new_mcast_group_cookie()\n self.calc_path_tree_dijkstras(groupflow_trace_event)\n self._last_flow_replacement_time = None\n self._flow_replacement_timer = None\n\n def calc_path_tree_dijkstras(self, groupflow_trace_event = None):\n \"\"\"Calculates a shortest path tree from the group sender to all network switches, and caches the resulting tree.\n\n Note that this function does not install any flow modifications.\"\"\"\n if not groupflow_trace_event is None:\n groupflow_trace_event.set_tree_calc_start_time(self.dst_mcast_address, self.src_ip)\n self._last_flow_replacement_time = time.time()\n \n self._calc_link_weights()\n \n nodes = set(self.node_list)\n edges = self.weighted_topo_graph\n graph = defaultdict(list)\n for src,dst,cost in edges:\n graph[src].append((cost, dst))\n \n path_tree_map = defaultdict(lambda : None)\n queue, seen = [(0,self.src_router_dpid,())], set()\n while queue:\n (cost,node1,path) = heappop(queue)\n if node1 not in seen:\n seen.add(node1)\n path = (node1, path)\n path_tree_map[node1] = path\n \n for next_cost, node2 in graph.get(node1, ()):\n if node2 not in seen:\n new_path_cost = cost + next_cost\n heappush(queue, (new_path_cost, node2, path))\n \n self.path_tree_map = path_tree_map\n \n log.debug('Calculated shortest path tree for source at router_dpid: ' + dpid_to_str(self.src_router_dpid))\n for node in self.path_tree_map:\n log.debug('Path to Node ' + dpid_to_str(node) + ': ' + str(self.path_tree_map[node]))\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_tree_calc_end_time()\n \n def _calc_link_weights(self):\n \"\"\"Calculates link weights for all links in the network to be used by calc_path_tree_dijkstras().\n\n The cost assigned to each link is based on the link's current utilization (as determined by the FlowTracker\n module), and the exact manner in which 
utilization is converted to a link wieght is determined by\n groupflow_manager.link_weight_type. Valid options are LINK_WEIGHT_LINEAR and LINK_WEIGHT_EXPONENTIAL. Both options\n include a static weight which is always assigned to all links (determined by groupflow_manager.static_link_weight),\n and a dynamic weight which is based on the current utilization (determined by\n groupflow_manager.utilization_link_weight). Setting groupflow_manager.utilization_link_weight to 0 will always\n results in shortest hop routing.\n \"\"\"\n curr_topo_graph = self.groupflow_manager.topology_graph\n self.node_list = list(self.groupflow_manager.node_set)\n \n weighted_topo_graph = []\n current_util = core.openflow_flow_tracker.get_max_flow_utilization(self.flow_cookie) / core.openflow_flow_tracker.link_max_bw\n log.info('Current utilization of flow ' + str(self.flow_cookie) + ': ' + str(current_util * core.openflow_flow_tracker.link_max_bw) + ' Mbps')\n \n for edge in curr_topo_graph:\n output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]\n raw_link_util = core.openflow_flow_tracker.get_link_utilization_normalized(edge[0], output_port);\n link_util_mcast_flow = core.openflow_flow_tracker.get_flow_utilization_normalized(edge[0], output_port, self.flow_cookie)\n \n link_util = max(0, (raw_link_util * (1 - link_util_mcast_flow)))\n \n # link_util = raw_link_util # Uncommenting this line will cause flows to reroute around their own traffic, good for testing\n \n # Current utilization here is doubled as a simple attempt to handle variability in flow rates\n if link_util + (current_util * 2) > 1:\n link_util = 1\n \n link_weight = 1\n \n if self.groupflow_manager.util_link_weight == 0:\n link_weight = self.groupflow_manager.static_link_weight\n else:\n if self.groupflow_manager.link_weight_type == LINK_WEIGHT_LINEAR:\n if link_util >= 1:\n link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()\n else:\n link_weight = 
min(self.groupflow_manager.static_link_weight + (self.groupflow_manager.util_link_weight * link_util),\n sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())\n elif self.groupflow_manager.link_weight_type == LINK_WEIGHT_EXPONENTIAL:\n if link_util >= 1:\n link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()\n else:\n link_weight = min(self.groupflow_manager.static_link_weight + (self.groupflow_manager.util_link_weight * ((1 / (1 - link_util)) - 1)),\n sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())\n \n log.debug('Router DPID: ' + dpid_to_str(edge[0]) + ' Port: ' + str(output_port) + \n ' TotalUtil: ' + str(raw_link_util) + ' FlowUtil: ' + str(link_util_mcast_flow) + ' OtherFlowUtil: ' + str(link_util) \n + ' Weight: ' + str(link_weight))\n\n weighted_topo_graph.append([edge[0], edge[1], link_weight])\n self.weighted_topo_graph = weighted_topo_graph\n \n log.debug('Calculated link weights for source at router_dpid: ' + dpid_to_str(self.src_router_dpid))\n for edge in self.weighted_topo_graph:\n log.debug(dpid_to_str(edge[0]) + ' -> ' + dpid_to_str(edge[1]) + ' W: ' + str(edge[2]))\n \n def install_openflow_rules(self, groupflow_trace_event = None):\n \"\"\"Selects routes for active receivers from the cached shortest path tree, and installs/removes OpenFlow rules accordingly.\"\"\"\n reception_state = self.groupflow_manager.get_reception_state(self.dst_mcast_address, self.src_ip)\n log.debug('Reception state for ' + str(self.dst_mcast_address) + ': ' + str(reception_state))\n outgoing_rules = defaultdict(lambda : None)\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_route_processing_start_time(self.dst_mcast_address, self.src_ip)\n \n # Calculate the paths for the specific receivers that are currently active from the previously\n # calculated mst\n edges_to_install = []\n calculated_path_router_dpids = []\n for receiver in reception_state:\n if receiver[0] == 
self.src_router_dpid:\n continue\n if receiver[0] in calculated_path_router_dpids:\n continue\n \n # log.debug('Building path for receiver on router: ' + dpid_to_str(receiver[0]))\n receiver_path = self.path_tree_map[receiver[0]]\n log.debug('Receiver path for receiver ' + str(receiver[0]) + ': ' + str(receiver_path))\n if receiver_path is None:\n log.warn('Path could not be determined for receiver ' + dpid_to_str(receiver[0]) + ' (network is not fully connected)')\n continue\n \n while receiver_path[1]:\n edges_to_install.append((receiver_path[1][0], receiver_path[0]))\n receiver_path = receiver_path[1]\n calculated_path_router_dpids.append(receiver[0])\n \n # Get rid of duplicates in the edge list (must be a more efficient way to do this, find it eventually)\n edges_to_install = list(Set(edges_to_install))\n if not edges_to_install is None:\n # log.info('Installing edges:')\n for edge in edges_to_install:\n log.debug('Installing: ' + str(edge[0]) + ' -> ' + str(edge[1]))\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_route_processing_end_time()\n groupflow_trace_event.set_flow_installation_start_time()\n \n for edge in edges_to_install:\n if edge[0] in outgoing_rules:\n # Add the output action to an existing rule if it has already been generated\n output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]\n outgoing_rules[edge[0]].actions.append(of.ofp_action_output(port = output_port))\n #log.debug('ER: Configured router ' + dpid_to_str(edge[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to next router ' + \\\n # dpid_to_str(edge[1]) + ' over port: ' + str(output_port))\n else:\n # Otherwise, generate a new flow mod\n msg = of.ofp_flow_mod()\n msg.hard_timeout = 0\n msg.idle_timeout = 0\n if edge[0] in self.installed_node_list:\n msg.command = of.OFPFC_MODIFY\n else:\n msg.command = of.OFPFC_ADD\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n 
msg.cookie = self.flow_cookie\n output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]\n msg.actions.append(of.ofp_action_output(port = output_port))\n outgoing_rules[edge[0]] = msg\n #log.debug('NR: Configured router ' + dpid_to_str(edge[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to next router ' + \\\n # dpid_to_str(edge[1]) + ' over port: ' + str(output_port))\n \n for receiver in reception_state:\n if receiver[0] in outgoing_rules:\n # Add the output action to an existing rule if it has already been generated\n output_port = receiver[1]\n outgoing_rules[receiver[0]].actions.append(of.ofp_action_output(port = output_port))\n #log.debug('ER: Configured router ' + dpid_to_str(receiver[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to network over port: ' + str(output_port))\n else:\n # Otherwise, generate a new flow mod\n msg = of.ofp_flow_mod()\n msg.hard_timeout = 0\n msg.idle_timeout = 0\n if receiver[0] in self.installed_node_list:\n msg.command = of.OFPFC_MODIFY\n else:\n msg.command = of.OFPFC_ADD\n msg.cookie = self.flow_cookie\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n output_port = receiver[1]\n msg.actions.append(of.ofp_action_output(port = output_port))\n outgoing_rules[receiver[0]] = msg\n #log.debug('NR: Configured router ' + dpid_to_str(receiver[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to network over port: ' + str(output_port))\n \n # Setup empty rules for any router not involved in this path\n for router_dpid in self.node_list:\n if not router_dpid in outgoing_rules and router_dpid in self.installed_node_list:\n msg = of.ofp_flow_mod()\n msg.cookie = self.flow_cookie\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n msg.command = of.OFPFC_DELETE\n outgoing_rules[router_dpid] = msg\n #log.debug('Removed rule on router ' + 
dpid_to_str(router_dpid) + ' for group ' + str(self.dst_mcast_address))\n \n for router_dpid in outgoing_rules:\n connection = core.openflow.getConnection(router_dpid)\n if connection is not None:\n connection.send(outgoing_rules[router_dpid])\n if not outgoing_rules[router_dpid].command == of.OFPFC_DELETE:\n self.installed_node_list.append(router_dpid)\n else:\n self.installed_node_list.remove(router_dpid)\n else:\n log.warn('Could not get connection for router: ' + dpid_to_str(router_dpid))\n \n log.debug('New flows installed for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))\n \n if self.groupflow_manager.flow_replacement_mode == PERIODIC_FLOW_REPLACEMENT and self._flow_replacement_timer is None:\n log.debug('Starting flow replacement timer for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))\n self._flow_replacement_timer = Timer(self.groupflow_manager.flow_replacement_interval, self.update_flow_placement, recurring=True)\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_flow_installation_end_time()\n core.groupflow_event_tracer.archive_trace_event(groupflow_trace_event)\n\n \n def remove_openflow_rules(self):\n \"\"\"Removes all OpenFlow rules associated with this multicast group / sender pair.\n\n This should be used when the group has no active receivers.\"\"\"\n log.info('Removing rules on all routers for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip))\n for router_dpid in self.node_list:\n msg = of.ofp_flow_mod()\n msg.cookie = self.flow_cookie\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n msg.match.in_port = None\n msg.command = of.OFPFC_DELETE\n connection = core.openflow.getConnection(router_dpid)\n if connection is not None:\n connection.send(msg)\n else:\n log.warn('Could not get connection for router: ' + 
dpid_to_str(router_dpid))\n self.installed_node_list = []\n \n if self._flow_replacement_timer is not None:\n self._flow_replacement_timer.cancel()\n self._flow_replacement_timer = None\n \n def update_flow_placement(self, groupflow_trace_event = None):\n \"\"\"Replaces the existing flows by recalculating the cached shortest path tree, and installing new OpenFlow rules.\"\"\"\n self.calc_path_tree_dijkstras(groupflow_trace_event)\n self.install_openflow_rules(groupflow_trace_event)\n log.info('Replaced flows for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))\n \n\n\nclass GroupFlowManager(EventMixin):\n \"\"\"The GroupFlowManager implements multicast routing for OpenFlow networks.\"\"\"\n _core_name = \"openflow_groupflow\"\n \n def __init__(self, link_weight_type, static_link_weight, util_link_weight, flow_replacement_mode, flow_replacement_interval):\n # Listen to dependencies\n def startup():\n core.openflow.addListeners(self, priority = 99)\n core.openflow_igmp_manager.addListeners(self, priority = 99)\n core.openflow_flow_tracker.addListeners(self, priority = 99)\n\n self.link_weight_type = link_weight_type\n log.info('Set link weight type: ' + str(self.link_weight_type))\n self.static_link_weight = float(static_link_weight)\n if self.static_link_weight == 0:\n self.static_link_weight = sys.float_info.min\n self.util_link_weight = float(util_link_weight)\n log.info('Set StaticLinkWeight:' + str(self.static_link_weight) + ' UtilLinkWeight:' + str(self.util_link_weight))\n self.flow_replacement_mode = flow_replacement_mode\n self.flow_replacement_interval = flow_replacement_interval\n log.info('Set FlowReplacementMode:' + str(flow_replacement_mode) + ' FlowReplacementInterval:' + str(flow_replacement_interval) + ' seconds')\n \n self.adjacency = defaultdict(lambda : defaultdict(lambda : None))\n self.topology_graph = []\n self.node_set = Set()\n self.multicast_paths = defaultdict(lambda : 
defaultdict(lambda : None))\n self.multicast_paths_by_flow_cookie = {} # Stores references to the same objects as self.multicast_paths, except this map is keyed by flow_cookie\n self._next_mcast_group_cookie = 54345; # Arbitrary, not set to 1 to avoid conflicts with other modules\n \n # Desired reception state as delivered by the IGMP manager, keyed by the dpid of the router for which\n # the reception state applies\n self.desired_reception_state = defaultdict(lambda : None)\n \n # Setup listeners\n core.call_when_ready(startup, ('openflow', 'openflow_igmp_manager', 'openflow_flow_tracker'))\n \n def get_new_mcast_group_cookie(self):\n \"\"\"Returns a new, unique cookie which should be assigned to a multicast_group / sender pair.\n\n Using a unique cookie per multicast group / sender allows the FlowTracker module to accurately track\n bandwidth utilization on a per-flow basis.\n \"\"\"\n self._next_mcast_group_cookie += 1\n log.debug('Generated new flow cookie: ' + str(self._next_mcast_group_cookie - 1))\n return self._next_mcast_group_cookie - 1\n \n def get_reception_state(self, mcast_group, src_ip):\n \"\"\"Returns locations to which traffic must be routed for the specified multicast address and sender IP.\n\n Returns a list of tuples of the form (router_dpid, output_port).\n \"\"\"\n # log.debug('Calculating reception state for mcast group: ' + str(mcast_group) + ' Source: ' + str(src_ip))\n reception_state = []\n for router_dpid in self.desired_reception_state:\n # log.debug('Considering router: ' + dpid_to_str(router_dpid))\n if mcast_group in self.desired_reception_state[router_dpid]:\n for port in self.desired_reception_state[router_dpid][mcast_group]:\n if not self.desired_reception_state[router_dpid][mcast_group][port]:\n reception_state.append((router_dpid, port))\n # log.debug('Reception from all sources desired on port: ' + str(port))\n elif src_ip in self.desired_reception_state[router_dpid][mcast_group][port]:\n reception_state.append((router_dpid, 
port))\n # log.debug('Reception from specific source desired on port: ' + str(port))\n else:\n return reception_state\n\n \n def drop_packet(self, packet_in_event):\n \"\"\"Drops the packet represented by the PacketInEvent without any flow table modification\"\"\"\n msg = of.ofp_packet_out()\n msg.data = packet_in_event.ofp\n msg.buffer_id = packet_in_event.ofp.buffer_id\n msg.in_port = packet_in_event.port\n msg.actions = [] # No actions = drop packet\n packet_in_event.connection.send(msg)\n\n def get_topo_debug_str(self):\n debug_str = '\\n===== GroupFlow Learned Topology'\n for edge in self.topology_graph:\n debug_str += '\\n(' + dpid_to_str(edge[0]) + ',' + dpid_to_str(edge[1]) + ')'\n return debug_str + '\\n===== GroupFlow Learned Topology'\n \n def parse_topology_graph(self, adjacency_map):\n \"\"\"Parses an adjacency map into a node and edge graph (which is cached in self.topology_graph and self.node_set).\"\"\"\n new_topo_graph = []\n new_node_list = []\n for router1 in adjacency_map:\n for router2 in adjacency_map[router1]:\n new_topo_graph.append((router1, router2))\n if not router2 in new_node_list:\n new_node_list.append(router2)\n if not router1 in new_node_list:\n new_node_list.append(router1)\n self.topology_graph = new_topo_graph\n self.node_set = Set(new_node_list)\n \n def _handle_PacketIn(self, event):\n \"\"\"Processes PacketIn events to detect multicast sender IPs.\"\"\"\n router_dpid = event.connection.dpid\n if not router_dpid in self.node_set:\n # log.debug('Got packet from unrecognized router.')\n return # Ignore packets from unrecognized routers\n \n igmp_pkt = event.parsed.find(pkt.igmpv3)\n if not igmp_pkt is None:\n return # IGMP packets should be ignored by this module\n \n ipv4_pkt = event.parsed.find(pkt.ipv4)\n if not ipv4_pkt is None:\n # ==== IPv4 Packet ====\n # Check the destination address to see if this is a multicast packet\n if ipv4_pkt.dstip.inNetwork('224.0.0.0/4'):\n # Ignore multicast packets from adjacent routers\n for 
router_dpid2 in self.adjacency[router_dpid]:\n if self.adjacency[router_dpid][router_dpid2] == event.port:\n return\n \n group_reception = self.get_reception_state(ipv4_pkt.dstip, ipv4_pkt.srcip)\n if group_reception:\n if not self.multicast_paths[ipv4_pkt.dstip][ipv4_pkt.srcip] is None:\n log.debug('Got multicast packet from source which should already be configured Router: ' + dpid_to_str(event.dpid) + ' Port: ' + str(event.port))\n if ENABLE_OUT_OF_ORDER_PACKET_DELIVERY:\n # This may cause OFPBRC_BUFFER_UNKNOWN errors if the controller takes too long to respond\n # Send the packet back to the switch for forwarding\n msg = of.ofp_packet_out()\n msg.data = event.ofp\n msg.buffer_id = event.ofp.buffer_id\n msg.in_port = event.port\n msg.actions = [of.ofp_action_output(port = of.OFPP_TABLE)]\n event.connection.send(msg)\n return\n \n log.info('Got multicast packet from new source. Router: ' + dpid_to_str(event.dpid) + ' Port: ' + str(event.port))\n log.debug('Reception state for this group:')\n \n for receiver in group_reception:\n log.debug('Multicast Receiver: ' + dpid_to_str(receiver[0]) + ':' + str(receiver[1]))\n\n groupflow_trace_event = None\n try:\n groupflow_trace_event = core.groupflow_event_tracer.init_groupflow_event_trace()\n except:\n pass\n path_setup = MulticastPath(ipv4_pkt.srcip, router_dpid, event.port, ipv4_pkt.dstip, self, groupflow_trace_event)\n self.multicast_paths[ipv4_pkt.dstip][ipv4_pkt.srcip] = path_setup\n self.multicast_paths_by_flow_cookie[path_setup.flow_cookie] = path_setup\n path_setup.install_openflow_rules(groupflow_trace_event)\n \n def _handle_MulticastGroupEvent(self, event):\n \"\"\"Processes MulticastGroupEvents (generated by the IGMPManager module) and adjusts routing as neccesary to fulfill desired reception state\"\"\"\n log.debug(event.debug_str())\n # Save a copy of the old reception state to account for members which left a group\n old_reception_state = None\n if event.router_dpid in self.desired_reception_state:\n 
old_reception_state = self.desired_reception_state[event.router_dpid]\n \n # Set the new reception state\n self.desired_reception_state[event.router_dpid] = event.desired_reception\n log.info('Set new reception state for router: ' + dpid_to_str(event.router_dpid))\n \n # Build a list of all multicast groups that may be impacted by this change\n mcast_addr_list = []\n removed_mcast_addr_list = []\n for multicast_addr in self.desired_reception_state[event.router_dpid]:\n mcast_addr_list.append(multicast_addr)\n \n if not old_reception_state is None:\n for multicast_addr in old_reception_state:\n # Capture groups which were removed in this event\n if not multicast_addr in mcast_addr_list:\n log.info('Multicast group ' + str(multicast_addr) + ' no longer requires reception')\n removed_mcast_addr_list.append(multicast_addr)\n elif multicast_addr in self.desired_reception_state[event.router_dpid] \\\n and set(old_reception_state[multicast_addr]) == set(self.desired_reception_state[event.router_dpid][multicast_addr]):\n # Prevent processing of groups that did not change\n mcast_addr_list.remove(multicast_addr)\n log.debug('Prevented redundant processing of group: ' + str(multicast_addr))\n \n # Rebuild multicast trees for relevant multicast groups\n log.debug('Recalculating paths due to new reception state change')\n for multicast_addr in mcast_addr_list:\n if multicast_addr in self.multicast_paths:\n log.debug('Recalculating paths for group ' + str(multicast_addr))\n groupflow_trace_event = None\n try:\n groupflow_trace_event = core.groupflow_event_tracer.init_groupflow_event_trace(event.igmp_trace_event)\n except:\n pass\n for source in self.multicast_paths[multicast_addr]:\n log.info('Recalculating paths for group ' + str(multicast_addr) + ' Source: ' + str(source))\n self.multicast_paths[multicast_addr][source].install_openflow_rules(groupflow_trace_event)\n else:\n log.debug('No existing sources for group ' + str(multicast_addr))\n \n for multicast_addr in 
removed_mcast_addr_list:\n if multicast_addr in self.multicast_paths:\n sources_to_remove = []\n for source in self.multicast_paths[multicast_addr]:\n log.info('Removing flows for group ' + str(multicast_addr) + ' Source: ' + str(source))\n self.multicast_paths[multicast_addr][source].remove_openflow_rules()\n del self.multicast_paths_by_flow_cookie[self.multicast_paths[multicast_addr][source].flow_cookie]\n sources_to_remove.append(source)\n \n for source in sources_to_remove:\n del self.multicast_paths[multicast_addr][source]\n else:\n log.info('Removed multicast group ' + str(multicast_addr) + ' has no known paths')\n \n def _handle_MulticastTopoEvent(self, event):\n \"\"\"Processes MulticastTopoEvents (generated by the IGMPManager module) and adjusts routing as neccesary to account for topology changes\n \n Note: In the current implementation, this recalculates all multicast routes.\n \"\"\"\n # log.info(event.debug_str())\n self.adjacency = event.adjacency_map\n self.parse_topology_graph(event.adjacency_map)\n # log.info(self.get_topo_debug_str())\n\n if self.multicast_paths:\n log.warn('Multicast topology changed, recalculating all paths.')\n for multicast_addr in self.multicast_paths:\n for source in self.multicast_paths[multicast_addr]:\n groupflow_trace_event = None\n try:\n groupflow_trace_event = core.groupflow_event_tracer.init_groupflow_event_trace()\n except:\n pass\n self.multicast_paths[multicast_addr][source].update_flow_placement(groupflow_trace_event)\n \n def _handle_LinkUtilizationEvent(self, event):\n \"\"\"Processes LinkUtilizationEvents (generated by the FlowTracker module), and replaces flows that traverse the specified link\"\"\"\n \n if event.link_utilization >= core.openflow_flow_tracker.link_max_bw:\n log.debug('Link Fully Utilized! 
Switch:' + dpid_to_str(event.router_dpid) + ' Port:' + str(event.output_port))\n \n # Ignore the event if congestion threshold based flow replacement is not enabled\n if self.flow_replacement_mode != CONG_THRESHOLD_FLOW_REPLACEMENT:\n return\n \n log.debug('Got LinkUtilEvent - Switch: ' + dpid_to_str(event.router_dpid) + ' Port: ' + str(event.output_port) + '\\n\\tUtil: ' + str(event.link_utilization))\n \n replacement_time = time.time()\n \n # 1) Determine the amount of utilization that should be replaced to bring the link back under the congestion threshold\n replacement_utilization = event.link_utilization - event.cong_threshold\n if replacement_utilization < 0:\n log.warn('LinkUtilizationEvent specified negative replacement utilization.')\n return\n log.debug('Attempting replacement of ' + str(replacement_utilization) + ' Mbps of flows')\n \n # 2) Build a list of the flows managed by this module that are contributing to congestion, sorted by decreasing utilization\n replacement_flows = []\n for event_flow_cookie in event.flow_map:\n if event_flow_cookie in self.multicast_paths_by_flow_cookie:\n replacement_flows.append((event_flow_cookie, event.flow_map[event_flow_cookie]))\n replacement_flows.sort(key = lambda flow: flow[1])\n log.debug('Candidates for flow replacement: ' + str(replacement_flows))\n \n # 3) Replace flows until all candidates have been processed, or the targetted replacement utilization is reached\n # Note that flows which have been recently replaced will not be replaced again\n replaced_utilization = 0\n for flow in replacement_flows:\n log.debug('FlowCookie: ' + str(flow[0]) + ' CurrentTime: ' + str(replacement_time) + ' LastReplacementTime: ' + str(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time))\n if self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time is not None:\n log.debug('Replacement Interval: ' + str(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time))\n \n if 
(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time is None) or (\n replacement_time - self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time >= self.flow_replacement_interval):\n log.debug('Replacing multicast flow with cookie: ' + str(flow[0]) + ' Bitrate: ' + str(flow[1]) + ' Mbps')\n self.multicast_paths_by_flow_cookie[flow[0]].update_flow_placement()\n \n replaced_utilization += flow[1]\n # Note: This causes the replacement to stop after replacing a single flow (may help prevent thrashing)\n # Uncomment this to have the module replace flows until the current link utilization minus the replacement bandwidth \n # is less than the link's congestion threshold.\n break\n \n # Note: Flows which are not actually replaced are counted toward the replacement utilization here, as it assumed that these flows\n # are already in the process of being replaced (this assumption should hold valid as long as the flow replacement interval is not\n # greater than 3 sampling intervals of the flow tracker)\n if replaced_utilization >= replacement_utilization:\n break\n \n log.debug('Replaced ' + str(replaced_utilization) + ' Mbps of flows')\n\n\ndef launch(link_weight_type = 'linear', static_link_weight = STATIC_LINK_WEIGHT, util_link_weight = UTILIZATION_LINK_WEIGHT, \n flow_replacement_mode = 'none', flow_replacement_interval = FLOW_REPLACEMENT_INTERVAL_SECONDS):\n # Method called by the POX core when launching the module\n link_weight_type_enum = LINK_WEIGHT_LINEAR # Default\n if 'linear' in str(link_weight_type):\n link_weight_type_enum = LINK_WEIGHT_LINEAR\n elif 'exponential' in str(link_weight_type):\n link_weight_type_enum = LINK_WEIGHT_EXPONENTIAL\n \n flow_replacement_mode_int = NO_FLOW_REPLACEMENT\n if 'periodic' in str(flow_replacement_mode):\n flow_replacement_mode_int = PERIODIC_FLOW_REPLACEMENT\n if 'cong_threshold' in str(flow_replacement_mode):\n flow_replacement_mode_int = CONG_THRESHOLD_FLOW_REPLACEMENT\n \n 
groupflow_manager = GroupFlowManager(link_weight_type_enum, float(static_link_weight), float(util_link_weight), flow_replacement_mode_int,\n float(flow_replacement_interval))\n core.register('openflow_groupflow', groupflow_manager)","sub_path":"mymcast/pox/openflow/groupflow.py","file_name":"groupflow.py","file_ext":"py","file_size_in_byte":39177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"224330479","text":"# Copyright 2015-2016 Nigel Small\n#\n# This file is part of Ampersand.\n#\n# Ampersand is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ampersand is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ampersand. If not, see .\n\nfrom ampersand import TokenStream, TokenType, Script\nfrom functools import reduce\nfrom itertools import combinations, product\nfrom unittest import TestCase\n\n\nclass TokenStreamTest(object):\n\n def __init__(self, source):\n self.source = source\n source_bytes = source.encode(\"utf-8\")\n self.script = Script(source_bytes)\n self.tokens = self.script.lines[0].tokens\n\n def assert_tokens(self, *expected):\n try:\n for i, token in enumerate(self.tokens):\n assert token == expected[i], \\\n \"%r does not lex correctly; token %d incorrect\\n%s != %s\" % \\\n (self.source, i, self.tokens, expected)\n except IndexError:\n assert False, \"%r does not lex correctly; tokens incorrect\\n%s != %s\" % \\\n (self.source, self.tokens, expected)\n\n\nclass NameTestCase(TestCase):\n \"\"\" A name token is composed of a head character followed by zero\n or more tail characters. Valid head characters are those classified\n by Unicode as letters (Ll|Lm|Lo|Lt|Lu), connectors (Pc) and\n currency symbols (Sc). 
Tail characters can also be any of these as\n well as numbers (Nd|Nl|No), non-spacing marks (Mn) and spacing\n combining marks (Mc).\n \"\"\"\n\n names = [\n \"n\",\n \"_\",\n \"__\",\n \"name\",\n \"a_name\",\n \"_a_name_\",\n \"__a__name__\",\n \"name_1\",\n \"_1st_name\",\n \"_1º_name\",\n \"_1ª_name\",\n \"ñâmé\",\n \"µname\",\n \"$\",\n \"¢\",\n \"£\",\n \"¤\",\n \"¥\",\n \"€\",\n \"$jquery\",\n \"STR$\",\n ]\n\n def test_names(self):\n for source in self.names:\n name_bytes = source.encode(\"utf-8\")\n tokens = list(TokenStream(name_bytes))\n assert len(tokens) == 2, \"%r does not contain 2 tokens\" % source\n token = tokens[1]\n expected_token = (TokenType.name, 0, 0, 0.0, name_bytes)\n assert token == expected_token, \"%r does not lex correctly\" % source\n\n\nclass NaturalTestCase(TestCase):\n\n numbers = [\n (\"0\", 0),\n (\"1\", 1),\n (\"1234\", 1234),\n (\"1_234\", 1234),\n (\"1__2__3__4__\", 1234),\n ]\n\n def test_natural_numbers(self):\n for source, value in self.numbers:\n TokenStreamTest(source).assert_tokens(\n (TokenType.natural, value, 0, 0.0, b\"\"),\n )\n\n def test_positive_integers(self):\n for source, value in self.numbers:\n TokenStreamTest(\"+\" + source).assert_tokens(\n (TokenType.symbol, ord(\"+\"), 0, 0.0, b\"+\"),\n (TokenType.natural, value, 0, 0.0, b\"\"),\n )\n\n def test_negative_integers(self):\n for source, value in self.numbers:\n TokenStreamTest(\"-\" + source).assert_tokens(\n (TokenType.symbol, ord(\"-\"), 0, 0.0, b\"-\"),\n (TokenType.natural, value, 0, 0.0, b\"\"),\n )\n\n def test_natural_numbers_with_units(self):\n for source, value in self.numbers:\n TokenStreamTest(source + \"B\").assert_tokens(\n (TokenType.natural, value, 0, 0.0, b\"B\"),\n )\n\n def test_natural_number_units_must_be_adjacent(self):\n for source, value in self.numbers:\n TokenStreamTest(source + \" B\").assert_tokens(\n (TokenType.natural, value, 0, 0.0, b\"\"),\n (TokenType.name, 0, 0, 0.0, b\"B\"),\n )\n\n\nclass 
RationalNumberTestCase(TestCase):\n\n fractions = [\n (\"¼\", 1, 4),\n (\"½\", 1, 2),\n (\"¾\", 3, 4),\n (\"⅐\", 1, 7),\n (\"⅑\", 1, 9),\n (\"⅒\", 1, 10),\n (\"⅓\", 1, 3),\n (\"⅔\", 2, 3),\n (\"⅕\", 1, 5),\n (\"⅖\", 2, 5),\n (\"⅗\", 3, 5),\n (\"⅘\", 4, 5),\n (\"⅙\", 1, 6),\n (\"⅚\", 5, 6),\n (\"⅛\", 1, 8),\n (\"⅜\", 3, 8),\n (\"⅝\", 5, 8),\n (\"⅞\", 7, 8),\n (\"↉\", 0, 3),\n (\"1¼\", 5, 4),\n (\"1½\", 3, 2),\n (\"1¾\", 7, 4),\n ]\n\n percentages = [\n (\"0%\", 0, 100),\n (\"1½%\", 3, 200),\n (\"37⅞%\", 303, 800),\n (\"10%\", 10, 100),\n (\"50%\", 50, 100),\n (\"100%\", 100, 100),\n (\"1000%\", 1000, 100),\n ]\n\n def test_rational_numbers(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(source).assert_tokens(\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n )\n\n def test_positive_rational_numbers(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(\"+\" + source).assert_tokens(\n (TokenType.symbol, ord(\"+\"), 0, 0.0, b\"+\"),\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n )\n\n def test_negative_rational_numbers(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(\"-\" + source).assert_tokens(\n (TokenType.symbol, ord(\"-\"), 0, 0.0, b\"-\"),\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n )\n\n def test_rational_numbers_with_units(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(source + \"kB\").assert_tokens(\n (TokenType.rational, numerator, denominator, 0.0, b\"kB\"),\n )\n\n def test_rational_number_units_must_be_adjacent(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(source + \" kB\").assert_tokens(\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n (TokenType.name, 0, 0, 0.0, b\"kB\"),\n )\n\n\nclass RealNumberTestCase(TestCase):\n\n formats = [\"{x}.\", 
\".{y}\", \"{x}.{y}\", \"{x}{e}{z}\", \"{x}.{e}{z}\", \".{y}{e}{z}\", \"{x}.{y}{e}{z}\"]\n x_values = [\"5\", \"5_\"]\n y_values = [\"8\", \"8_\"]\n e_values = [\"E\", \"e\", \"⏨\"]\n z_values = [\"1\", \"1_\", \"+1\", \"+1_\", \"+1\", \"+1_\"]\n\n numbers = set(f[0].format(**dict(zip(\"xyez\", f[1:])))\n for f in product(formats, x_values, y_values, e_values, z_values))\n\n def test_real_numbers(self):\n for source in self.numbers:\n expected_value = float(source.replace(\"_\", \"\").replace(\"⏨\", \"E\"))\n # print(\"%s -> %f\" % (source, expected_value))\n TokenStreamTest(source).assert_tokens(\n (TokenType.real, 0, 0, expected_value, b\"\"),\n )\n\n def test_real_numbers_with_units(self):\n for source in self.numbers:\n expected_value = float(source.replace(\"_\", \"\").replace(\"⏨\", \"E\"))\n # print(\"%s -> %f\" % (source, expected_value))\n TokenStreamTest(source + \"kB\").assert_tokens(\n (TokenType.real, 0, 0, expected_value, b\"kB\"),\n )\n\n def test_real_number_units_must_be_adjacent(self):\n for source in self.numbers:\n expected_value = float(source.replace(\"_\", \"\").replace(\"⏨\", \"E\"))\n # print(\"%s -> %f\" % (source, expected_value))\n TokenStreamTest(source + \" kB\").assert_tokens(\n (TokenType.real, 0, 0, expected_value, b\"\"),\n (TokenType.name, 0, 0, 0.0, b\"kB\"),\n )\n\n\nclass BytesTestCase(TestCase):\n\n numbers = [\n (\"#0\", b\"\\x00\"),\n (\"#1\", b\"\\x01\"),\n (\"#F\", b\"\\x0F\"),\n (\"#00\", b\"\\x00\"),\n (\"#01\", b\"\\x01\"),\n (\"#0F\", b\"\\x0F\"),\n (\"#FF\", b\"\\xFF\"),\n (\"#000\", b\"\\x00\\x00\"),\n (\"#001\", b\"\\x00\\x01\"),\n (\"#0FF\", b\"\\x00\\xFF\"),\n (\"#FFF\", b\"\\x0F\\xFF\"),\n (\"#0FFF\", b\"\\x0F\\xFF\"),\n (\"#FFFF\", b\"\\xFF\\xFF\"),\n (\"#00_00_00_01\", b\"\\x00\\x00\\x00\\x01\"),\n (\"#1234\", b\"\\x12\\x34\"),\n (\"#ABCD\", b\"\\xAB\\xCD\"),\n (\"#abcd\", b\"\\xAB\\xCD\"),\n (\"#1234ABCD\", b\"\\x12\\x34\\xAB\\xCD\"),\n (\"#1234abcd\", b\"\\x12\\x34\\xAB\\xCD\"),\n (\"#ABCD1234\", 
b\"\\xAB\\xCD\\x12\\x34\"),\n (\"#abcd1234\", b\"\\xAB\\xCD\\x12\\x34\"),\n (\"#12_34A_BCD\", b\"\\x12\\x34\\xAB\\xCD\"),\n (\"#AB_CD1_234\", b\"\\xAB\\xCD\\x12\\x34\"),\n (\"#01234ABCD\", b\"\\x00\\x12\\x34\\xAB\\xCD\"),\n (\"#1234_\", b\"\\x12\\x34\"),\n (\"#ABCD_\", b\"\\xAB\\xCD\"),\n ]\n\n def test_bytes(self):\n for source, value in self.numbers:\n TokenStreamTest(source).assert_tokens(\n (TokenType.bytes, len(value), 0, 0.0, value),\n )\n\n def test_bytes_and_continue_parsing(self):\n for source, value in self.numbers:\n TokenStreamTest(source + \":\" + source).assert_tokens(\n (TokenType.bytes, len(value), 0, 0.0, value),\n (TokenType.symbol, ord(\":\"), 0, 0.0, b\":\"),\n (TokenType.bytes, len(value), 0, 0.0, value),\n )\n\n\nclass SymbolTestCase(TestCase):\n\n symbols = {\n \"--\",\n \"->\",\n \">>\",\n \"---\",\n \"-->\",\n \"->>\",\n \">>>\",\n }\n\n def symbol_combinations(self):\n symbol_combinations = set()\n for combo in reduce(set.__or__, [set(combinations(self.symbols, i))\n for i in range(len(self.symbols) + 1)], set()):\n full_combo = set()\n for symbol in combo:\n for i in range(2, len(symbol) + 1):\n full_combo.add(symbol[:i])\n symbol_combinations.add(tuple(sorted(full_combo)))\n return sorted(symbol_combinations)\n\n def test_composite_symbols(self):\n for valid_symbols in sorted(self.symbol_combinations()):\n for symbol in sorted(self.symbols):\n expression = \"5 %s 8\" % symbol\n tokens = Script(expression.encode(\"utf-8\"), valid_symbols).lines[0].tokens\n symbol_tokens = [t for t in tokens if t.type == TokenType.symbol]\n combined_symbol = b\"\".join(token.s for token in symbol_tokens).decode(\"utf-8\")\n # check all symbol characters are represented once and once only\n assert combined_symbol == symbol\n # check for exact match\n if symbol in valid_symbols:\n assert len(symbol_tokens) == 1\n assert (TokenType.symbol, 0, 0, 0.0, symbol.encode(\"utf-8\")) in symbol_tokens\n # check each symbol token represents either a single token or a 
valid combo\n for token in symbol_tokens:\n assert len(token.s) == 1 or token.s.decode(\"utf-8\") in valid_symbols\n","sub_path":"test/test_lex.py","file_name":"test_lex.py","file_ext":"py","file_size_in_byte":11251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"571534065","text":"from .mapper import Mapper\n\n\nclass Metaclass(type):\n\n def __call__(cls, *args, **kwargs):\n obj = cls.__new__(cls,*args, **kwargs)\n args_list = list(args)\n if Mapper.exist(cls):\n params = Mapper.get_params(cls)\n args_list.extend(params)\n obj.__init__(*args_list, **kwargs)\n return obj","sub_path":"src/main/python/media_downloader/infrastructure/dependency_injection/meta_class.py","file_name":"meta_class.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"70497631","text":"\n\n\ndef Quicksort(list,begin,end):\n if begin>=end: #防止递归无限继续下去报错,在最后分成1个数排序之后 例如[34] 递归的时候return出来\n return\n temp=list[begin]\n i=begin\n j=end\n while i= temp:\n j-=1\n list[i]=list[j]\n\n while i 1:\n print(\"Please choose the device with which you want to continue:\\n\")\n for key, value in devices_dict.items():\n print(f\"{key}.\", value['device_id'], '-', value['device_md'])\n\n # User picks the device number and is converted to int\n # This also blocks the user to input anything but a number\n print(\"\\nPick a number that reflects the ones from the devices above.\\n\")\n\n try:\n user_input = input(\"> \")\n user_input = int(user_input)\n except Exception as e:\n print(f\"The value '{user_input}' is not a number. The script will now exit.\")\n sys.exit(0)\nelse:\n user_input = 1\n\n# Get the device id and model that the user selected\ntry:\n device_id = devices_dict[user_input]['device_id']\n device_model = devices_dict[user_input]['device_md']\nexcept Exception as e:\n print(f\"The position {user_input} does not exist.\")\n sys.exit(0)\n\nprint(f\"\\n### The device id: '{device_id}' which has the model: '{device_model}' was assigned for the script ###\")\n\n# Get the screenshots folder\ndevice_mtp = '/sdcard/'\nfolder_searched = 'Screenshots'\nstream = subprocess.check_output(['adb', '-s', device_id, 'shell', 'find', device_mtp, '-name', folder_searched])\nfolders_found = stream.decode(\"utf-8\").strip()\n\nif folders_found:\n export_folder = [item for item in folders_found.split('\\r\\n') if '.' 
not in item][0]\nelse:\n raise Exception(f\"There were no '{folder_searched}' folders found.\")\n\n# Get all photos from the folder found\nstream = subprocess.check_output(['adb', '-s', device_id, 'shell', 'cd', export_folder, ';', 'ls'])\nphotos_text = stream.decode(\"utf-8\").strip()\nphotos_list = compile(r'\\r\\n|\\r\\r\\n').split(photos_text)\nphotos_size = len(photos_list)\n\n# Create a dict with all the data existing in the screenshots folder\n# While some devices decide to only put screenshots here, some\n# manufacturers also add screen recordings here\nphotos_sorted = []\nfor photo_name in photos_list:\n stream = subprocess.check_output(['adb', '-s', device_id, 'shell', 'stat', '-c', '%y', f\"{export_folder}/'{photo_name}'\"])\n photo_attr = stream.decode(\"utf-8\").strip().split()\n photo_cdate = photo_attr[0]\n photo_ctime = search(r'.*(?=\\.|\\\\b)' ,photo_attr[1])[0]\n photos_sorted.append({'photo_name': photo_name, 'photo_cdate': photo_cdate, 'photo_ctime': photo_ctime})\n\n# Sort the list by their date and time and reverse them in order to get from the\n# oldest to the newest\nphotos_sorted = sorted(photos_sorted, \n key=lambda x: (datetime.datetime.strptime(x['photo_cdate'], '%Y-%m-%d'),\n datetime.datetime.strptime(x['photo_ctime'], '%H:%M:%S')),\n reverse=True)\n\n# Create new folder on the Desktop in which the export will be done\ndefault_export_folder = 'export_screenshots'\ndesktop_path = os.environ['USERPROFILE'] + '\\\\Desktop\\\\'\ncomplete_path = desktop_path + default_export_folder\n\nif not os.path.exists(complete_path):\n print(f\"\\n### Creating export folder at: '{complete_path}' ###\")\n os.mkdir(complete_path)\n\n# Check if the parameter volume gets a value higher than the number of items available\nif args.volume > photos_size:\n print(f\"\\n### Changing the volume to {photos_size} since there aren't {args.volume} items ###\")\n args.volume = photos_size\n\n# Exporting the files requested\nfor pos in range(args.volume):\n photo_name 
= photos_sorted[pos]['photo_name']\n stream = subprocess.check_output(['adb', '-s', device_id, 'pull', \n f'{export_folder}/{photo_name}',\n complete_path])\n result_string = stream.decode(\"utf-8\")\n print(f'\\n### {result_string} ###')","sub_path":"export_screenshots/get_screenshots.py","file_name":"get_screenshots.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"486863577","text":"def fibo(n,f,s):\n \n if f == s:\n print('f == s ', n)\n if n <= f-1:\n return 0;\n elif n == f:\n return f\n else:\n return ((fibo((n-1),f,s))**2 + fibo((n-2),f,s))\n else:\n print('data -> ', n)\n if n == f:\n return f\n elif n == s:\n return s\n else:\n return ((fibo((n-1),f,s))**2 + fibo((n-2),f,s))\n\ndef main():\n d=input()\n data=fibo(int(d),1,1)\n print(data)\n\nif __name__:\n import cProfile\n cProfile.run(\"main()\")\n","sub_path":"algorithm/customfibo.py","file_name":"customfibo.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"542487391","text":"import bs4 as bs\nimport pickle\nimport requests\nimport datetime as dt\nimport os\nimport pandas as pd\nimport pandas_datareader.data as web\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport numpy as np\n\nstyle.use('ggplot')\n\n#\tUsing the examples taught by sentdex from his youtube channel\n\n\ndef save_sp500_tickers():\n\tprint (\"Saving the S&P500 tickers...\")\t#\tJust for debug\n\tresp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n\tsoup = bs.BeautifulSoup(resp.text, \"lxml\")\n\ttable = soup.find('table', {'class':'wikitable sortable'})\n\ttickers = []\n\tfor row in table.findAll('tr')[1:]:\n\t\tticker = row.findAll('td')[0].text\n#\tTreating these two companies for this character exception\n#\tFrom the wiki they are shown as a '.' and you need to use the\n#\t'-' to pull data from Yahoo Finance\n\t\tif ticker == 'BRK.B':\n\t\t\tticker = 'BRK-B'\n\t\tif ticker == 'BF.B':\n\t\t\tticker = 'BF-B'\n\t\ttickers.append(ticker)\n\n\twith open(\"sp500tickers.pickle\", \"wb\") as f:\n\t\tpickle.dump(tickers, f)\n\t\tprint(\"File sp500tickers.pickle created\")\t# Just for Debug\n\n\treturn tickers\n\n\n#save_sp500_tickers() - nao vamos mais usar\n\ndef get_data_from_yahoo(reload_sp500=True):\n\n\tif reload_sp500:\n\t\ttickers = save_sp500_tickers()\n\telse:\n\t\twith open(\"sp500tickers.pickle\",\"rb\") as f:\n\t\t\ttickers = pickle.load(f)\n\n\tif not os.path.exists('stock_dfs'):\n\t\tos.makedirs('stock_dfs')\n\n\tstart = dt.datetime(2000,1,1)\t#\tThese dates can change\n\tend = dt.datetime(2016,12,31)\n\n\tfor ticker in tickers: #\t[:10]: Este indice do ticker\n\t#\tDeve ser modificado se quero pegar a lista inteira\n\t#\tNo caso, estou usando os 10 primeiros para poder\n\t#\ttestar\n\n\t#\tThat ticker index can be changed so you dont need to wait for all \n\t#\tthe #500 companies to be pulled (it should take some 10-20 minutes\n\t#\tdepending on your 
computer)\n\t\tprint(ticker+'...ready')\t#\tJust for debug\n\t\tif not os.path.exists('stock_dfs/{}.csv'.format(ticker)):\n\t\t\tdf = web.DataReader(ticker,'yahoo',start,end)\n\t\t\tdf.to_csv('stock_dfs/{}.csv'.format(ticker))\n\t\telse:\n\t\t\tprint('Already have {} file'.format(ticker)) #\tSentdex debug\n\n#get_data_from_yahoo()\n\ndef compile_data():\n\n\twith open(\"sp500tickers.pickle\",\"rb\") as f:\n\t\ttickers = pickle.load(f)\n\t\tprint('loaded tickers to compile')\t#\tJust for debug\n\t#tickers = save_sp500_tickers()\n\tmain_df = pd.DataFrame()\n\n\tfor count,ticker in enumerate(tickers): #\t[:10]: - see comment above\n\t\tprint('reading '+ticker)\t#just for debug\n\t\tdf = pd.read_csv('stock_dfs/{}.csv'.format(ticker))\n\t\tdf.set_index('Date', inplace=True)\n \n\t\tdf.rename(columns = {'Adj Close' : ticker}, inplace=True)\n\t\tdf.drop(['Open','High','Low','Close','Volume'], 1, inplace=True)\n\n\t\tif main_df.empty:\n\t\t\tmain_df = df\n\t\telse:\n\t\t\tmain_df = main_df.join(df, how='outer')\n\n\t\tif count % 10 == 0:\n\t\t\tprint(count)\t#\tjust for debug, counting each 10 companies\n\t\t\t\t\t\t\t#\tprocessed\n\t\tprint(ticker+' compiled. 
Next...')\t#\tjust for debug\n\n\tprint (main_df.head())\n\tprint('Saving sp500_joined_closes.csv file...')\t# \tjust for debug\n\tmain_df.to_csv('sp500_joined_closes.csv')\n\n#get_data_from_yahoo()\n#compile_data()\n\ndef visualize_data():\n\tdf = pd.read_csv('sp500_joined_closes.csv')\n#\tdf['MMM'].plot()\n#\tprint('Plotting...')\n#\tplt.show()\n\tdf_corr = df.corr()\t\t\t#\tcreating correlation table\n#\tprint(df_corr.head())\t\t#\tthis line is not necessary\n\n\tdata = df_corr.values\t\t#\tget only the values, ignore header\n\t\t\t\t\t\t\t\t#\tand index\n\tfig = plt.figure()\t\t\t#\tcreating the graphic\n\tax = fig.add_subplot(1,1,1)\n\n\theatmap = ax.pcolor(data, cmap=plt.cm.RdYlGn)\t#\tsetting up the heatmap\n\n\tfig.colorbar(heatmap)\t#\tput heat color scale (legenda) to the side\n\tax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)\n\tax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)\n\tax.invert_yaxis()\t#\tinverting the yaxis so it doesnt have any \n\t\t\t\t\t\t#\tempty space on top\n\tax.xaxis.tick_top()\t#\tMove the X ticks to the top\n\n\tcolumn_labels = df_corr.columns #\tget the names from the tickers\n\trow_labels = df_corr.index \t\t#\tget the names from the tickers\n\n\tax.set_xticklabels(column_labels) #\tSet the names for the axis\n\tax.set_yticklabels(row_labels)\t #\tset the names for the axis as well\n\tplt.xticks(rotation=90)\t\t\t #\trotate the graphic to be shown down \n\t\t\t\t\t\t\t\t\t# and to the right\n\theatmap.set_clim(-1,1)\t#\tDefine the range\n\tplt.tight_layout()\t#\tshow the data tightly\n\tplt.show()\n\n\n\n\n#\tIf this is the first time running, please run first these two:\n\n#get_data_from_yahoo()\n#compile_data()\n\n#\tTo create all related files and spreadsheets\n\n\nvisualize_data()\n\n#\tThis code has until the 8th video of sentdex playlist 'Python for \n#\tfinance' videos from 
youtube\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python_finance_2.py","file_name":"python_finance_2.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"72846163","text":"# -*- coding: utf-8 -*-\nimport io\nfrom unittest import TestCase\nimport unittest\nimport os\nfrom xhtml2pdf.document import pisaDocument\n\n__doc__ = \"\"\"\n FontFamilyCombination provides us auxiliary functions to check\n the correct operation code that check one we have one or more font-name in CSS font-family.\n \"\"\"\n\nclass FontFamilyCombination(TestCase):\n\n\n tests_folder = os.path.dirname(os.path.realpath(__file__))\n fRegular_path = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Regular.ttf')\n fBold_path = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Bold.ttf')\n\n FFRegular = \"@font-face {{font-family: '#Noto_Regular','Times New Roman'; src: url(\\'{ttf}\\');}}\".format(ttf=fRegular_path)\n FFBold = \"@font-face {{font-family: Noto_Bold; src: url(\\'{ttf}\\');}}\".format(ttf=fBold_path)\n\n fRegular = \".fRegular{font-family: '#Noto_Regular', 'Times New Roman';}\"\n fBold = \".fBold{font-family: Noto_Bold;}\"\n\n pisa_doc = None\n\n #TRUE IF WE USE MORE THAN ONE FONT-NAME AS FAMILY-NAME VALUE\n values = True\n\n HTML_CONTENT = u\"\"\"\n \n \n \n \n \n \n\n \n Regular font type\n Bold font type\n \n\n \"\"\"\n\n def setUp(self):\n #Setting values that to be used in the following methods\n html = self.HTML_CONTENT.format(FFBold=self.FFBold, FFRegular=self.FFRegular,\n fRegular=self.fRegular, fBold=self.fBold)\n with io.BytesIO() as pdf_file:\n self.pisa_doc = pisaDocument(src=html,\n dest=pdf_file)\n\n\n def test_check_more_than_one_fontName(self):\n \"\"\"\n this function help us to check is the font-family contain a font-name list.\n \"\"\"\n fonts = []\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if isinstance(font,list):\n result = font\n break\n #here we are checking if fonts in pdf-doc contain a font-name list\n self.assertIsInstance(result,list)\n\n @unittest.skipIf(values == 
True,'\"test_check_only_one_fontName\" just need to run if font-family only have one font-name')\n def test_check_only_one_fontName(self):\n \"\"\"\n this function help us to check is the font-family contain only one font-name .\n \"\"\"\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)\n","sub_path":"tests/test_CSS_font-family_font_combination.py","file_name":"test_CSS_font-family_font_combination.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"319605014","text":"# #################################################################################\nfrom __future__ import division, print_function;\n\nimport os, sys, time, datetime;\nimport numpy as np;\nimport matplotlib.pyplot as plt;\nfrom matplotlib import cm;\n\nplt.ion();\n\nimport tkinter as tk;\nimport tkinter.font as tkFont;\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename, askdirectory;\nimport tkinter.messagebox as mBox;\nimport tkinter.filedialog as tkFileDialog;\nfrom tkinter import ttk # separator\n\n# to show tooltip box,\nimport platform;\nfrom platform import python_version\nfrom sys import platform as _platform\nfrom pprint import *\n\nimport pdb;\n\n# set path to import myModule/*.py\nmyHomeDir = os.path.expanduser(\"~\");\nmyModuleDirCurr = os.path.join(os.getcwd(), 'myModules');\nmyModuleDirCloud = os.path.join(os.path.split(os.getcwd())[0], 'myModules');\n\nif os.path.exists(myModuleDirCurr): myModuleDir = myModuleDirCurr;\nelif os.path.exists(myModuleDirCloud): myModuleDir = myModuleDirCloud;\nelse: myModuleDir = None;\n\nif myModuleDir != None: sys.path.insert(0, myModuleDir);\n\nimport re;\n\nimport mcsPstProc as mPstP; # post-processing MCS data\nfrom mcsPstProc import colored, cPrint, getCurrLineNo;\n\nfrom myToolTip import *;\n\nGPS = None;\n\ngpsLblTxt = \"MCS\\nPst-Proc\\n\";\n\nimport multiprocessing; # get number of cores available, ...\n\nimport mcsHelps as HELP; # import help text\nimport mcsGlobalVars as MGV; # import help text\n\ncsFont = {'fontname':'Comic Sans MS', 'fontsize':'14'};\nhFont = {'fontname':'Helvetica', 'fontsize':'14'};\nmFont = {'fontname':'Monaco', 'fontsize':'14'};\n\nroot = tk.Tk();\n\nbShowToolTip = True; # False;\n\nclass GUI_MCS():\n global GPS, gpsLblTxt;\n\n def __init__(self):\n # tk.tk.Frame.__init__(self, root, height=42, width=42)\n # ===============initialize GUI frames=========================\n self.frm0, self.frm1, self.frm2, self.frm3, self.frm4 = None, None, 
None, None, None;\n\n self.tmStr = None;\n\n if sys.platform in [\"win32\", \"windows\", \"linux\"]: self.bUnit, self.dUnit = \"s/mm2\", \"mm2/s\";\n else: self.bUnit, self.dUnit = u\"mm\\u00B2/s\", u\"\\u00D710\\u207B\\u00B3 s/mm\\u00B2\";\n\n self.mcsPstPro = tk.BooleanVar(); # Run mcs post processing\n self.mcsPstPro.set(True);\n self.bMaxTxt, self.delGTxt, self.delDeltaTxt = u\"maxB:%s\" % self.bUnit, \"\\u0394G\\u20D7 (mT/m)\", \"\\u0394Delta (ms)\";\n self.varyDelTxt, self.varyGdTxt, self.constBTxt = u\"vary \\u0394\", \"vary G\\u20D7\", \"cnst B\";\n\n self.postProCal, self.initDsp = tk.BooleanVar(), tk.BooleanVar();\n self.postProCal.set(True), self.initDsp.set(True);\n self.varyDelGdB = None;\n\n self.bMkDiffMovie, self.openHdrTxt, self.bCalcT2 = tk.BooleanVar(), tk.BooleanVar(), tk.BooleanVar();\n\n self.bMkDiffMovie.set(False), self.openHdrTxt.set(False), self.bCalcT2.set (False);\n self.bMcsDataLoaded = None;\n\n self.constantDiffTime, self.bMpiPySigComp = tk.BooleanVar(), tk.BooleanVar();\n self.constantDiffTime.set(True), self.bMpiPySigComp.set(False);\n\n self.smallDel, self.bigDel, self.GdStep, self.nbVals = tk.DoubleVar(), tk.DoubleVar(), tk.DoubleVar(), tk.IntVar();\n self.smallDel.set(10.0), self.bigDel.set(100.0), self.GdStep.set(10.0), self.nbVals.set(10);\n\n self.grdDirFromZ = tk.DoubleVar();\n self.grdDirFromZ.set(90.);\n\n # to display max bVal for mcsPostProcess, ...\n self.gamma = 6.28318* 42577481.6;\n self.dGradNB, self.dGradNBL = None, []; # related to diffusion table, ...\n\n self.TE_ms, self.maxG = tk.DoubleVar(), tk.DoubleVar();\n\n # to read a text G-Waveform table (timw_sec, amplitude, duration_sec)\n self.bGWaveTblRead = None;\n self.gWaveFileName, self.dGWaveTbl = None, None;\n\n self.dirFileName = None;\n self.dataFileName = []; # mcs data file names, ...\n\n self.fInfo = None # file info populated from the function mPstP.selectDataFiles()\n\n self.btnPsP = None;\n #self.btnPsP_0 = None;\n self.cBtnPsP, self.cBtnPsP_2, 
self.lblPsP, self.entPsP, self.cBtnPsP_3 = [], [], [], [], [];\n self.btnPsP_4 = [];\n\n self.cBtnPsPVal, self.cBtnPsP_2Val, self.cBtnPsP_3Val, self.Btn4Val = [], [], [], [];\n # ========================MPI post processing Ends=================\n\n self.w0Wd, self.w0Ht, self.w0X, self.w0Y = None, None, None, None;\n\n # colors for checkButton\n self.fgClrC, self.bgClrC = \"yellow\", \"blue\"; # \"black\";\n self.fgClrB, self.bgClrB = \"navy\", \"gray64\";\n self.hlBgColor = \"green\";\n\n self.colorMap4Dsp = eval(\"cm.gray\");\n\n if _platform in [\"linux\", \"linux2\"]:\n self.titleFont = tkFont.Font(family=\"Ariel\", weight='bold', size=10);\n # self.titleFont = tkFont.Font(family=\"Fixedsys\", weight='bold', size=10);\n self.procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n self.lableFont = tkFont.Font(family=\"Ariel\", weight='bold', size=10);\n self.buttonFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.buttonFontSm= tkFont.Font(family=\"Helvetica\", weight='bold', size= 8);\n self.inputFont = tkFont.Font(family=\"Monaco\", weight='bold', size=10);\n self.boldFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.optFnt, self.optFntSz = \"Fixedsys\", 9;\n self.infoFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n winWd, winHt = 750, 400;\n elif _platform in [\"win32\", \"windows\"]:\n self.titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.lableFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n self.buttonFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.buttonFontSm= tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n self.inputFont = tkFont.Font(family=\"Monaco\", weight='bold', size= 9);\n self.boldFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n self.infoFont = tkFont.Font(family=\"Helvetica\", 
weight='bold', size= 8);\n self.optFnt, self.optFntSz = \"Fixedsys\", 9;\n winWd, winHt = 750, 400;\n elif _platform == \"darwin\":\n self.titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=14);\n self.procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.lableFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n self.buttonFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n self.buttonFontSm= tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.inputFont = tkFont.Font(family=\"Monaco\", weight='bold', size=12);\n self.boldFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n self.boldFont13 = tkFont.Font(family=\"Helvetica\", weight='bold', size=13);\n self.infoFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n self.optFnt, self.optFntSz = \"Monaco\", 11;\n winWd, winHt = 800, 400;\n\n self.initGUI (); # Initialize GUI frame\n\n def gui_makeInfoFrame (self):\n fgClrB, bgClrB = 'navy', 'black';\n hlBgColor = \"green\"\n if _platform in [\"win32\", \"windows\", \"linux\"]:\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n else:\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=14);\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=16);\n\n self.hLine01 = ttk.Separator(self.frm0, orient=\"horizontal\"\n ).grid(row=0, column=1, columnspan=5, pady=1, sticky=\"ewns\")\n\n # png -> gif: http://image.online-convert.com/convert-to-gif\n self.myLogo = tk.PhotoImage(file=\"myLogoBGW.gif\"); # *.gif with ~ 72x100 pixels\n self.myLogo = self.myLogo.subsample(5,5); # only integer fraction\n #self.myLogo = self.myLogo.zoom(4, 4);\n\n self.myLogo_L = tk.Label(self.frm0, image=self.myLogo, bg='gray')\n self.myLogo_L.image = self.myLogo; # become transparent without this reference\n self.myLogo_L.grid (row=0, rowspan=4, column=0, 
sticky=\"ewns\");\n\n self.myLogo_L.bind (\"\",\n lambda event: getHelp(\"Yes for further HELP\",\n hlp.myGreeting_L, hlp.myGreeting_L2, \"no\"));\n\n # The selBox: Use a StringVar to access the selector's value\n # Look for \"unicode, Hangul Syllables\" on the web\n titleText = (\"myMCS:\\tv.08122021: -Click HERE for contact & more info\");\n titleText += (\"\\n\\t- postProcess MCS position data from MPI Server\");\n\n widB = 6 if sys.platform in [\"darwin\"] else 12;\n self.myGreeting_L = tk.Label(self.frm0, text=titleText, font=self.boldFont,\n fg='white', bg='black', padx=4, pady=0, width=78,\n anchor=\"w\", justify=tk.LEFT);\n self.myGreeting_L.grid (row=0, rowspan=1, column=1, columnspan=6, sticky=\"wens\");\n\n # color map for display\n self.colorMap4Dsp_L = tk.Label(self.frm0, text=\"colorMap\",\n borderwidth=1, relief=\"groove\",\n font=self.boldFont, fg=\"white\", bg=\"navy\");\n self.colorMap4Dsp_L.grid (row=1, column=1, sticky=\"wesn\");\n self.colorMap4Dsp = tk.StringVar();\n self.colorMap4Dsp_O = tk.OptionMenu(self.frm0, self.colorMap4Dsp,\n \"gray\", \"Blues\", \"Greens\", \"Reds\",\n \"OrRd\", \"YlGn_r\", \"hot\", \"cool\",\n \"jet\", \"rainbow\", \"ocean\",\n \"magma\", \"plasma\", \"BuGn\");\n self.colorMap4Dsp_O.grid (row=1, column=2, sticky=\"wesn\");\n self.colorMap4Dsp_O.config(font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=6);\n self.colorMap4Dsp_O['menu'].config(font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.colorMap4Dsp.set(\"magma\");\n mgv.colorMap4Dsp = eval(\"cm.\" + self.colorMap4Dsp.get());\n if bShowToolTip:\n tip = ToolTip(self.colorMap4Dsp_O, \"{colorMap4Dsp} Choose a colormap for display.\");\n\n self.colorMap4Dsp_L.bind (\"\", lambda event:\n getHelp(\"Yes for further HELP\", hlp.hlpColorMap_L, \"no\"));\n\n # Pulse sequence type (PST) for post-processing\n self.plsSeqType_L = tk.Label (self.frm0, text=\"plsSeqType\",\n borderwidth=1, relief=\"groove\",\n font=self.boldFont, fg=\"white\", 
bg=\"navy\");\n self.plsSeqType_L.grid (row=1, column=3, sticky=\"wesn\");\n self.plsSeqType = tk.StringVar();\n self.plsSeqType_O = tk.OptionMenu(self.frm0, self.plsSeqType, \"SpinEcho\", \"StimEcho\");\n self.plsSeqType_O.grid (row=1, column=4, sticky=\"wesn\");\n self.plsSeqType_O.config(font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=6);\n self.plsSeqType_O['menu'].config(font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.plsSeqType.set(\"StimEcho\");\n mgv.plsSeqType = self.plsSeqType.get();\n if bShowToolTip:\n tip = ToolTip(self.plsSeqType_O, \"{plsSeqType} Choose a pulse sequence type for post-processing.\");\n\n self.plsSeqType_L.bind (\"\", lambda event:\n getHelp(\"Yes for further HELP\", hlp.hlpPlsSeqType_L, \"no further help\", \"no\"));\n\n # T2 values of H2O in ics, mls, ecs in ms\n self.t2Vals_L = tk.Label (self.frm0, text=\"T\\u2082: (IA.ML.EA) (ms)\",\n borderwidth=1, relief=\"groove\",\n font=self.boldFont, fg=\"white\", bg=\"navy\");\n self.t2Vals_L.grid (row=1, column=5, sticky=\"wesn\");\n self.t2Vals = StringVar ( );\n self.t2Vals_E = tk.Entry (self.frm0, textvariable=self.t2Vals, width=8,\n state=\"disabled\", justify=tk.CENTER, bd=1);\n self.t2Vals_E.grid (row=1, column=6, sticky=\"ewns\", pady=1);\n self.t2Vals.set (\"100.10.100\");\n self.t2Vals_E.bind (\"\", lambda event: self.updateProtFtn(\"t2Vals_E\"));\n self.t2Vals_L.bind (\"\", lambda event:\n getHelp(\"Yes for further HELP\", hlp.hlpT2Vals_L, \"no further help\", \"no\"));\n\n # global functions\n self.UP_B = tk.Button(self.frm0, text='update protocol', font=self.titleFont,\n fg=\"darkgreen\", bg=bgClrB, padx=3, width=widB,\n command=lambda: self.updateProtFtn(\"UP_B\"));\n self.UP_B.grid (row=2, column=1, columnspan=2,sticky=\"wens\");\n\n self.CF_B = tk.Button(self.frm0, text='close\\n all figs.', font=self.titleFont,\n fg=fgClrB, bg=bgClrB, padx=3, width=widB,\n command=lambda: self.closeFigFtn());\n self.CF_B.grid (row=2, column=3, 
sticky=\"wens\");\n\n self.EP_B = tk.Button(self.frm0, text='enter pdb\\nh/hlp, c/cont., l/lst, j/jmp', padx=3,\n width=widB, fg=\"darkorange\", font=self.titleFont,\n command=self.enterPdbFtn);\n self.EP_B.grid (row=2, column=4, columnspan=2, sticky=\"ewns\");\n self.EP_B.bind (\"\",\n lambda event: getHelp(\"Yes for further HELP\", hlp.hlpEP, \"no further help\"));\n\n self.QT_B = tk.Button(self.frm0, text='Q U I T', padx=3,\n width=widB - 2*(_platform in [\"linux\"]),\n fg=\"red\", font=self.titleFont,\n command=self.quitFtn);\n self.QT_B.grid (row=2, column=6, columnspan=1, sticky=\"ewns\");\n\n self.hLine02 = ttk.Separator(self.frm0, orient=\"horizontal\").grid(row=3, column=1, columnspan=6, pady=1, sticky=\"ewns\");\n\n def quitFtn (self):\n global ugv, udv;\n\n myMod1 = \"quitFtn\";\n\n root.destroy();\n sys.exit(); # self.quit;\n\n def gui_mcsPstPro(self, row1=0, col1=1):\n myMod12 = \"guiPsP\";\n\n fgclr, fgclr_entry, bgclr = 'white', 'black', 'gray32'\n fgClrC, bgClrC = self.fgClrC, self.bgClrC;\n\n try:\n self.btnPsP.destroy();\n for btn in self.cBtnPsP: btn.destroy();\n except: pass;\n\n def InItDisplay (row1=1, col1=2):\n row1 += 1; # for hLine31\n self.cBtnPsP_2.append(self.guiChkButton(self.frm3, \"opnHdr\", self.openHdrTxt,\n fg1=fgClrC, bg1=bgClrC, row1=row1, col1=col1,\n wid1 = 7 + 2*(_platform in [\"linux\"]),\n state1=\"disabled\",\n indOn=0, def1=True, cmd1=None));\n self.cBtnPsP_2Val.append ( self.openHdrTxt );\n\n self.cBtnPsP_2Val.append ( self.bCalcT2 );\n self.bCalcT2.set (False);\n\n col1 += 1;\n self.btnPsP_2 = self.guiButton(self.frm3, 'display Geometry',\n fontB=self.buttonFont,\n row1=row1, col1=col1, colSpan=3,\n fg1=self.fgClrB, hlBgClr1=\"green\",\n cmd1=lambda: self.loadNDisplayFtn(\"EKJ\"));\n\n # read-only\n col1 += 3;\n self.TE_ms_L = tk.Label (self.frm3, text=\"TE (ms)\",\n borderwidth=1, relief=\"groove\",\n fg=fgclr, bg=bgclr,\n font=self.boldFont);\n self.TE_ms_L.grid (row=row1, column=col1, sticky=\"wesn\");\n\n self.TE_ms 
= tk.DoubleVar();\n self.TE_ms_E = tk.Entry (self.frm3, textvariable=self.TE_ms, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.TE_ms_E.grid (row=row1, column=col1+1, sticky=\"ewns\", pady=1);\n self.TE_ms.set(20); # s/m2\n self.TE_ms_E.bind (\"\", lambda event: self.updateProtFtn(\"TE_ms_E\"));\n mgv.TE_ms = self.TE_ms.get();\n\n def postProCal (row1=2, col1=1):\n '''\n try:\n self.btnPsP_4.destroy();\n\n #for btn in self.cBtnPsP_3: btn.destroy();\n for lbl in self.lblPsP: lbl.destroy();\n for ent in self.entPsP: ent.destroy();\n except: pass;\n '''\n\n row1 += 1; # hLine31\n if sys.platform in [\"win32\", \"windows\", \"linux\"]:\n lbls = [u\"\\u2220 (G\\u20D7, e\\u2081)\", \"\\u03B4 (ms)\", \"\\u0394 (ms)\", \"nbVals\"];\n else: lbls = [u\"\\u2220 (G\\u20D7, \\u00EA\\u2081)\", \"\\u03B4 (ms)\", \"\\u0394 (ms)\", \"nbVals\"];\n\n guiVarPst = [self.grdDirFromZ, self.smallDel, self.bigDel, self.nbVals];\n\n for i in range (len(lbls) - 1):\n self.lblPsP.append(self.guiLabel(self.frm3, lbls[i], row1=row1 + i/3,\n col1=col1 + 1 + 2*(i%3), fontL=self.lableFont,\n fg1=fgclr, bg1=bgclr, wid1=6));\n entTmp, varTmp = self.guiEntry (self.frm3, guiVarPst[i], wid1=7, bd1=2,\n row1=row1 + i//3, col1=col1 + 2 + 2*(i%3),\n fg1=fgclr_entry);\n\n self.entPsP.append (entTmp);\n guiVarPst[i] = varTmp;\n\n # nbVals separately added\n self.lblPsP.append(self.guiLabel(self.frm3, lbls[3], row1=5,\n col1=2, fontL=self.lableFont,\n fg1=fgclr, bg1=bgclr, wid1=7));\n entTmp, varTmp = self.guiEntry (self.frm3, guiVarPst[3], wid1=7, bd1=2,\n row1=5, col1=3, fg1=fgclr_entry);\n\n self.entPsP.append (entTmp);\n guiVarPst[3] = varTmp;\n\n # new additional row\n self.maxG_L = tk.Label (self.frm3, text=\"max G (mT/m)\",\n borderwidth=1, relief=\"groove\",\n fg=fgclr, bg=bgclr, font=self.boldFont);\n self.maxG_L.grid (row=4, column=1, columnspan=2, sticky=\"wesn\");\n\n self.maxG = tk.DoubleVar();\n self.maxG_E = tk.Entry (self.frm3, textvariable=self.maxG, 
width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.maxG_E.grid (row=4, column=3, sticky=\"ewns\", pady=1);\n self.maxG.set(80); # s/m2\n self.maxG_E.bind (\"\", lambda event: self.updateProtFtn(\"maxG_E\"));\n mgv.maxG = self.maxG.get();\n\n\n # new row: 5\n self.bMx_DelG_DelDelta = tk.StringVar();\n self.bMx_DelG_DelDelta_O = tk.OptionMenu(self.frm3, self.bMx_DelG_DelDelta,\n self.bMaxTxt, self.delGTxt, self.delDeltaTxt);\n self.bMx_DelG_DelDelta_O.config (font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=8);\n self.bMx_DelG_DelDelta_O['menu'].config (font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.bMx_DelG_DelDelta_O.grid (row=5, column=4, sticky=\"wesn\")\n self.bMx_DelG_DelDelta_O.config (highlightbackground=bgclr);\n self.bMx_DelG_DelDelta.set(self.bMaxTxt);\n self.bMx_DelG_DelDelta_O.bind (\"\",\n lambda event: self.updateProtFtn(\"bMx_DelG_DelDelta_O\"));\n\n # increment Delta\n self.DeltaStep = tk.DoubleVar();\n self.DeltaStep_E = tk.Entry(self.frm3, textvariable=self.DeltaStep, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.DeltaStep_E.grid (row=5, column=5, sticky=\"ewns\", pady=1);\n self.DeltaStep_E.bind (\"\", lambda event: self.updateProtFtn(\"DeltaStep_E\"));\n self.DeltaStep.set(100); # in ms unit, ...\n mgv.DeltaStep = 1e-3*self.DeltaStep.get();\n\n # icrement Gd, ...\n self.GdStep = tk.DoubleVar();\n self.GdStep_E = tk.Entry(self.frm3, textvariable=self.GdStep, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.GdStep_E.grid (row=5, column=5, sticky=\"ewns\", pady=1);\n self.GdStep.set(10.0); # mT/m unit\n self.GdStep_E.bind (\"\", lambda event: self.updateProtFtn(\"GdStep_E\"));\n mgv.GdStep = self.GdStep.get();\n\n self.bMax = tk.IntVar();\n # self.bMax = tk.DoubleVar();\n self.bMax_E = tk.Entry (self.frm3, textvariable=self.bMax, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n 
self.bMax_E.grid (row=5, column=5, columnspan=2, sticky=\"ewns\", pady=1);\n self.bMax.set(10000); # s/m2\n self.bMax_E.bind (\"\", lambda event: self.updateProtFtn(\"bMax_E\"));\n mgv.bMax = 1e6*self.bMax.get();\n\n # option to select constant delta, Gd, or B (with constant delta)\n self.varyDelGdB = tk.StringVar();\n self.varyDelGdB_O = tk.OptionMenu(self.frm3, self.varyDelGdB,\n self.varyDelTxt, self.varyGdTxt, self.constBTxt);\n self.varyDelGdB_O.config (font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=5);\n self.varyDelGdB_O['menu'].config (font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.varyDelGdB_O.grid (row=5, column=1, sticky=\"wesn\")\n self.varyDelGdB_O.config (highlightbackground=bgclr);\n self.varyDelGdB.set(self.varyGdTxt);\n\n mgv.varyDelTxt, mgv.varyGdTxt, mgv.constBTxt = self.varyDelTxt, self.varyGdTxt, self.constBTxt;\n\n # A column is push out in above for loop\n #self.cBtnPsP_3.append(self.guiChkButton(self.frm3, \" ... \", self.bMpiPySigComp,\n # row1=5, col1=6, fg1=fgClrC, bg1=bgClrC, indOn=0,\n # wid1=7, def1=True, cmd1=postProCal));\n col1 += 1;\n\n guiVarPst += [self.GdStep, self.varyDelGdB, False]; #, self.constantDiffTime];\n\n # self.cBtnPsP_3Val.append ( False );\n\n self.btnPsP_4 = self.guiButton(self.frm3, 'R U N',row1=5, col1=7, wid1=8,\n state1=\"disabled\",\n fontB=\"darkgreen\", # self.buttonFont,\n fg1=self.fgClrB, hlBgClr1=self.hlBgColor,\n cmd1=lambda: mPstP.mcsPostPro(guiVarPst, mgv,\n mPstP.readFiles(False, mgv, self.fInfo)).run(mgv));\n\n # separator, ...\n self.hLine31 = ttk.Separator(self.frm3, orient=\"horizontal\"\n ).grid(row=0, column=1, columnspan=7, pady=1, sticky=\"ewns\")\n\n dialogTxt = \"Select 3 files (initGeometry*.txt, *_mcs.txt, *_mcs.dat)\";\n lblTxt = 'select data: [gm*.dat, gm*_mcs.txt, gm*_mcs.dat]\\n';\n lblTxt += '... (1). geometry, (2). *_mcs.dat, (3). 
*_mcs.txt ...';\n self.btnPsP = tk.Button(self.frm3, text=lblTxt, fg=self.fgClrB, font=self.buttonFont,\n wid = 8 + 2*(_platform == \"linux\"),\n command=lambda: self.selectDataFiles(dialogTxt));\n self.btnPsP.grid (row=1, column=1, columnspan=7, sticky=\"ewns\");\n self.btnPsP.config (highlightbackground = self.hlBgColor);\n if bShowToolTip:\n ToolTip(self.btnPsP, \"{self.btnPsP} Load input geometry and MCS data/text files.\");\n\n # gradient-waveform table\n self.cBtnPsP.append(self.guiChkButton(self.frm3, \"dsp Geom\", self.initDsp,\n row1=2, col1=1, fg1=fgClrC, bg1=bgClrC, indOn=0,\n def1=True, state1=\"disabled\", cmd1=InItDisplay));\n\n self.cBtnPsP.append(self.guiChkButton (self.frm3, \"calc PostP\", self.postProCal,\n row1=3, col1=1, pady1=2,fg1=fgClrC, bg1=bgClrC,\n indOn=0, state1=\"disabled\",\n cmd1=postProCal));\n\n self.postProCal.set ( True );\n\n self.cBtnPsPVal.append ( self.initDsp );\n self.cBtnPsPVal.append ( self.postProCal );\n\n #\n InItDisplay();\n postProCal ();\n self.UP_B.invoke();\n\n self.hLine32 = ttk.Separator(self.frm3, orient=\"horizontal\"\n ).grid(row=6, column=1, columnspan=7, pady=1, sticky=\"ewns\")\n\n def selectDataFiles(self, dialogTxt):\n myMod1 = \"loadFiles\";\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]:Q: \".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n\n print (txtC + dialogTxt);\n\n geomFileName, hdrFileName, datFileName = None, None, None;\n plt.close(\"all\");\n\n self.fInfo = mPstP.selectDataFiles(dialogTxt);\n\n err = False;\n if None not in self.fInfo:\n mgv.hdrFileName = hdrFileName = os.path.split(self.fInfo[0])[1];\n mgv.datFileName = datFileName = os.path.split(self.fInfo[1])[1];\n mgv.geomFileName = geomFileName = os.path.split(self.fInfo[2])[1];\n\n hdrName = geomFileName.split(\".\")[0];\n if not (hdrName in hdrFileName and hdrName in datFileName):\n err = True;\n txt = (\"Seleched files are are not consistant.\");\n txtC += mPstP.colored(txt, \"red\", attrs=[\"bold\"]);\n\n mgv.tmStr = 
self.tmStr = time.strftime('%m/%d/%Y', time.gmtime(os.path.getmtime(self.fInfo[0]))).split(\"/\");\n else:\n err = True;\n txt = (\"Selection of MCS file set not complete.\");\n txtC += mPstP.colored(txt, \"red\", attrs=[\"bold\", \"blink\"]);\n print (txtC);\n\n txt += \"\\nSelect correct files.\";\n res = mBox.showinfo(\"File-selection Error\", txt, icon=\"warning\");\n\n # check if , ...\n if None in [mgv.geomFileName, mgv.hdrFileName, mgv.datFileName] or err: return False;\n # else: self.btnPsP_4.config(state=\"normal\");\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n txt = \"Selected data files = \\n\";\n txt += \" geomDat = %s\\n\" % (geomFileName);\n txt += \" mcsData = %s\\n\" % (datFileName );\n txt += \" mcsText = %s\\n\" % (hdrFileName );\n print (txtC + txt);\n\n # load and display data, ...\n self.loadNDisplayFtn(\"loadFiles\");\n\n success = self.updateProtFtn(\"%s\" % myMod1);\n\n if None in self.fInfo:\n err = True;\n txt = (\"No file has been selected.\");\n txtC += mPstP.colored(txt, \"red\", attrs=[\"bold\"]);\n print (txtC);\n\n if err:\n txt += \"\\nSelect correct files.\";\n res = mBox.showinfo(\"fileSelection Error\", txt, icon=\"warning\");\n\n def guiRadioButton (self, frm, txt, var, row1=0, col1=0, fg1=None, bg1=None, cmd1=None):\n rbtn = tk.Radiobutton(frm, text=txt, variable = var, value = 1,\n fg=fg1, bg=bg1, relief=tk.RIDGE);\n rbtn.grid (row=row1, column=col1);\n return rbtn;\n\n def guiButton (self, frm, txt, row1=0, col1=0, rowSpan=1, colSpan=1, wid1=8,\n state1=\"normal\",\n fg1=None, bg1=None, fontB=None, padx1=None, pady1=None,\n hlBgClr1=None, cmd1=None):\n if fontB == None: fontB = \"-weight bold\";\n\n cbtn = tk.Button(frm, text=txt, fg=fg1, bg=bg1, relief=tk.RIDGE,\n padx=padx1, pady=pady1, state=state1,\n command=cmd1, font=fontB, width=wid1);\n cbtn.grid (row=int(row1), column=int(col1), rowspan=rowSpan, columnspan=colSpan, sticky=\"ewns\");\n cbtn.config 
(highlightbackground = hlBgClr1);\n\n return cbtn;\n\n def guiChkButton(self, frm, txt, var, indOn=0, row1=0, col1=0, rowSpan=1,\n wid1=8, colSpan=1, fg1=None, bg1=None, fontC=None,\n padx1=1, pady1=2, def1=True, cmd1=None, state1=\"normal\"):\n if fontC == None: fontC = \"-weight bold\";\n\n chkBtn = tk.Checkbutton(frm, text=txt, variable=var, indicatoron=indOn,\n padx=padx1, pady=pady1, state=state1,\n fg=fg1, bg=bg1, command=cmd1, relief=tk.RIDGE,\n font=fontC, width=wid1);\n var.set(def1);\n\n chkBtn.grid(row=int(row1), column=int(col1), rowspan=rowSpan, columnspan=colSpan, sticky=\"ewns\");\n\n return chkBtn;\n\n def guiLabel(self, frm, txt, row1=0, col1=0, fg1= None, bg1=None, fontL=None, wid1=8, padx1=2, pady1=0):\n if fontL == None: fontL = \"-weight bold\";\n\n lbl = tk.Label(frm, text=txt, fg=fg1, bg=bg1, padx=4, relief=tk.RIDGE,\n font=fontL, width=wid1);\n lbl.grid (row=int(row1), column=int(col1), sticky=\"ewns\");\n return lbl;\n\n def guiEntry (self, frm, txtVar, row1=0, col1=0, fg1=None, bg1=None, font1=None, wid1=5, padx1=1, bd1=2):\n if font1 == None: font1 = self.inputFont;\n\n ent = tk.Entry(frm, textvariable=txtVar, fg=fg1, bg=bg1, bd=bd1, relief=tk.RIDGE,\n font = self.inputFont, justify=tk.CENTER, width=wid1);\n ent.grid(row = int(row1), column=int(col1), sticky=\"ewns\");\n ent.bind (\"\", lambda event: self.updateProtFtn(txtVar));\n\n return ent, txtVar;\n\n def initGUI(self):\n self.master = root;\n\n eachFrm = 'gray48';\n\n self.frmHgt0, self.frmHgt3, self.frmHgt4 = 108, 152, 64;\n\n self.w0Wd = 648;\n self.w0Ht = self.frmHgt0 + self.frmHgt3 + self.frmHgt4 + 13;\n\n self.w0X, self.w0Y = self.master.winfo_screenwidth() - self.w0Wd, 0;\n self.master .geometry(\"%dx%d+%d+%d\" % (self.w0Wd, self.w0Ht, self.w0X, self.w0Y))\n\n titleTxt = ('Monte-Carlo Simulation of Water Diffusion:');\n if _platform not in [\"win32\", \"windows\"]:\n hostName = platform.node();\n loginName = os.getlogin();\n titleTxt += (' {OS: %s, %s@' % (platform.system(), 
loginName));\n if _platform in [\"darwin\"]:\n titleTxt += ('%s}' % hostName[:hostName.index(\".\")]);\n elif _platform in [\"linux\"]: titleTxt += ('%s}' % hostName);\n\n self.master.title( titleTxt);\n\n # root.title(\"Monte-Carlo Simulation of water diffusion\") # Set the window title\n self.master.minsize(width=self.w0Wd, height=self.w0Ht);\n self.master.maxsize(width=self.w0Wd, height=self.w0Ht);\n self.master.resizable(width=False, height=False);\n\n # tk.Frame 0 is for creating a geometry\n self.frm0 = tk.Frame(self.master, width=self.w0Wd-6, height=self.frmHgt0,\n bg=eachFrm, relief=tk.RIDGE);\n self.frm0.grid(row=0, column=0, padx=3, pady=2);\n self.frm0.grid_propagate(False);\n\n #tk.Frame 3 is for doing a postprocessing\n self.frm3 = tk.Frame(self.master, width=self.w0Wd-6, height=self.frmHgt3,\n bg=eachFrm, relief=tk.RIDGE);\n self.frm3.grid(row=1, column=0, padx=3, pady=2);\n self.frm3.grid_propagate(False);\n\n #tk.Frame 4 is for doing a postprocessing\n self.frm4 = tk.Frame(self.master, width=self.w0Wd-6, height=self.frmHgt4,\n bg=eachFrm, relief=tk.RIDGE);\n self.frm4.grid(row=2, column=0, padx=3, pady=2);\n self.frm4.grid_propagate(False);\n\n self.master.configure(bg=\"darkred\"); # set the window background color\n\n def closeFigFtn (self):\n plt.close(\"all\");\n\n def enterPdbFtn (self):\n txtC = mPstP.colored(\"Entered debugging mode. 
h/help, c/continue, q/quit.\", \"red\", attrs=[\"bold\"]);\n print (txtC);\n pdb.set_trace();\n\n def loadNDisplayFtn (self, txtIn):\n myMod1 = \"loadNDisplayFtn\";\n\n mPstP.loadNDisplay(mgv, [self.bMkDiffMovie, self.openHdrTxt],\n mPstP.readFiles(False, mgv, self.fInfo));\n\n if self.mcsPstPro.get():\n mgv.bMcsDataLoaded = self.bMcsDataLoaded = True;\n else: mgv.bMcsDataLoaded = self.bMcsDataLoaded = False;\n\n # set these values and display on MCS window, ..\n cPrint(\"[{:^10}:L{:0>4}]: : mcs data successfully loaded:\".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n\n def nullFtn (self, txtIn):\n myMod1 = \"Null Button\";\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()), \"green\", attrs=[\"bold\"]);\n print (txtC + \" %s NOT USED. \" % txtIn);\n\n def updateProtFtn (self, txtIn):\n myMod1 = \"updateProt\"\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n print (txtC + \"update-protocol invoked by {%s}.\" % txtIn);\n\n if self.bMcsDataLoaded:\n if 2*self.smallDel.get() > mgv.dtDiff:\n self.smallDel.set(mgv.dtDiff/2);\n\n diffTime = (self.smallDel.get() + self.bigDel.get());\n if mgv.dtDiff < diffTime:\n txt = (\"%.1f < (%.1f + %.1f)\" % (mgv.dtDiff, self.smallDel.get(), self.bigDel.get()));\n txt += (\"\\n\\t--> Reduce gradient duration and separation.\");\n print (txtC + txt);\n\n self.bigDel.set(mgv.dtDiff - self.smallDel.get());\n\n self.mcsPstPro.set ( True );\n self.initDsp.set ( True );\n self.postProCal.set( True );\n\n # update checkButton fg and bg colors, ...\n self.cBtnPsPVal [0] = self.initDsp;\n self.cBtnPsPVal [1] = self.postProCal;\n # self.cBtnPsPVal [2] = False;\n\n if self.nbVals.get() < 2: self.nbVals.set(2);\n\n if None in [mgv.geomFileName, mgv.hdrFileName, mgv.datFileName]:\n self.btnPsP_4.config(state=\"disabled\");\n else: self.btnPsP_4.config(state=\"normal\");\n\n if self.postProCal.get():\n self.cBtnPsP_2Val[0] = 
self.bMkDiffMovie;\n self.cBtnPsP_2Val[1] = self.openHdrTxt;\n\n if self.bMax.get() <= 0.0: self.bMax.set (1);\n\n if self.bMx_DelG_DelDelta.get() == self.bMaxTxt:\n self.GdStep_E.grid_remove ();\n self.DeltaStep_E.grid_remove();\n self.bMax_E.grid ();\n elif self.bMx_DelG_DelDelta.get() == self.delGTxt:\n self.bMax_E.grid_remove ();\n self.DeltaStep_E.grid_remove();\n self.GdStep_E.grid ();\n elif self.bMx_DelG_DelDelta.get() == self.delDeltaTxt:\n self.GdStep_E.grid_remove();\n self.bMax_E.grid_remove ();\n self.DeltaStep_E.grid ();\n\n GPS.config(text=gpsLblTxt + \"\\nD W I\");\n\n # otherwise, error is raised in mPstP.sigCalc\n if self.bMax.get() == 0: self.bMax.set(10);\n\n mgv.bMax = 1e6*self.bMax.get();\n mgv.GdStep = self.GdStep.get();\n mgv.DeltaStep = 1e-3*self.DeltaStep.get();\n self.delGTxt = \"\\u0394G (mT/m)\" if self.constantDiffTime.get() else \"G\\u20D7 (mT/m)\";\n\n self.bMx_DelG_DelDelta_O['menu'].delete(0, 'end'); # First, delete all list, ...\n for choice in tuple((self.bMaxTxt, self.delGTxt, self.delDeltaTxt)):\n self.bMx_DelG_DelDelta_O['menu'].add_command(label=choice,\n command=tk._setit(self.bMx_DelG_DelDelta, choice));\n\n if self.bMcsDataLoaded and mgv.dGWaveTbl is not None:\n if (mgv.dGWaveTbl[:,0] + mgv.dGWaveTbl[:,1]).max() > 1e-3 * mgv.dtDiff:\n mgv.dGWaveTbl = None;\n self.bGWaveTblRead = False;\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()), \"green\", attrs=[\"bold\",\"blink\"]);\n txtC2 = mPstP.colored(\"The G-Table %s is unloaded.\" % mgv.gWaveFileName, \"red\", attrs=[\"bold\",\"blink\"]);\n print (txtC + txtC2 + \" Check the times.\");\n else:\n self.varyDelGdB.set( self.varyGdTxt );\n\n self.bigDel.set (1e3 * (mgv.dGWaveTbl[:,0].max() - mgv.dGWaveTbl[:,0].min()));\n self.smallDel.set(1e3 * (mgv.dGWaveTbl[:,1].max() ));\n\n # print out the maximum bVal, ..\n if self.mcsPstPro.get():\n mgv.Deltas = np.zeros( self.nbVals.get() + 1);\n mgv.nbVals = self.nbVals.get() + 1;\n\n dGradNB = [];\n 
mgv.smallDelta = 1e-3*self.smallDel.get();\n\n delta, Delta = 1e-3*self.smallDel.get(), 1e-3*self.bigDel.get();\n dirAng = self.grdDirFromZ.get()/57.299;\n\n if self.varyDelGdB.get() in [self.constBTxt, self.varyDelTxt]:\n self.entPsP[2].config (state=\"disabled\");\n\n maxDelStep = (mgv.dtDiff - 2*self.smallDel.get())/self.nbVals.get();\n\n if self.DeltaStep.get() > maxDelStep: self.DeltaStep.set( maxDelStep );\n\n mgv.Deltas[:] = 1e-3*self.DeltaStep.get()*np.arange(self.nbVals.get() + 1);\n\n # otherwise bVal becomes negative, ...\n mgv.Deltas[0] = 1e-3*self.smallDel.get();\n else: # b-value varies, with constant delta and Delta: self.varyGdTxt\n self.entPsP[2].config (state=\"normal\" );\n mgv.Deltas[:] = 1e-3*self.bigDel.get();\n\n # ....\n for k in range(self.nbVals.get() + 1):\n bValFct = 0.0;\n\n if self.varyDelGdB.get() == self.constBTxt: bValFct = 1.0;\n elif self.varyDelGdB.get() == self.varyGdTxt: bValFct = (k/self.nbVals.get())**2;\n else: bValFct = (mgv.Deltas[ k] - 1e-3*self.smallDel.get()/3) \\\n /(mgv.Deltas[-1] - 1e-3*self.smallDel.get()/3);\n\n if k==0: bValFct = 0.000001; # otherwise, error in mPstP, line 801\n\n dGradNB.append ((np.sin(dirAng), 0.0, np.cos(dirAng), bValFct));\n\n mgv.dGradNB = self.dGradNB = np.array(dGradNB).T;\n mgv.dGradNBL = self.dGradNBL = [(\"dir0\", self.dGradNB)];\n\n if self.varyDelGdB.get() != self.varyGdTxt:\n self.bigDel.set ( 1e3*mgv.Deltas[-1] ); # show the largest Delta\n # self.bigDel.set ( self.DeltaStep.get() );\n\n if len(self.dGradNBL) == 0 and self.dGradNB is not None:\n mgv.dGradNBL = self.dGradNBL = [(\"dir0\", self.dGradNB)];\n\n if len(self.dGradNBL) > 0:\n if self.dGradNBL[0][1].shape[1] != (self.nbVals.get() + 1): # for DTI simulation,\n mgv.dGradNBL = self.dGradNBL = [(\"dir0\", self.dGradNB)];\n\n # calc maximum B\n mgv.maxG = self.maxG.get();\n maxB = 1e-12*(mgv.gammaRad*mgv.smallDelta*self.maxG.get())**2 * 1e-3*(self.bigDel.get() - 1e-3*self.smallDel.get()/3);\n 
self.bMax.set(np.round(min(self.bMax.get(), maxB), 0));\n\n self.lblPsP[0].config(state=\"normal\" );\n self.entPsP[0].config(state=\"normal\" );\n\n # simulate for constant B with varying diffTime, ...\n if self.bMcsDataLoaded: # and self.bDGradDirRead:\n mgv.bMax = 1e6*self.bMax.get();\n delta, Delta = 1e-3*self.smallDel.get(), 1e-3*self.bigDel.get();\n if self.varyDelGdB.get() in [self.varyGdTxt, self.constBTxt]:\n GdStep = 1e3*np.sqrt(mgv.bMax/(Delta - delta/3)) \\\n /(self.gamma*delta*self.nbVals.get());\n elif self.varyDelGdB.get() == self.varyDelTxt:\n GdStep = 1e3*np.sqrt(mgv.bMax/(mgv.Deltas[-1] - delta/3))/(self.gamma*delta);\n\n self.GdStep.set ( np.around(GdStep, decimals=4) );\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()), \"green\", attrs=[\"bold\"]);\n txt = \"maximum bVal = %.1f %s\" % (self.bMax.get(), self.bUnit);\n\n print (txtC + txt);\n\n\n mgv.colorMap4Dsp = eval(\"cm.\" + self.colorMap4Dsp.get());\n mgv.plsSeqType = self.plsSeqType.get();\n if mgv.plsSeqType == \"StimEcho\": self.TE_ms.set(2*self.smallDel.get() + 5.12);\n else: self.TE_ms.set(self.smallDel.get() + self.bigDel.get() + 5.12);\n\n mgv.TE_ms = self.TE_ms.get();\n\n self.bigDel.set(np.round(max(self.bigDel.get(), self.smallDel.get() + 5.12), 2));\n\n if self.mcsPstPro.get(): self.t2Vals_E.config(state= \"normal\" );\n else: self.t2Vals_E.config(state=\"disabled\");\n\n t2ValsStr = self.t2Vals.get().replace(\",\",\".\").replace(\" \",\".\")\n self.t2Vals.set(t2ValsStr);\n mgv.icsT2, mgv.mlsT2, mgv.ecsT2 = np.array(self.t2Vals.get().split(\".\")[0:3], dtype=np.double);\n\n # END of updateProtFtn\n\n# pop up a help message box, ..\ndef getHelp (*args):\n global bYesNo;\n\n def popUpMessage (titleTxt, messageTxt):\n # popUpWin = tk.Tk(); # to change the display font, but not working, ...\n # popUpWin.option_add('*Dialog.msg.width', 50); # window wider\n # popUpWin.option_add('*font', self.inputFont);\n res = mBox.showinfo(titleTxt, messageTxt, 
icon=\"warning\");\n # popUpWin.option_clear();\n return res;\n\n argc = len(args);\n titleTxt, msgTxt = args[0:2];\n defAns = args[argc - 1];\n\n if argc > 2:\n bYesNo = mBox.askyesno(titleTxt, msgTxt, default=defAns, icon=\"question\");\n if bYesNo: popUpMessage(\"HELP\", args[2]);\n else: bYesNo = mBox.showinfo(titleTxt, msgTxt);\n\n return bYesNo;\n\ndef main_MCS():\n global GPS, gpsLblTxt;\n\n GUI = GUI_MCS();\n\n fgClr, bgClr = 'white', 'darkgreen';\n fgClrB, bgClrB = 'navy', 'black';\n hlBgColor = \"green\"\n\n if sys.version_info < (3, 6):\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=15);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=14);\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=16);\n infoFontB = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n infoFont = tkFont.Font(family=\"Helvetica\", size=12);\n else:\n if _platform in [\"win32\", \"windows\"]:\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n infoFont = tkFont.Font(family=\"Helvetica\", size=11);\n elif _platform in [\"linux\"]:\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n infoFont = tkFont.Font(family=\"Helvetica\", size=12);\n else:\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n infoFont = tkFont.Font(family=\"Helvetica\", size=12);\n\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n infoFontB = tkFont.Font(family=\"Helvetica\", weight='bold', size= 8);\n\n GI = GUI.gui_makeInfoFrame ();\n\n wid1 = 9 if (_platform in [\"win32\", \"windows\", \"linux\"]) else 12;\n wid11 = wid1 if _platform not in [\"win32\", \"windows\"] else 6;\n GPS = tk.Label (GUI.frm3, text=\"MCS pstProc\", font=procFont,\n 
bg='black', fg='white', padx=4, pady=2,\n width=11, justify=tk.CENTER, relief=tk.RIDGE);\n GPS.grid (row=0, column=0, rowspan=6, sticky=\"ewns\");\n GPS.bind(\"\",\n lambda event: getHelp(\"Yes for further HELP\", hlp.mPstP, \"no\"));\n\n GUI.gui_mcsPstPro();\n\n # extra label to show information\n extra_L = tk.Label (GUI.frm4, text=\"N O T E\", font=procFont, # infoFontB,\n fg='white', bg='gray20', padx=4, pady=2,\n width=11 - 4*(_platform in [\"linux\"]),\n justify=tk.CENTER, relief=tk.RIDGE);\n extra_L.grid (row=0, rowspan=2, column=0, sticky=\"ewns\");\n\n extra1_L = tk.Label(GUI.frm4, text=hlp.hlpEX01, font=infoFont,\n fg='black', bg='gray68', padx=4, pady=2, width=38,\n justify=tk.LEFT, anchor=\"w\", relief=tk.RIDGE);\n extra1_L.grid (row=0, rowspan=2, column=1, columnspan=4, sticky=\"ewns\");\n\n extra2_L = tk.Label(GUI.frm4, text=hlp.hlpEX02, font=infoFont,\n fg='black', bg='gray68', padx=4, pady=2, width=38,\n justify=tk.LEFT, anchor=\"w\", relief=tk.RIDGE);\n extra2_L.grid (row=0, rowspan=2, column=5, columnspan=4, sticky=\"ewns\");\n\n root.mainloop();\n\n root.destroy ();\n\n return GUI;\n\n# Following lines are excuted, ...\nif __name__ == '__main__':\n hlp = HELP.helpTxt();\n mgv = MGV.mcsVars ();\n\n GUI = main_MCS();\n\n","sub_path":"mcsGUI.py","file_name":"mcsGUI.py","file_ext":"py","file_size_in_byte":46201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"42820464","text":"import gc\nimport re\nimport sys\nimport time\nimport jieba\nimport string\nimport codecs\nimport pickle\nimport hashlib\nimport os.path\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom collections import Counter\nfrom sklearn.metrics import f1_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import roc_auc_score\n\n\n\n\n\n######################################## 清洗数据 ########################################\nimport numpy as np\nimport pandas as pd\n\n\n\ndata_path = r'C:/Users/csw/Desktop/python/liangzi/data/'\n\n\nentbase = pd.read_csv(data_path + '1entbase.csv')\nalter = pd.read_csv(data_path + '2alter.csv')\nbranch = pd.read_csv(data_path + '3branch.csv')\ninvest = pd.read_csv(data_path + '4invest.csv')\nright = pd.read_csv(data_path + '5right.csv')\nproject = pd.read_csv(data_path + '6project.csv')\nlawsuit = pd.read_csv(data_path + '7lawsuit.csv')\nbreakfaith = pd.read_csv(data_path + '8breakfaith.csv')\nrecruit = pd.read_csv(data_path + '9recruit.csv')\nqualification = pd.read_csv(data_path + '10qualification.csv',encoding='GB2312')\ntest = pd.read_csv(data_path + 'evaluation_public.csv')\ntrain = pd.read_csv(data_path + 'train.csv')\n\nprint('将feature name转换为小写')\ndef conver2lower(data):\n new_columns = []\n for name in data.columns:\n new_columns.append(name.lower())\n data.columns = new_columns\n data.rename(columns={'eid': 'id'}, inplace=True)\n return data\n\nentbase = conver2lower(entbase)\nalter = conver2lower(alter)\nbranch = conver2lower(branch)\ninvest = conver2lower(invest)\nright = conver2lower(right)\nproject = conver2lower(project)\nlawsuit = conver2lower(lawsuit)\nbreakfaith = conver2lower(breakfaith)\nrecruit = conver2lower(recruit)\nqualification = conver2lower(qualification)\ntest = conver2lower(test)\ntrain = conver2lower(train)\n\ndef replace(s):\n if s is np.nan:\n return s\n if '美元' in s:\n return float(s.replace('美元', '').replace('万元', 
'').replace('万', '')) * 6.5\n if '港' in s:\n return float(s.replace('港', '').replace('币', '').replace('万元', '').replace('万', '')) * 0.85\n\n return float(s.replace('万元','').replace('人民币','').replace('万', '').replace('(单位:)', ''))\ndef get_area(s):\n if '美元' in s:\n return 2\n if '港币' in s:\n return 1\n return 0\n\nprint('数据清洗...')\nalter['altbe'] = alter['altbe'].apply(replace)\nalter['altaf'] = alter['altaf'].apply(replace)\nalter['alterno'].replace('A_015','15',inplace=True)\nqualification['begindate'] = qualification['begindate'].apply(lambda x: x.replace('年','-').replace('月',''))\nqualification['expirydate'] = qualification['expirydate'].apply(lambda x: x.replace('年','-').replace('月','') if type(x) is str else x)\nbreakfaith['fbdate'] = breakfaith['fbdate'].apply(lambda x: x.replace('年','-').replace('月',''))\nbreakfaith['sxenddate'] = breakfaith['sxenddate'].apply(lambda x: x.replace('年','-').replace('月','') if type(x) is str else x)\nlawsuit['lawdate'] = lawsuit['lawdate'].apply(lambda x: x.replace('年','-').replace('月',''))\nrecruit['pnum'] = recruit['pnum'].apply(lambda x: x.replace('若干','').replace('人','') if type(x) is str else x)\ntrain.rename(columns={'target':'label'},inplace=True)\n\nprint('覆盖原来数据')\nentbase.to_csv(data_path + '1entbase.csv',index=False,encoding='utf-8')\nalter.to_csv(data_path + '2alter.csv',index=False,encoding='utf-8')\nbranch.to_csv(data_path + '3branch.csv',index=False,encoding='utf-8')\ninvest.to_csv(data_path + '4invest.csv',index=False,encoding='utf-8')\nright.to_csv(data_path + '5right.csv',index=False,encoding='utf-8')\nproject.to_csv(data_path + '6project.csv',index=False,encoding='utf-8')\nlawsuit.to_csv(data_path + '7lawsuit.csv',index=False,encoding='utf-8')\nbreakfaith.to_csv(data_path + '8breakfaith.csv',index=False,encoding='utf-8')\nrecruit.to_csv(data_path + '9recruit.csv',index=False,encoding='utf-8')\nqualification.to_csv(data_path + '10qualification.csv',index=False,encoding='utf-8')\ntest.to_csv(data_path + 
'evaluation_public.csv',index=False,encoding='utf-8')\ntrain.to_csv(data_path + 'train.csv',index=False,encoding='utf-8')\n\n#################################### 构造特征 #######################################\n\nglobal data_path\ncache_path = 'F:/liangzi_cache/'\nnew = False\n\n# 获取阈值\ndef get_threshold(preds):\n preds_temp = sorted(preds,reverse=True)\n n = sum(preds) # 实际正例个数\n m = 0 # 提交的正例个数\n e = 0 # 正确个数的期望值\n f1 = 0 # f1的期望得分\n for threshold in preds_temp:\n e += threshold\n m += 1\n f1_temp = e/(m+n)\n if f1>f1_temp:\n break\n else:\n f1 = f1_temp\n print('阈值为:{}'.format(threshold))\n print('提交正例个数为:{}'.format(m-1))\n print('期望得分为:{}'.format(f1*2))\n return [(1 if (pred>threshold) else 0) for pred in preds]\n\n# 合并节约内存\ndef concat(L):\n result = None\n for l in L:\n if result is None:\n result = l\n else:\n result[l.columns.tolist()] = l\n return result\n\n# 分组标准化\ndef grp_standard(data,key,names):\n for name in names:\n mean_std = data.groupby(key, as_index=False)[name].agg({'mean': 'mean',\n 'std': 'std'})\n data = data.merge(mean_std, on=key, how='left')\n data[name] = ((data[name]-data['mean'])/data['std']).fillna(0)\n data[name] = data[name].replace(-np.inf, 0)\n data.drop(['mean','std'],axis=1,inplace=True)\n return data\n\n# 分组归一化\ndef grp_normalize(data,key,names,start=0):\n for name in names:\n max_min = data.groupby(key,as_index=False)[name].agg({'max':'max',\n 'min':'min'})\n data = data.merge(max_min,on=key,how='left')\n data[name] = (data[name]-data['min'])/(data['max']-data['min'])\n data[name] = data[name].replace(-np.inf, start)\n data.drop(['max','min'],axis=1,inplace=True)\n return data\n\n# 分组排序\ndef grp_rank(data,key,names,ascending=True):\n for name in names:\n data.sort_values([key, name], inplace=True, ascending=ascending)\n data['rank'] = range(data.shape[0])\n min_rank = data.groupby(key, as_index=False)['rank'].agg({'min_rank': 'min'})\n data = pd.merge(data, min_rank, on=key, how='left')\n data['rank'] = data['rank'] - 
data['min_rank']\n data[names] = data['rank']\n data.drop(['rank'],axis=1,inplace=True)\n return data\n\n\n# 基础特征\ndef get_base_feat(stat,data,data_key):\n def id_convert(x):\n if 'p' in x:\n return -1\n if 's' in x:\n return 0 if int(x[1:])<500000 else 1\n else:\n return 0 if int(x) < 500000 else 1\n entbase = pd.read_csv(data_path + '1entbase.csv').fillna(-1)\n # stat_temp = stat.merge(entbase,on='id',how='left')\n feat = data.merge(entbase,on='id',how='left')\n feat['hy_count'] = feat['hy'].map(stat['hy'].value_counts())\n feat['etype_count'] = feat['etype'].map(stat['etype'].value_counts())\n feat['ienum'] = feat['inum'] - feat['enum']\n feat['rgyear'] = 2020 - feat['rgyear']\n # feat['zczb2'] = feat['zczb'] * (1.14 ** feat['rgyear'])\n feat['finzb2'] = feat['finzb'] / (feat['zczb'] + 0.1)\n feat['mpnum2'] = feat['mpnum'] / (feat['zczb'] + 0.1)\n feat['inum2'] = feat['inum'] / (feat['zczb'] + 0.1)\n feat['fstinum2'] = feat['fstinum'] / (feat['zczb'] + 0.1)\n feat['tzinum2'] = feat['tzinum'] / (feat['zczb'] + 0.1)\n feat['sumnum'] = feat[['mpnum','inum','fstinum','tzinum']].sum(axis=1)\n hy = pd.get_dummies(feat['hy'], prefix='hy')\n feat = pd.concat([feat,hy],axis=1)\n etype = pd.get_dummies(feat['etype'], prefix='etype')\n feat = pd.concat([feat, etype], axis=1)\n feat['id_feat'] = feat['id'].apply(id_convert)\n feat.fillna(0,inplace=True)\n feat.drop(['id','label'],axis=1,inplace=True)\n return feat\n\n# alter特征\ndef get_alter_feat(data,data_key):\n alter = pd.read_csv(data_path + '2alter.csv')\n alter['altdate'] = alter['altdate'].apply(lambda x:(2016-int(x[:4]))*12 - int(x[-2:]))\n alter.sort_values('altdate', ascending=True, inplace=True)\n n_alter = alter.groupby('id',as_index=False)['alterno'].agg({'n_alter':'size'})\n alterno = pd.get_dummies(alter['alterno'], prefix='alterno')\n alterno = pd.concat([alter[['id']], alterno], axis=1)\n alterno = alterno.groupby(['id'], as_index=False).sum()\n alter_first = 
alter.drop_duplicates('id',keep='first').rename(columns={'altdate':'altdate_first'})\n alter_last = alter.drop_duplicates('id', keep='last').rename(columns={'altdate':'altdate_last'})\n # alterno_time = alter.drop_duplicates(['id','alterno'], keep='last')[['id','alterno','altdate']]\n # alterno_time = alterno_time.set_index(['id','alterno']).unstack()\n # alterno_time.columns = alterno_time.columns.droplevel(0)\n # alterno_time = alterno_time.add_prefix('alterdate_').reset_index()\n # alter_money = alter[~alter['altbe'].isnull()].drop_duplicates('id', keep='first')\n # alter_money['alter_money'] = alter_money['altaf'] - alter_money['altbe']\n # alter_money['alter_rate'] = alter_money['alter_money'] / (alter_money['altbe']+0.1)\n feat = data.merge(n_alter, on='id', how='left').fillna(0)\n feat = feat.merge(alterno, on='id', how='left').fillna(0)\n # feat = feat.merge(alterno_time, on='id', how='left').fillna(0)\n feat = feat.merge(alter_first[['id', 'alterno', 'altdate_first']], on='id', how='left').fillna(-1)\n feat = feat.merge(alter_last[['id', 'altdate_last']], on='id', how='left').fillna(-1)\n # feat = feat.merge(alter_money[['id', 'alter_money', 'alter_rate','altbe']],on='id', how='left').fillna(-100000)\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# branch特征\ndef get_branch_feat(data,data_key):\n branch = pd.read_csv(data_path + '3branch.csv')\n branch['branch_active_year'] = branch['b_endyear'] - branch['b_reyear']\n feat = branch.groupby('id').agg({'b_endyear': {'n_branch': 'size',\n 'n_end_branch': 'count',\n 'last_end_branch': 'max',\n 'median_end_branch': 'median'},\n 'ifhome': {'n_home_branch': 'sum'},\n 'b_reyear': {'last_start_branch': 'max',\n 'first_start_branch': 'min'},\n 'branch_active_year': {'branch_active_year':'mean'}})\n feat.columns = feat.columns.droplevel(0)\n feat['id'] = feat.index\n feat['n_active_branch'] = feat['n_branch'] - feat['n_end_branch']\n feat['n_outer_branch'] = feat['n_branch'] - 
feat['n_home_branch']\n feat['active_branch_rate'] = feat['n_active_branch'] / (feat['n_branch'] + 0.1)\n feat['home_brach_rate'] = feat['n_home_branch'] / (feat['n_branch'] + 0.1)\n feat = data.merge(feat,on='id',how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# invest特征\ndef get_invest_feat(data,data_key):\n invest = pd.read_csv(data_path + '4invest.csv')\n # invest = invest[invest['id'] != invest['bteid']]\n train = pd.read_csv(data_path + 'train.csv')\n id_label_dict = dict(zip(train['id'].values,train['label'].values))\n invest['btlabel'] = invest['bteid'].map(id_label_dict)\n invest['idlabel'] = invest['id'].map(id_label_dict)\n n_invest = invest.groupby('id',as_index=False)['id'].agg({'n_invest':'count'})\n mean_btbl = invest.groupby('id', as_index=False)['btbl'].agg({'mean_btbl': 'mean'})\n sum_btbl = invest.groupby('id', as_index=False)['btbl'].agg({'sum_btbl': 'sum'})\n n_home_invest = invest.groupby('id', as_index=False)['ifhome'].agg({'n_home_invest': 'sum'})\n n_negitive_invest = invest.groupby('id', as_index=False)['btlabel'].agg({'n_negitive_invest': 'sum'})\n n_negitive_invest2 = invest.groupby('id', as_index=False)['btendyear'].agg({'n_negitive_invest2': 'count'})\n n_negitive_invested = invest.groupby('bteid', as_index=False)['idlabel'].agg({'n_negitive_invested': 'sum'})\n n_negitive_invested.rename(columns={'bteid':'id'},inplace=True)\n last_invest = invest.groupby('id',as_index=False)['btyear'].agg({'last_invest':'max'})\n last_negitive_invest = invest.groupby('id', as_index=False)['btendyear'].agg({'last_negitive_invest': 'max'})\n bt_invest = invest[['bteid','btyear','btendyear','btbl']].rename(columns={'bteid':'id'})\n bt_invest = bt_invest.groupby('id',as_index=False).max()\n feat = data.merge(n_invest, on='id', how='left')\n feat = feat.merge(n_home_invest, on='id', how='left')\n feat = feat.merge(mean_btbl, on='id', how='left')\n feat = feat.merge(sum_btbl, on='id', how='left')\n feat = 
feat.merge(n_negitive_invest, on='id', how='left')\n feat = feat.merge(n_negitive_invest2, on='id', how='left')\n feat = feat.merge(n_negitive_invested, on='id', how='left')\n feat = feat.merge(last_invest, on='id', how='left')\n feat = feat.merge(last_negitive_invest, on='id', how='left')\n feat = feat.merge(bt_invest, on='id', how='left')\n feat['home_invest_rate'] = feat['n_home_invest'] / (feat['n_invest']+0.1)\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# right特征\ndef get_right_feat(data, data_key):\n right = pd.read_csv(data_path + '5right.csv')\n nunique_right = right.groupby('id', as_index=False)['righttype'].agg({'nunique_right': 'nunique'})\n n_right1 = right[(right['askdate'] > '2012')].groupby('id', as_index=False)['righttype'].agg({'n_right1': 'count'})\n right['fbdate'] = right['fbdate'].apply(lambda x: x if x is np.nan else (2020 - int(x[:4])) * 12 - int(x[-2:]))\n right['weight'] = right['askdate'].apply(lambda x:0.5**(2015-int(x[:4])))\n right['askdate'] = right['askdate'].apply(lambda x: x if x is np.nan else (2020 - int(x[:4])) * 12 - int(x[-2:]))\n n_right = right.groupby('id', as_index=False)['id'].agg({'n_right': 'count'})\n n_right2 = right.groupby('id',as_index=False)['weight'].agg({'n_right2':'sum'})\n n_right3 = right.groupby('id', as_index=False)['fbdate'].agg({'n_right3': 'count'})\n righttype = pd.get_dummies(right['righttype'], prefix='righttype')\n righttype = pd.concat([right['id'], righttype], axis=1)\n righttype = righttype.groupby(['id'], as_index=False).sum()\n last_fbdate = right.groupby('id', as_index=False)['fbdate'].agg({'last_fbdate': 'min'})\n last_askdate = right.groupby('id', as_index=False)['askdate'].agg({'last_askdate': 'min'})\n feat = data.merge(n_right, on='id', how='left')\n feat = feat.merge(n_right2, on='id', how='left')\n feat = feat.merge(n_right3, on='id', how='left')\n feat = feat.merge(nunique_right, on='id', how='left')\n feat = feat.merge(n_right1, on='id', how='left')\n feat = 
feat.merge(righttype, on='id', how='left')\n feat = feat.merge(last_fbdate, on='id', how='left')\n feat = feat.merge(last_askdate, on='id', how='left')\n feat['n_right4'] = feat['n_right'] - feat['n_right3']\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# project特征\ndef get_project_feat(data, data_key):\n project = pd.read_csv(data_path + '6project.csv')\n project['djdate'] = project['djdate'].apply(lambda x: x if x is np.nan else (2020 - int(x[:4])) * 12 - int(x[5:7]))\n feat = project.groupby('id',as_index=False)['djdate'].agg({'n_project':'count',\n 'max_dfdate':'min',\n 'min_dfdate':'max',\n 'mean_dfdate': 'mean'})\n feat = data.merge(feat,on='id',how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n\n# lawsuit特征\ndef get_lawsuit_feat(data, data_key):\n lawsuit = pd.read_csv(data_path + '7lawsuit.csv')\n lawsuit.drop_duplicates(['id','lawdate','lawamount'],inplace=True)\n n_lawsuit = lawsuit.groupby('id', as_index=False)['id'].agg({'n_lawsuit': 'size'})\n sum_lawsuit_money = lawsuit.groupby('id', as_index=False)['lawamount'].agg({'lawamount': 'sum'})\n lawsuit['lawdate'] = lawsuit['lawdate'].apply(lambda x:(2016-int(x[:4]))*12 - int(x[-2:]))\n last_lawsuit_date = lawsuit.groupby('id', as_index=False)['lawdate'].agg({'last_lawsuit_date': 'min'})\n feat = data.merge(n_lawsuit, on='id', how='left')\n feat = feat.merge(sum_lawsuit_money, on='id', how='left')\n feat = feat.merge(last_lawsuit_date, on='id', how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# breakfaith特征\ndef get_breakfaith_feat(data, data_key):\n breakfaith = pd.read_csv(data_path + '8breakfaith.csv')\n breakfaith['fbdate'] = pd.to_datetime(breakfaith['fbdate']).apply(lambda x: (2016-x.year)*12 + x.month)\n breakfaith.drop_duplicates(['id', 'fbdate'], inplace=True)\n n_breakfaith = breakfaith.groupby('id', as_index=False)['id'].agg({'n_breakfaith': 'size'})\n last_fbdate = breakfaith.groupby('id', 
as_index=False)['fbdate'].agg({'last_fbdate': 'min'})\n first_fbdate = breakfaith.groupby('id', as_index=False)['fbdate'].agg({'last_fbdate': 'max'})\n feat = data.merge(n_breakfaith, on='id', how='left')\n feat = feat.merge(last_fbdate, on='id', how='left')\n feat = feat.merge(first_fbdate, on='id', how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# recruit特征\ndef get_recruit_feat(data, data_key):\n recruit = pd.read_csv(data_path + '9recruit.csv')\n recruit['recdate'] = recruit['recdate'].apply(lambda x: (2016 - int(x[:4])) * 12 - int(x[-2:]))\n # breakfaith.drop_duplicates(['id', 'fbdate'], inplace=True)\n n_recruit = recruit.groupby('id', as_index=False)['id'].agg({'n_recruit': 'size'})\n nunique_recruit = recruit.groupby('id', as_index=False)['poscode'].agg({'nunique_recruit': 'nunique'})\n sum_recruit_people = recruit.groupby('id', as_index=False)['pnum'].agg({'sum_recruit_people': 'sum',\n 'max_pnum':'max',\n 'mean_pnum':'mean'})\n last_lawsuit_date = recruit.groupby('id', as_index=False)['recdate'].agg({'last_lawsuit_date': 'min'})\n wzcode = recruit.groupby(['id','wzcode'])['pnum'].sum().unstack().reset_index()\n feat = data.merge(n_recruit, on='id', how='left')\n feat = feat.merge(nunique_recruit, on='id', how='left')\n feat = feat.merge(sum_recruit_people, on='id', how='left')\n feat = feat.merge(last_lawsuit_date, on='id', how='left')\n feat = feat.merge(wzcode, on='id', how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\ndef get_qualification_feat(data, data_key):\n qualification = pd.read_csv(data_path + '10qualification.csv', encoding='gb2312')\n n_qualification = qualification.groupby('id',as_index=False)['addtype'].agg({'n_qua':'count'})\n feat = data.merge(n_qualification,on='id',how='left')\n return feat\n\n\n\n# 二次处理特征\ndef second_feat(result):\n return result\n\n# 获取样本标签\ndef get_labels(data):\n train = pd.read_csv(r'C:/Users/csw/Desktop/python/liangzi/data/concat_data/train.csv')\n 
label_dict = dict(zip(train['id'].values,train['label'].values))\n data['label'] = data['id'].map(label_dict)\n return data\n\n# 构造训练集\ndef make_set(stat,data,path):\n global data_path\n data_path = path\n t0 = time.time()\n data_key = hashlib.md5(data.to_string().encode()).hexdigest()\n print('数据key为:{}'.format(data_key))\n result_path = cache_path + 'feat_set_{}.hdf'.format(data_key)\n if os.path.exists(result_path) & 0:\n result = pd.read_hdf(result_path, 'w')\n else:\n data.index = list(range(len(data.index)))\n entbase = pd.read_csv(data_path + '1entbase.csv').fillna(0)\n stat = stat.merge(entbase,on='id',how='left')\n print('开始构造特征...')\n base_feat = get_base_feat(stat, data,data_key) # 添加基础特征\n alter_feat = get_alter_feat(data,data_key) # alter特征\n branch_feat = get_branch_feat(data,data_key) # branch特征\n invest_feat = get_invest_feat(data,data_key) # invest特征\n right_feat = get_right_feat(data, data_key) # right特征\n project_feat = get_project_feat(data, data_key) # project特征\n lawsuit_feat = get_lawsuit_feat(data, data_key) # lawsuit特征\n breakfaith_feat = get_breakfaith_feat(data, data_key) # breakfaith特征\n recruit_feat = get_recruit_feat(data, data_key) # recruit特征\n # qualification_feat = get_qualification_feat(data, data_key)# qualification特征\n\n result = concat([data[['id']],base_feat,alter_feat ,branch_feat,invest_feat,right_feat,\n project_feat,lawsuit_feat,breakfaith_feat,recruit_feat\n ])\n result = get_labels(result)\n result = second_feat(result)\n result.to_hdf(result_path, 'w', complib='blosc', complevel=5)\n print('特征矩阵大小:{}'.format(result.shape))\n print('生成特征一共用时{}秒'.format(time.time() - t0))\n return result\n\n\n##################################### lgb重采样预测 ############################\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.cross_validation import KFold\n\nprint('读取train数据...')\ndata_path = 'C:/Users/csw/Desktop/python/liangzi/data/'\ncache_path = 'F:/liangzi_cache/'\n\ntrain = 
pd.read_csv(data_path + 'train.csv')\ntest = pd.read_csv(data_path + 'evaluation_public.csv')\ntest['label'] = np.nan\n\nprint('构造特征...')\ntrain_feat_temp = make_set(train,train,data_path)\ntest_feat = make_set(train,test,data_path)\nsumbmission = test_feat[['id']].copy()\n\npredictors = [f for f in train_feat_temp.columns if f not in ['id','label','enddate']]\n\ntrain_feat = train_feat_temp.append(train_feat_temp[train_feat_temp['prov']==11])\ntrain_feat = train_feat.append(train_feat_temp[train_feat_temp['prov']==11])\nprint('开始CV 5折训练...')\nscores = []\nt0 = time.time()\nmean_score = []\ntrain_preds = np.zeros(len(train_feat))\ntest_preds11 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n lgb_train = lgb.Dataset(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n lgb_test = lgb.Dataset(train_feat[predictors].iloc[test_index], train_feat['label'].iloc[test_index])\n\n params = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'auc',\n 'num_leaves': 150,\n 'learning_rate': 0.01,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'feature_fraction': 0.8,\n 'bagging_fraction': 0.95,\n 'bagging_freq': 5,\n 'verbose': -1,\n 'seed': 100,\n }\n gbm = lgb.train(params, lgb_train, 900)\n train_preds_sub = gbm.predict(train_feat[predictors].iloc[test_index])\n train_preds[test_index] += train_preds_sub\n test_preds_sub = gbm.predict(test_feat[predictors])\n test_preds11 += test_preds_sub\n\n score = roc_auc_score(train_feat['label'].iloc[test_index],train_preds_sub)\n scores.append(score)\n print('第{0}轮mae的得分: {1}'.format(i + 1, score))\ntest_preds11 = test_preds11/5\nprint('auc平均得分: {}'.format(np.mean(scores)))\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\nprint('开始CV 5折训练...')\ntrain_feat = train_feat_temp.append(train_feat_temp[(train_feat_temp['prov']==12)])\ntrain_feat = 
train_feat.append(train_feat_temp[(train_feat_temp['prov']==12)])\nscores = []\nt0 = time.time()\nmean_score = []\ntrain_preds = np.zeros(len(train_feat))\ntest_preds12 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n lgb_train = lgb.Dataset(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n lgb_test = lgb.Dataset(train_feat[predictors].iloc[test_index], train_feat['label'].iloc[test_index])\n\n params = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'auc',\n 'num_leaves': 150,\n 'learning_rate': 0.01,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'feature_fraction': 0.8,\n 'bagging_fraction': 0.95,\n 'bagging_freq': 5,\n 'verbose': -1,\n 'seed': 100,\n }\n gbm = lgb.train(params, lgb_train, 900)\n train_preds_sub = gbm.predict(train_feat[predictors].iloc[test_index])\n train_preds[test_index] += train_preds_sub\n test_preds_sub = gbm.predict(test_feat[predictors])\n test_preds12 += test_preds_sub\n\n score = roc_auc_score(train_feat['label'].iloc[test_index],train_preds_sub)\n scores.append(score)\n print('第{0}轮mae的得分: {1}'.format(i + 1, score))\ntest_preds12 = test_preds12/5\nprint('auc平均得分: {}'.format(np.mean(scores)))\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\ntest_feat['pred11'] = test_preds11\ntest_feat['pred12'] = test_preds12\ntest_feat['pred'] = test_feat.apply(lambda x: x.pred11 if x.prov==11 else x.pred12, axis=1)\npreds_scatter = get_threshold(test_feat['pred'].values)\nsubmission = pd.DataFrame({'EID':sumbmission['id'],'FORTARGET':preds_scatter,'PROB':1-test_feat['pred'].values})\nsubmission.to_csv(r'C:\\Users\\csw\\Desktop\\python\\liangzi\\submission\\sub{}.csv'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), index=False, float_format='%.4f')\n\n\n\n\n\n\n\n\n\n\n################################## xgb重采样 ################################\nimport 
xgboost\n\nprint('读取train数据...')\ndata_path = 'C:/Users/csw/Desktop/python/liangzi/data/'\ncache_path = 'F:/liangzi_cache/'\n\ntrain = pd.read_csv(data_path + 'train.csv')\ntest = pd.read_csv(data_path + 'evaluation_public.csv')\ntest['label'] = np.nan\n\nprint('构造特征...')\ntrain_feat_temp = make_set(train,train,data_path)\ntest_feat = make_set(train,test,data_path)\nsumbmission = test_feat[['id']].copy()\n\ntrain_feat = train_feat_temp.append(train_feat_temp[train_feat_temp['prov']==11])\ntrain_feat = train_feat.append(train_feat_temp[train_feat_temp['prov']==11])\npredictors = train_feat.columns.drop(['id','label','enddate','hy_16.0', 'hy_91.0', 'hy_94.0'])\n\nprint('开始CV 5折训练...')\nscores = []\nt0 = time.time()\ntest_preds11 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n xgb_train = xgboost.DMatrix(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n xgb_eval = xgboost.DMatrix(test_feat[predictors])\n\n xgb_params = {\n \"objective\": \"reg:logistic\"\n , \"eval_metric\": \"auc\"\n , \"eta\": 0.01\n , \"max_depth\": 12\n , \"min_child_weight\": 10\n , \"gamma\": 0.70\n , \"subsample\": 0.76\n , \"colsample_bytree\": 0.95\n , \"alpha\": 2e-05\n , \"lambda\": 10\n }\n bst = xgboost.train(params=xgb_params,dtrain=xgb_train,num_boost_round=1200)\n test_preds_sub = bst.predict(xgb_eval)\n test_preds11 += test_preds_sub\n\ntest_preds11 = test_preds11/5\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\nprint('开始CV 5折训练...')\ntrain_feat = train_feat_temp.append(train_feat_temp[(train_feat_temp['prov']==12)])\ntrain_feat = train_feat.append(train_feat_temp[(train_feat_temp['prov']==12)])\nt0 = time.time()\ntest_preds12 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n xgb_train = 
xgboost.DMatrix(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n xgb_eval = xgboost.DMatrix(test_feat[predictors])\n\n xgb_params = {\n \"objective\": \"reg:logistic\"\n , \"eval_metric\": \"auc\"\n , \"eta\": 0.01\n , \"max_depth\": 12\n , \"min_child_weight\": 10\n , \"gamma\": 0.70\n , \"subsample\": 0.76\n , \"colsample_bytree\": 0.95\n , \"alpha\": 2e-05\n , \"lambda\": 10\n }\n bst = xgboost.train(params=xgb_params, dtrain=xgb_train, num_boost_round=1200)\n test_preds_sub = bst.predict(xgb_eval)\n test_preds12 += test_preds_sub\n\ntest_preds12 = test_preds12/5\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\ntest_feat['pred11'] = test_preds11\ntest_feat['pred12'] = test_preds12\ntest_feat['pred'] = test_feat.apply(lambda x: x.pred11 if x.prov==11 else x.pred12, axis=1)\npreds_scatter = get_threshold(test_feat['pred'].values)\nsubmission = pd.DataFrame({'EID':sumbmission['id'],'FORTARGET':preds_scatter,'PROB':test_feat['pred'].values})\nsubmission.to_csv(r'C:\\Users\\csw\\Desktop\\python\\liangzi\\submission\\sub{}.csv'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), index=False)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"liangzi/piupiu_fengxian.py","file_name":"piupiu_fengxian.py","file_ext":"py","file_size_in_byte":29148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"154904474","text":"# -*- coding: utf-8 -*-\n# Copyright © 2015-2017 Carl Chenet \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see \n\n# CLI parsing\n'''CLI parsing'''\n\n# standard libraries imports\nfrom argparse import ArgumentParser\nimport os.path\nimport sys\n\nclass CliParse:\n '''CliParse class'''\n def __init__(self):\n '''Constructor for the CliParse class'''\n self.epilog = 'For more information: https://db2twitter.readthedocs.io'\n self.description = 'db2twitter automatically extracts fields from your database, use them to feed a template of tweet and send the tweet'\n self.main()\n\n def main(self):\n '''main of CliParse class'''\n parser = ArgumentParser(prog='db2twitter',\n description=self.description,\n epilog=self.epilog)\n parser.add_argument('pathtoconf', metavar='FILE', type=str,\n help='the path to the retweet configuration')\n parser.add_argument('-c', '--circle', action='store_true',\n default=False, help='circling the last tweets')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='simulate the execution, no tweet sent')\n self.cliargs = parser.parse_args()\n if not os.path.exists(self.cliargs.pathtoconf):\n sys.exit('the path you provided for db2twitter configuration file does not exist')\n if not os.path.isfile(self.cliargs.pathtoconf):\n sys.exit('the path you provided for db2twitter configuration is not a file')\n\n @property\n def args(self):\n 
'''return the cli arguments'''\n return self.cliargs\n","sub_path":"db2twitter/cliparse.py","file_name":"cliparse.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"503910991","text":"# Dictionaries\n# 1. Exercise 1\nphonebook_dict = {\n 'Alice': '703-493-1834',\n 'Bob': '857-384-1234',\n 'Elizabeth': '484-584-2923'\n}\nprint(\"Elizabeth's phone number is:\",phonebook_dict['Elizabeth'])\nphonebook_dict['Kareem'] = '938-489-1234'\nprint(\"Kareem's phone number is:\", phonebook_dict['Kareem'])\ndel phonebook_dict['Alice']\nphonebook_dict['Bob']='968-345-2345'\nprint(\"The phonebook dictionary consists of the following keys:\", phonebook_dict.keys())\nprint(\"The phonebook dictionary consists of the following values/numbers:\", phonebook_dict.values())\nprint(\"The phonebook dictionary consists of the following items:\", phonebook_dict.items())\n\n\n# Exercise 2: Nested Dictionaries\nramit = {\n 'name': 'Ramit',\n 'email': 'ramit@gmail.com',\n 'interests': ['movies', 'tennis'],\n 'friends': [\n {\n 'name': 'Jasmine',\n 'email': 'jasmine@yahoo.com',\n 'interests': ['photography', 'tennis']\n },\n {\n 'name': 'Jan',\n 'email': 'jan@hotmail.com',\n 'interests': ['movies', 'tv']\n }\n ]\n}\nprint(\"Ramit's email is:\",ramit['email'])\nprint(\"Ramit's first interest is:\", ramit['interests'][0])\nprint(\"Jasmine's email is:\",ramit['friends'][0]['email'])\nprint(\"The second of Jan's two interests is:\",ramit['friends'][1]['interests'][1])\n\n# Exercise 3: Letter Summary \n#s = \"banana\"\n\ndef letter_histogram(str1):\n dict = {}\n for n in str1: # treating the string as an array vs the second exercise where we are creating the array first\n keys = dict.keys()\n if n in keys:\n dict[n] += 1\n else:\n dict[n] = 1\n return dict\nprint(letter_histogram('banana'))\n\n## Need to review \n# Exercise 4: Word Summary \ndef word_histogram(text):\n text = text.lower()\n word_dict = {}\n word_list = text.replace('\\n', ' ').replace('.',' ').split(' ')\n for word in word_list:\n if word in word_dict:\n word_dict[word] += 1\n else: \n word_dict[word]=1\n return(word_dict)\n\nif __name__ ==\"__main__\": # this is to not allow running 
the function when the file is imported\n text = input(\"Please give me a sentence\")\n print(word_histogram(text))\n \n\n \n \n \n \n \n ","sub_path":"PythonPart3_Dic_IO.py","file_name":"PythonPart3_Dic_IO.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"499241027","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright © 2014 deanishe@deanishe.net\n#\n# MIT Licence. See http://opensource.org/licenses/MIT\n#\n# Created on 2014-11-10\n#\n\n\"\"\"\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals, absolute_import\n\nimport os\nimport subprocess\nimport sys\n\n\ndef pngcrush(filepath):\n \"\"\"Run file through `pngcrush` and return SHA1 hash\"\"\"\n name, ext = os.path.splitext(filepath)\n temppath = '{}.{}{}'.format(name, os.getpid(), ext)\n size_in = os.stat(filepath).st_size\n os.rename(filepath, temppath)\n cmd = [\n 'pngcrush',\n '-rem', 'allb',\n '-m', '10',\n '-q',\n '-reduce',\n temppath,\n filepath,\n ]\n subprocess.call(cmd)\n if os.path.exists(filepath) and os.path.exists(temppath):\n os.unlink(temppath)\n size_out = os.stat(filepath).st_size\n pc = (float(size_out) / size_in) * 100\n print('Optimised [{:4d}b / {:0.1f}%] `{}`'.format(\n size_out, pc, filepath), file=sys.stderr)\n\n\ndef main():\n if not len(sys.argv) == 2:\n print('Usage: optimise-pngs.py ')\n return 1\n\n rootdir = sys.argv[1]\n\n for root, dirnames, filenames in os.walk(rootdir):\n for filename in filenames:\n if not filename.lower().endswith('.png'):\n continue\n filepath = os.path.join(root, filename)\n pngcrush(filepath)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"extra/optimise-pngs.py","file_name":"optimise-pngs.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"333840144","text":"\"\"\"\nGeorgia Institute of Technology - CS1301\nHW08 - File I/O\n\"\"\"\n__author__ = \"\"\" Jade Law \"\"\"\n__collab__ = \"\"\" I worked on some of the problems with Rashmi Athavale. \"\"\"\n\n\"\"\"\nFunction name: get_roster\nParameters: filename (string)\nReturns: Read in a file of any name, but assume it is in the format stated\nabove. Go through every line and make a list of tuples of all the students\nin the class. The tuples will be formatted (FirstName, LastName). Return this\nlist. If the file is not found, catch a FileNotFoundError and return\n“File is not found.” \n\"\"\"\ndef get_roster(filename):\n tuplist = []\n num = 0\n try:\n file = open(filename,\"r\")\n lines = file.readlines()\n for line in lines:\n if \",\" in line:\n line = line.strip()\n num = line.find(\",\")\n tuplist.append((line[:num], line[num+2:]))\n else:\n continue\n file.close()\n return tuplist\n except FileNotFoundError:\n return \"File is not found.\"\n\n\"\"\"\nFunction name: get_average\nParameters: filename (string), student (string)\nReturns: A tuple with the name of the student (without the comma)\nand their average for their exams\nDescription: Read in a file of any name but in the format as what is stated\nabove. For the student passed into the function, find the student and take\nthe average of their test scores. The average will be a float rounded to two\ndecimals. Return this data as a tuple in the format\n(FirstName LastName, Average). If the student is not found in the file,\nreturn “Student not found in file.”. 
If the file is not found, catch a\nFileNotFoundError and return “File is not found.” \n\"\"\"\ndef get_average(filename, student):\n studentname = student.split()\n studentname = studentname[0] + \", \" + studentname [1] + \"\\n\"\n avgscore = 0\n try:\n file = open(filename,\"r\")\n lines = file.readlines()\n lines.append(\"\\n\")\n num = lines.index(studentname)\n lines = lines[num+1:]\n num = lines.index(\"\\n\")\n lines = lines[:num]\n for score in lines:\n num = score.find(\":\")\n score = float(score[num+2:])\n avgscore += score\n avgscore = round(avgscore/len(lines),2)\n file.close()\n return (student, avgscore)\n except FileNotFoundError:\n return \"File is not found.\"\n except ValueError:\n file.close()\n return \"Student not found in file.\"\n\n\"\"\"\nFunction name: get_all_averages\nParameters: filename (string)\nReturns: A dictionary representing a student as the key,\nand their average on exams as the value\nDescription: Read in a file of any name but in the format as what is stated\nabove. For every student, make an entry in a dictionary where their first\nname is the key and their average for their exams as the value. The file will\nnot have duplicate first names. The average will be a float rounded to two\ndecimals. 
If the file is not found, catch a FileNotFoundError and return\n“File is not found.” \n\"\"\"\n\ndef get_all_averages(filename):\n newdict = {}\n name = \"\"\n score = 0\n testcount = 0\n try:\n file = open(filename,\"r\")\n lines = file.readlines()\n for line in lines:\n if \",\" in line:\n name = line[:line.find(\",\")]\n elif \":\" in line:\n line = line.strip()\n score += float(line[line.find(\":\")+2:])\n testcount += 1\n else:\n newdict[name] = round(score/testcount,2)\n name = \"\"\n score = 0\n testcount = 0\n newdict[name] = round(score/testcount,2)\n file.close()\n return newdict\n except FileNotFoundError:\n return \"File is not found.\"\n\n\"\"\"\nFunction name: form_groups\nParameters: filename (string), current_student (string), num_per_team (int)\nReturns: None\nDescription: Read in a file of any name but in the format as what is stated\nabove. If the file is not found, catch a FileNotFoundError and return\n“File is not found.”. In a new file to write named group.txt, write\n“Team StudentName” on one line, replacing StudentName with the name of the\ncurrent student passed in. Go through the file to find the top X-1 number\nof students to add to your team, top being those with the highest averages.\nX is the number passed in representing the maximum number of people per team,\nand X-1 is the number of students selected on the team minus the current\nstudent. The current student can not be one of the students added to the team.\nIf there are less than X number of students, then everyone in the file is\nincluded on the team. However, the number of students in a team can not exceed\nX. If X == 1, then just write the header on the file “Team StudentName”\nand if X == 0, then do not write anything to the file. There will not be more\nthan one student with the same average. 
Each of these students will be a\nseparate line in the new file in the format of “Y) Student Name”, Y being a\nnumber in a list in increasing order, going from 1 - the maximum number of\npeople per team. The top student will be 1, and then it will go down in\ndecreasing top scores. The last line of the file should not have a “\\n”.\nThis function will return None unless there is an error.\n\"\"\"\ndef form_groups(filename, current_student, num_per_team):\n studentname = current_student.split()\n studentname = studentname[0] + \", \" + studentname [1] + \"\\n\"\n score = 0\n testcount = 0\n studict = {}\n try:\n stufile = open(filename,\"r\")\n lines = stufile.readlines()\n lines.append(\"\\n\")\n for line in lines:\n if \",\" in line:\n name = line\n if name != studentname:\n if \",\" in line:\n student = line.strip()\n student = student[:line.find(\",\")]\n student += line[line.find(\",\")+1:line.find(\"\\n\")]\n elif \":\" in line:\n line = line.strip()\n score += float(line[line.find(\":\")+2:])\n testcount += 1\n else:\n studict[student] = round(score/testcount,2)\n student = \"\"\n score = 0\n testcount = 0\n stufile.close()\n groupfile = open(\"group.txt\",\"w\")\n groupfile.write(\"Team \" + current_student + \"\\n\")\n if num_per_team-1 < len(studict.keys()):\n for i in range(num_per_team-1):\n highest = -1\n for name,avg in studict.items():\n if avg > highest:\n highest = avg\n highestName = name\n groupfile.write(\"{}) {}\\n\".format(i+1, highestName))\n del studict[highestName]\n elif num_per_team-1 > len(studict.keys()):\n for i in range(len(studict.keys())):\n highest = -1\n for name,avg in studict.items():\n if avg > highest:\n highest = avg\n highestName = name\n groupfile.write(\"{}) {}\\n\".format(i+1, highestName))\n del studict[highestName]\n groupfile.close()\n except FileNotFoundError:\n return \"File is not found.\"\n \nform_groups('files/CS1332.txt', 'Steve Jobs', 0)\n\"\"\"\nFunction name: zero_calorie_diet\nParameters: filename 
(string)\nReturns: A string representing the name of a dish\nDescription: Read in a file of any name but in the format as what is stated\nabove. If the file is not found, catch a FileNotFoundError and return\n“File is not found.”. You are trying to go on a low calorie diet, so parse\nthrough the file and return the name of the dish with the least amount of\ncalories. If two dishes have the same amount of calories, return the one that\noccurred first. \n\"\"\"\ndef zero_calorie_diet(filename):\n try:\n file = open(filename,\"r\")\n heading = file.readline()\n lines = file.readlines()\n lowcal = 10000000000000\n for line in lines:\n line = line.split(\",\")\n line[2] = int(line[2])\n line = tuple(line)\n (name, price, cal, cuisine) = (line[0], line[1], line[2], line[3])\n if cal < lowcal:\n lowcal = cal\n lowfood = name\n file.close()\n return lowfood\n except FileNotFoundError:\n return \"File is not found.\"\n\n\"\"\"\nFunction name: erica_menu\nParameters: filename (string), num_of_dishes (int)\nReturns: None\nDescription: Read in a file of any name but in the format as what is stated\nabove. If the file is not found, catch a FileNotFoundError and return\n“File is not found.”. Erica wants to put together an ideal menu for her,\nconsisting of the same or less number of items than the number passed in,\nbut never more. There are conditions, however, since she is broke and very\npicky. Parse through the file and create a menu for Erica by choosing the\ncheapest dishes. However, never put a Vegetarian dish on her menu, because\nshe will not eat it. What this means is that if there are 4 dishes on the\nlist, one being Vegetarian, and she wants 4 items on her menu, then her menu\nwill consist of 3 dishes. You will be writing this menu out onto a new file\nnamed EricaMenu.txt. 
The first line of the file will be “Erica’s Menu” and\nevery corresponding line after that will be the Dish Name, Price, and Cuisine\nType all on the same line, with each dish being on a separate line.\nPrice will have a $ preceding the number and each element for a line, except\nthe last element, will be followed by a “, “ (comma and space). The last\nline will not have a “\\n”. This function will return None, unless there is an\nerror. \n\"\"\"\ndef erica_menu(filename, num_of_dishes):\n try:\n file = open(filename,\"r\")\n heading = file.readline()\n lines = file.readlines()\n menu = open(\"EricaMenu.txt.\",\"w\")\n menu.write(\"Erica's Menu\\n\")\n used = []\n foods = {}\n num = 0\n for line in lines:\n line = line.split(\",\")\n line[1] = float(line[1][1:])\n line[3] = line[3].strip()\n line = tuple(line)\n (name, price, cal, cuisine) = (line[0], line[1], line[2], line[3])\n if cuisine != \"Vegetarian\":\n foods[name] = (price, cuisine)\n for i in range(num_of_dishes):\n if len(foods) != 1:\n cheapest = 100000000\n for food in foods:\n (price, cuisine) = (foods[food][0], foods[food][1])\n if price < cheapest:\n cheapest = price\n cheapfood = food\n cheapcuisine = cuisine\n menu.write(\"{}, ${}, {}\\n\".format(cheapfood, str(cheapest), cheapcuisine))\n del foods[cheapfood]\n else:\n for food in foods:\n menu.write(\"{}, ${}, {}\".format(food, foods[food][0], foods[food][1]))\n break\n file.close()\n menu.close()\n except FileNotFoundError:\n return \"File is not found.\"\n\nerica_menu('files/chikfila.csv', 0)\nerica_menu('files/westvillage.csv', 10)\nerica_menu('files/chikfila.csv', 4)\nerica_menu('files/chikfila.csv', 10)\n","sub_path":"HW08_FileIO.py","file_name":"HW08_FileIO.py","file_ext":"py","file_size_in_byte":11371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"646238326","text":"\"\"\"\r\nSimple interest formula is given by:\r\nSimple Interest = (P x T x R)/100\r\nWhere,\r\nP is the principle amount\r\nT is the time and\r\nR is the rate\r\n\r\n\"\"\"\r\ndef si(p,r,t):\r\n si = (P * R * T) / 100\r\n return si\r\n\r\nP = 1000\r\nR = 5\r\nT = 5\r\n\r\nprint(si(P,R,T))\r\n","sub_path":"Python Program for simple interest.py","file_name":"Python Program for simple interest.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"584670586","text":"import random\n\n\ndef hash(plain):\n cols = 4\n # Kodomain yang digunakan berupa huruf w/x/y/z (119-122)\n char = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",\n \"8\", \"9\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n startChar = 97\n codomain = [0]*cols\n result = ''\n\n # Random digunakan sebagai padding apabila len(plain)= cols:\n break\n plain += chr(startChar + random.randint(0, 26))\n\n # Membagi string kedalam 4 kolom codomain\n # Lalu setiap char di kolom tersebut di XOR kan nilai ASCII nya\n for i in range(len(plain)):\n modulo = i % cols\n codomain[modulo] ^= ord(plain[i])\n\n # Setiap kolom dijadikan char dengan nilai w/x/y/z sesuai hasil xor % 4\n # Ditambah startChar\n for i in range(cols):\n result += chr(startChar + (codomain[i] % cols))\n\n return result\n\n\n# Meminta input dan menampilkan hasil\nplain = input(\"Plain text: \")\n\ndigest = hash(plain)\nprint(\"Digest: \", end='')\nprint(digest)\n","sub_path":"minggu4/digestV2.py","file_name":"digestV2.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"274700763","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright (c) 2019 Valentin B.\nRequirements:\nPython 3.5+\npip install -U discord.py pynacl youtube-dl\nYou also need FFmpeg in your PATH environment variable or the FFmpeg.exe binary in your client's directory on Windows.\n\"\"\"\n\n\"\"\"\nMerci A Valentin B pour son code source qui m'as permis d'apprendre beaucoup\n\"\"\"\n\nimport asyncio\nimport functools\nimport itertools\nimport math\nimport random\n\nimport discord\nimport youtube_dl\nfrom async_timeout import timeout\nfrom discord.ext import commands\n\n# Silence useless bug reports messages\nyoutube_dl.utils.bug_reports_message = lambda: ''\n\n\nclass VoiceError(Exception):\n pass\n\n\nclass YTDLError(Exception):\n pass\n\n\nclass YTDLSource(discord.PCMVolumeTransformer):\n YTDL_OPTIONS = {\n 'format': 'bestaudio/best',\n 'extractaudio': True,\n 'audioformat': 'mp3',\n 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',\n 'restrictfilenames': True,\n 'noplaylist': False,\n 'nocheckcertificate': True,\n 'ignoreerrors': False,\n 'logtostderr': False,\n 'quiet': True,\n 'no_warnings': True,\n 'default_search': 'auto',\n 'source_address': '0.0.0.0',\n }\n\n FFMPEG_OPTIONS = {\n 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',\n 'options': '-vn',\n }\n\n ytdl = youtube_dl.YoutubeDL(YTDL_OPTIONS)\n\n def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5):\n super().__init__(source, volume)\n\n self.requester = ctx.author\n self.channel = ctx.channel\n self.data = data\n\n self.uploader = data.get('uploader')\n self.uploader_url = data.get('uploader_url')\n date = data.get('upload_date')\n self.upload_date = date[6:8] + '.' + date[4:6] + '.' 
+ date[0:4]\n self.title = data.get('title')\n self.thumbnail = data.get('thumbnail')\n self.description = data.get('description')\n self.duration = self.parse_duration(int(data.get('duration')))\n self.tags = data.get('tags')\n self.url = data.get('webpage_url')\n self.views = data.get('view_count')\n self.likes = data.get('like_count')\n self.dislikes = data.get('dislike_count')\n self.stream_url = data.get('url')\n\n def __str__(self):\n return '**{0.title}** de **{0.uploader}**'.format(self)\n\n @classmethod\n async def create_source(cls, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None):\n loop = loop or asyncio.get_event_loop()\n\n partial = functools.partial(cls.ytdl.extract_info, search, download=False, process=False)\n data = await loop.run_in_executor(None, partial)\n\n if data is None:\n raise YTDLError(\"Je n'ai pas pu trouver de résultat `{}`\".format(search))\n\n if 'entries' not in data:\n process_info = data\n else:\n process_info = None\n for entry in data['entries']:\n if entry:\n process_info = entry\n break\n\n if process_info is None:\n raise YTDLError(\"Je n'ai pas pu trouver de résultat `{}`\".format(search))\n\n webpage_url = process_info['webpage_url']\n partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)\n processed_info = await loop.run_in_executor(None, partial)\n\n if processed_info is None:\n raise YTDLError('Erreur - impossible de récuperer'.format(webpage_url))\n\n if 'entries' not in processed_info:\n info = processed_info\n else:\n info = None\n while info is None:\n try:\n info = processed_info['entries'].pop(0)\n except IndexError:\n raise YTDLError(\"Je n'ai pas pu trouver de résultat `{}`\".format(webpage_url))\n\n return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info)\n\n @staticmethod\n def parse_duration(duration: int):\n minutes, seconds = divmod(duration, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n\n 
duration = []\n if days > 0:\n duration.append('{} jours'.format(days))\n if hours > 0:\n duration.append('{} heures'.format(hours))\n if minutes > 0:\n duration.append('{} minutes'.format(minutes))\n if seconds > 0:\n duration.append('{} secondes'.format(seconds))\n\n return ', '.join(duration)\n\n\nclass Song:\n __slots__ = ('source', 'requester')\n\n def __init__(self, source: YTDLSource):\n self.source = source\n self.requester = source.requester\n\n def create_embed(self):\n embed = (discord.Embed(title='Joue actuellement',\n description='```css\\n{0.source.title}\\n```'.format(self),\n color=discord.Color.blurple())\n .add_field(name='Durée', value=self.source.duration)\n .add_field(name='Requête de', value=self.requester.mention)\n .add_field(name='Auteur', value='[{0.source.uploader}]({0.source.uploader_url})'.format(self))\n .add_field(name='URL', value='[Click]({0.source.url})'.format(self))\n .set_thumbnail(url=self.source.thumbnail))\n\n return embed\n\n\nclass SongQueue(asyncio.Queue):\n def __getitem__(self, item):\n if isinstance(item, slice):\n return list(itertools.islice(self._queue, item.start, item.stop, item.step))\n else:\n return self._queue[item]\n\n def __iter__(self):\n return self._queue.__iter__()\n\n def __len__(self):\n return self.qsize()\n\n def clear(self):\n self._queue.clear()\n\n def shuffle(self):\n random.shuffle(self._queue)\n\n def remove(self, index: int):\n del self._queue[index]\n\n\nclass VoiceState:\n def __init__(self, client, ctx: commands.Context):\n self.client = client\n self._ctx = ctx\n\n self.current = None\n self.voice = None\n self.next = asyncio.Event()\n self.songs = SongQueue()\n\n self._loop = False\n self._volume = 0.5\n self.skip_votes = set()\n\n self.audio_player = client.loop.create_task(self.audio_player_task())\n\n def __del__(self):\n self.audio_player.cancel()\n\n @property\n def loop(self):\n return self._loop\n\n @loop.setter\n def loop(self, value: bool):\n self._loop = value\n\n @property\n def 
volume(self):\n return self._volume\n\n @volume.setter\n def volume(self, value: float):\n self._volume = value\n\n @property\n def is_playing(self):\n return self.voice and self.current\n\n async def audio_player_task(self):\n while True:\n self.next.clear()\n\n if not self.loop:\n # Try to get the next song within 60 minutes.\n # If no song will be added to the queue in time,\n # the player will disconnect due to performance\n # reasons.\n try:\n async with timeout(3600): # 60 minutes\n self.current = await self.songs.get()\n except asyncio.TimeoutError:\n self.client.loop.create_task(self.stop())\n await self._ctx.send(\"Timeout\")\n return\n\n self.current.source.volume = self._volume\n self.voice.play(self.current.source, after=self.play_next_song)\n await self.current.source.channel.send(embed=self.current.create_embed())\n\n await self.next.wait()\n\n def play_next_song(self, error=None):\n if error:\n raise VoiceError(str(error))\n\n self.next.set()\n\n def skip(self):\n self.skip_votes.clear()\n\n if self.is_playing:\n self.voice.stop()\n\n async def stop(self):\n self.songs.clear()\n\n if self.voice:\n await self.voice.disconnect()\n self.voice = None\n\n\nclass Musique(commands.Cog):\n def __init__(self, client):\n self.client = client\n self.voice_states = {}\n print(\"Music is loaded\")\n\n def get_voice_state(self, ctx: commands.Context):\n state = self.voice_states.get(ctx.guild.id)\n if not state:\n state = VoiceState(self.client, ctx)\n self.voice_states[ctx.guild.id] = state\n\n return state\n\n def cog_unload(self):\n for state in self.voice_states.values():\n self.client.loop.create_task(state.stop())\n\n def cog_check(self, ctx: commands.Context):\n if not ctx.guild:\n raise commands.NoPrivateMessage('Cette commande ne peut être utilisée en message privé')\n\n return True\n\n async def cog_before_invoke(self, ctx: commands.Context):\n ctx.voice_state = self.get_voice_state(ctx)\n\n async def cog_command_error(self, ctx: commands.Context, error: 
commands.CommandError):\n await ctx.send('Erreur: {}'.format(str(error)))\n\n @commands.command(name='summon', invoke_without_subcommand=True)\n async def _join(self, ctx: commands.Context):\n \"\"\"Invoque Sisyphe dans le salon courant.\"\"\"\n\n destination = ctx.author.voice.channel\n if ctx.voice_state.voice:\n await ctx.voice_state.voice.move_to(destination)\n return\n\n ctx.voice_state.voice = await destination.connect()\n\n @commands.command(name='join')\n @commands.has_permissions(manage_guild=True)\n async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None):\n \"\"\"Déplace Sisyphe dans le salon courant ou l'invoque.\n \"\"\"\n\n if not channel and not ctx.author.voice:\n raise VoiceError(\"Vous devez d'abord rejoindre un salon vocal\")\n\n destination = channel or ctx.author.voice.channel\n if ctx.voice_state.voice:\n await ctx.voice_state.voice.move_to(destination)\n return\n\n ctx.voice_state.voice = await destination.connect()\n\n @commands.command(name='leave', aliases=['disconnect'])\n @commands.has_permissions(manage_guild=True)\n async def _leave(self, ctx: commands.Context):\n \"\"\"Renvoit Sisyphe.\"\"\"\n\n if not ctx.voice_state.voice:\n return await ctx.send(\"Sisyphe n'est pas dans un salon vocal\")\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]\n\n @commands.command(name='volume')\n async def _volume(self, ctx: commands.Context, *, volume: int):\n \"\"\"Change le volume du lecteur.\"\"\"\n\n if not ctx.voice_state.is_playing:\n return await ctx.send(\"Rien n'est joué pour le moment\")\n\n if 0 > volume > 100:\n return await ctx.send('Le volume doit être entre 0 et 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume mis à {}%'.format(volume))\n\n @commands.command(name='now', aliases=['current', 'playing'])\n async def _now(self, ctx: commands.Context):\n \"\"\"Montre les informations de la musique.\"\"\"\n\n await 
ctx.send(embed=ctx.voice_state.current.create_embed())\n\n @commands.command(name='pause')\n @commands.has_permissions(manage_guild=True)\n async def _pause(self, ctx: commands.Context):\n \"\"\"Mets en pause.\"\"\"\n\n if ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n @commands.command(name='resume')\n @commands.has_permissions(manage_guild=True)\n async def _resume(self, ctx: commands.Context):\n \"\"\"Enlève la pause.\"\"\"\n\n if ctx.voice_state.voice.is_paused():\n ctx.voice_state.voice.resume()\n await ctx.message.add_reaction('⏯')\n\n @commands.command(name='clear',aliases=['stop'])\n @commands.has_permissions(manage_guild=True)\n async def _clear(self, ctx: commands.Context):\n \"\"\"Vide la file. (aliase:!stop)\"\"\"\n if ctx.voice_state:\n ctx.voice_state.songs.clear()\n if ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.send(\"La file a été vidée\")\n await ctx.message.add_reaction('⏹')\n else:\n await ctx.send(\"Sisyphe n'est pas connecté\")\n\n @commands.command(name='skip')\n async def _skip(self, ctx: commands.Context):\n \"\"\"Vote pour passer.\n \"\"\"\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Ne joue rien actuellement')\n\n voter = ctx.message.author\n if voter == ctx.voice_state.current.requester:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n\n elif voter.id not in ctx.voice_state.skip_votes:\n ctx.voice_state.skip_votes.add(voter.id)\n total_votes = len(ctx.voice_state.skip_votes)\n\n if total_votes >= 2:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n else:\n await ctx.send('Actuellement **{} votes/2** pour passer'.format(total_votes))\n\n else:\n await ctx.send('Tu as déjà voté')\n\n @commands.command(name='queue')\n async def _queue(self, ctx: commands.Context, *, page: int = 1):\n \"\"\"Montre la file d'attente.\n \"\"\"\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('La file est 
vide')\n\n items_per_page = 10\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = ''\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += '`{0}.` [**{1.source.title}**]({1.source.url})\\n'.format(i + 1, song)\n\n embed = (discord.Embed(description='**{} pistes:**\\n\\n{}'.format(len(ctx.voice_state.songs), queue))\n .set_footer(text='Viewing page {}/{}'.format(page, pages)))\n await ctx.send(embed=embed)\n\n @commands.command(name='shuffle')\n async def _shuffle(self, ctx: commands.Context):\n \"\"\"Mélange la file.\"\"\"\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('.')\n\n ctx.voice_state.songs.shuffle()\n await ctx.message.add_reaction('✅')\n\n @commands.command(name='remove')\n async def _remove(self, ctx: commands.Context, index: int):\n \"\"\"Enlève une musique à l'index donné.\"\"\"\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')\n\n @commands.command(name='loop')\n async def _loop(self, ctx: commands.Context):\n \"\"\"Rejouer la musique actuelle. 
(NE MARCHE PAS)\n \"\"\"\n\n if not ctx.voice_state.is_playing:\n return await ctx.send(\"Rien n'ai joué actuellement\")\n\n # Inverse boolean value to loop and unloop.\n ctx.voice_state.loop = not ctx.voice_state.loop\n await ctx.message.add_reaction('✅')\n\n @commands.command(name='play')\n async def _play(self, ctx: commands.Context, *, search: str):\n \"\"\"Joue une musique avec un url ou un nom.\n \"\"\"\n\n if not ctx.voice_state.voice:\n await ctx.invoke(self._join)\n\n async with ctx.typing():\n try:\n source = await YTDLSource.create_source(ctx, search, loop=self.client.loop)\n except YTDLError as e:\n await ctx.send(\"Erreur lors de l'execution de la requête: {}\".format(str(e)))\n else:\n song = Song(source)\n\n await ctx.voice_state.songs.put(song)\n await ctx.send('Mise en file de {}'.format(str(source)))\n\n @commands.command(name='chacha')\n async def _chacha(self,ctx: commands.Context):\n \"\"\"On ne négocie pas avec les terroristes\n \"\"\"\n if not ctx.voice_state.voice:\n await ctx.invoke(self._join)\n \n ctx.voice_state.songs.clear()\n if ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.stop()\n ctx.voice_state.voice.play(discord.FFmpegPCMAudio('assets/chacha.mp3'))\n await ctx.send(\"On ne négocie pas avec les terroristes !\")\n\n\n @_join.before_invoke\n @_play.before_invoke\n async def ensure_voice_state(self, ctx: commands.Context):\n if not ctx.author.voice or not ctx.author.voice.channel:\n raise commands.CommandError(\"Vous n'etes pas connecté à un salon vocal\")\n\n if ctx.voice_client:\n if ctx.voice_client.channel != ctx.author.voice.channel:\n raise commands.CommandError('Sisyphe est déjà dans un salon vocal')\n\n #Greetings user if not playing\n @commands.Cog.listener()\n async def on_voice_state_update(self,member,before, after):\n if member.bot == False:\n guildid = member.guild.id\n voice_state = self.voice_states.get(guildid)\n if voice_state:\n if before.channel != voice_state.voice.channel and after.channel == 
voice_state.voice.channel:\n if voice_state.voice.is_playing() == False and voice_state.voice.is_paused() == False :\n voice_state.voice.play(discord.FFmpegPCMAudio('assets/chacha.mp3'))\n \ndef setup(client):\n client.add_cog(Musique(client))","sub_path":"cogs/musique.py","file_name":"musique.py","file_ext":"py","file_size_in_byte":17497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"380169034","text":"import copy\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\nimport gymnasium as gym\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom ..nn.dueling import Dueling\nfrom ..nn.fcn import FCN\nfrom ..nn.noisy_linear import NoisyLinear\nfrom .double_dqn import DoubleDQN\n\n\nclass NoisyDQNPolicy(nn.Sequential):\n\n def __init__(self, input_dim: int, output_dim: int, net_arch: List[int] = [128], deuling: bool = False) -> None:\n hidden_dim = input_dim if len(net_arch) == 0 else net_arch[-1]\n\n layers = [FCN(input_dim, output_dim=None, net_arch=net_arch)]\n if deuling:\n layers += [\n Dueling(\n nn.Sequential(\n NoisyLinear(hidden_dim, hidden_dim),\n nn.ReLU(inplace=True),\n NoisyLinear(hidden_dim, output_dim),\n ),\n nn.Sequential(\n NoisyLinear(hidden_dim, hidden_dim),\n nn.ReLU(inplace=True),\n NoisyLinear(hidden_dim, 1),\n ),\n )\n ]\n else:\n layers += [\n NoisyLinear(hidden_dim, hidden_dim),\n nn.ReLU(inplace=True),\n NoisyLinear(hidden_dim, output_dim),\n ]\n\n super(NoisyDQNPolicy, self).__init__(*layers)\n\n def reset_noise(self) -> None:\n for m in self.modules():\n if isinstance(m, NoisyLinear):\n m.reset_noise()\n\n\nclass NoisyDQN(DoubleDQN):\n\n def __init__(self,\n env: gym.Env,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n learning_rate: float = 0.0001,\n buffer_size: int = 1000000,\n exploration_initial_epsilon: float = 1,\n exploration_final_epsilon: float = 0.01,\n exploration_fraction: float = 0.5,\n batch_size: int = 32,\n gamma: float = 0.99,\n tau: float = 1,\n update_target_interval: int = 1000,\n max_grad_norm: float = 10.0,\n device: str = 'cuda',\n initial_setup: bool = True) -> None:\n super().__init__(env, policy_kwargs, learning_rate, buffer_size, exploration_initial_epsilon,\n exploration_final_epsilon, exploration_fraction, batch_size, gamma, tau,\n update_target_interval, max_grad_norm, device, 
initial_setup)\n\n def setup_models(self):\n self.policy_net = NoisyDQNPolicy(self.obs_dim, self.action_dim, **self.policy_kwargs).to(self.device)\n self.target_net = copy.deepcopy(self.policy_net).to(self.device)\n self.target_net.eval()\n\n @torch.no_grad()\n def predict(self, observation: Union[np.ndarray, Dict[str, np.ndarray]], deterministic: bool = False):\n\n obs = self._convert_tensor(observation)\n action = self.policy_net(obs).argmax().detach().item()\n\n return action\n\n def train(self):\n loss = super().train()\n\n self.policy_net.reset_noise()\n self.target_net.reset_noise()\n\n return loss\n","sub_path":"haruna/agents/noisy_dqn.py","file_name":"noisy_dqn.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"458874599","text":"#############################################\n# @name = Aditya Shirwatkar\n# @github = aditya-shirwatkar\n# @copyright = None \n#############################################\n\n# x1, y1 should be lower\n\nx1 = -1; x2 = 1\ny1 = -2; y2 = 4\n\nm = round((y2-y1)/(x2-x1), 3) if (x2-x1) != 0 else 100000000 # used just to represnt inf\n\ndx = abs(x2-x1)\ndy = abs(y2-y1)\ntwo_dy = 2*dy \ntwo_dx = 2*dx\n\nlinePoints = []\np = []\np_current = 2*dy - dx\np.append(p_current)\n\ni=0\n\nprint('######### INFO #########')\nprint('x1 = ', x1, ' ', 'y1 = ', y1)\nprint('x2 = ', x2, ' ', 'y2 = ', y2)\nprint('dx = ', dx, ' ', 'dy = ', dy, ' ', 'm = ', m)\n\n## For m<1\nif m < 1:\n if abs(dx) > abs(dy):\n if x1 > x2:\n x, y = x2, y2\n else:\n x, y = x1, y1\n \n linePoints.append([x, y])\n\n while dx>0 :\n if p_current > 0 :\n x, y = x+1, y+1\n linePoints.append([x, y])\n p_current += (two_dy - two_dx)\n p.append(p_current)\n else:\n x += 1\n linePoints.append([x, y])\n p_current += two_dy\n p.append(p_current)\n \n dx-=1\n i+=1\n## for m >1\nelse:\n if y1>y2:\n x,y = x2, y2\n else:\n x,y = x1, y1\n \n linePoints.append([x, y])\n \n while dy>0 :\n if p_current > 0:\n x, y = x+1, y+1\n linePoints.append([x, y])\n p_current += two_dx - two_dy\n p.append(p_current) \n # elif p_current > 0 and dx == 0:\n # y += 1\n # linePoints.append([x, y])\n # p_current += two_dx - two_dy\n # p.append(p_current)\n else:\n y += 1\n linePoints.append([x, y])\n p_current += two_dx\n p.append(p_current)\n \n dy-=1\n i+=1\n\nprint('Line', linePoints)\nprint('-----------')\nprint('p', p)\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\n\nN = 4*len(linePoints)\n# make an empty data set\ndata = np.ones((N, N)) * np.nan\n# fill in some fake data\nfor point in linePoints:\n data[N-1 - (N//2 + point[1]), (point[0] + N//2)] = 1\n\n# make a figure + axes\nfig, ax = plt.subplots(1, 1, tight_layout=True)\n# make color map\nmy_cmap = 
colors.ListedColormap(['r', 'g', 'b'])\n# set the 'bad' values (nan) to be white and transparent\nmy_cmap.set_bad(color='w', alpha=0)\n# draw the grid\nfor x in range(N + 1):\n ax.axhline(x, lw=2, color='k', zorder=5)\n ax.axvline(x, lw=2, color='k', zorder=5)\n# draw the boxes\nax.imshow(data, interpolation='none', cmap=my_cmap, extent=[0, N, 0, N], zorder=0)\n# turn off the axis labels\nax.axis('off')\n\nplt.show()","sub_path":"bressenham/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"162734598","text":"\"\"\"ecomm URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n# from django.conf.urls import url,include\n\nurlpatterns = [\n path(r'admin/',admin.site.urls),\n path(r'cart/',include('cart.urls')),\n path(r'orders/', include('orders.urls')), \n path(r'coupons/',include('coupons.urls')),\n path(r'paypal/', include('paypal.standard.ipn.urls')),\n path(r'payment/', include('payment.urls')),\n path(r'account/', include('account.urls')),\n path(r'forum/',include('forum.urls')),\n path(r'hire/',include('hire.urls')),\n path(r'', include('efarm.urls')),\n path(r'cabook/',include('cabook.urls')),\n path(r'book/',include('book.urls')),\n path(r'search/', include('haystack.urls')),\n # path('', include('social.apps.django_app.urls', namespace='social')),\n\n \n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)\n","sub_path":"ecomm/ecomm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"293422760","text":"import logging\nimport typing\nfrom functools import wraps\n\nfrom openhivenpy.utils import dispatch_func_if_exists\n\nlogger = logging.getLogger(__name__) \n\n\nclass EventHandler:\n \"\"\"\n Event Handler for the HivenClient Class. Functions will be called from the\n websocket class and if the user registered an event response with the\n decorator @HivenClient.event, it will be called and executed.\n \"\"\"\n def __init__(self, call_obj: object = None):\n self._call_obj = call_obj\n if self._call_obj is None:\n logger.debug(\"[EVENT-HANDLER] Passed object where the events should be called from is None!\")\n self._call_obj = self\n\n def event(self, func: typing.Coroutine = None):\n \"\"\"\n Decorator used for registering Client Events\n \n :param func: Function that should be wrapped. Only usable if the wrapper is used in the function syntax: 'event(func)'!\n \n \"\"\"\n def decorator(func_: typing.Coroutine):\n @wraps(func_)\n async def wrapper(*args, **kwargs): \n return await func_(*args, **kwargs)\n \n setattr(self, func_.__name__, wrapper) # Adding the function to the object\n\n logger.debug(f\"[EVENT-HANDLER] >> Event {func_.__name__} registered\")\n\n return func_ # func can still be used normally\n\n # TODO! 
Needs to raise Exception if not async using 'inspect.iscoroutinefunction(func):'\n if func is None:\n return decorator\n else:\n return decorator(func)\n\n async def dispatch_on_connection_start(self) -> None:\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_connection_start'\n )\n\n async def dispatch_on_init(self, time) -> None:\n param = [time]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_init',\n func_args=param\n )\n\n async def dispatch_on_ready(self) -> None:\n param = []\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_ready',\n func_args=param\n )\n\n async def dispatch_on_user_update(self, old, new) -> None:\n param = [old, new]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_user_update',\n func_args=param\n )\n\n async def dispatch_on_house_update(self, old, new) -> None:\n param = [old, new]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_update',\n func_args=param\n )\n\n async def dispatch_on_house_add(self, house) -> None:\n param = [house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_join',\n func_args=param\n )\n\n async def dispatch_on_house_remove(self, house) -> None:\n param = [house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_remove',\n func_args=param\n )\n\n async def dispatch_on_house_delete(self, house_id) -> None:\n param = [house_id]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_delete',\n func_args=param\n )\n\n async def dispatch_on_house_down_time(self, house_id) -> None:\n param = [house_id]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_downtime',\n func_args=param\n )\n\n async def dispatch_on_room_create(self, room) -> None:\n param = [room]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_room_create',\n func_args=param\n )\n\n async def 
dispatch_on_house_member_join(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_join',\n func_args=param\n )\n\n async def dispatch_on_house_member_enter(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_online',\n func_args=param\n )\n\n async def dispatch_on_house_member_leave(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_leave',\n func_args=param\n )\n\n async def dispatch_on_house_member_exit(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_offline',\n func_args=param\n )\n\n async def dispatch_on_relationship_update(self, relationship) -> None:\n param = [relationship]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_relationship_update',\n func_args=param\n )\n\n async def dispatch_on_presence_update(self, presence, user) -> None:\n param = [presence, user]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_presence_update',\n func_args=param\n )\n\n async def dispatch_on_message_create(self, message) -> None:\n param = [message]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_message_create',\n func_args=param\n )\n\n async def dispatch_on_message_delete(self, message) -> None:\n param = [message]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_message_delete',\n func_args=param\n )\n \n async def dispatch_on_message_update(self, message) -> None:\n param = [message]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_message_update',\n func_args=param\n )\n\n async def dispatch_on_typing_start(self, typing) -> None:\n param = [typing]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n 
func_name='on_typing_start',\n func_args=param\n )\n\n async def dispatch_on_typing_end(self, typing) -> None:\n param = [typing]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_typing_end',\n func_args=param\n )\n\n async def dispatch_on_member_update(self, old, new, house) -> None:\n param = [old, new, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_member_update',\n func_args=param\n )\n\n async def dispatch_on_house_member_chunk(self, members: list, house, data: dict) -> None:\n param = [members, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_chunk',\n func_args=param\n )\n\n async def dispatch_on_batch_house_member_update(self, house, members, data: dict) -> None:\n param = [members, data, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_batch_house_member_update',\n func_args=param\n )\n\n async def dispatch_on_house_entity_update(self, house) -> None:\n param = [house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_entity_update',\n func_args=param\n )\n","sub_path":"openhivenpy/events/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}