diff --git "a/4929.jsonl" "b/4929.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4929.jsonl"
@@ -0,0 +1,273 @@
+{"seq_id":"624300389","text":"\nimport os\nimport shutil\nimport subprocess\nimport tempfile\n\nfrom odoo.tests import TransactionCase\n\n\nclass TestRunbotSkipBuild(TransactionCase):\n def setUp(self):\n super().setUp()\n self.tmp_dir = tempfile.mkdtemp()\n\n @self.addCleanup\n def remove_tmp_dir():\n if os.path.isdir(self.tmp_dir):\n shutil.rmtree(self.tmp_dir)\n\n self.work_tree = os.path.join(self.tmp_dir, \"git_example\")\n self.git_dir = os.path.join(self.work_tree, \".git\")\n subprocess.call([\"git\", \"init\", self.work_tree])\n hooks_dir = os.path.join(self.git_dir, \"hooks\")\n if os.path.isdir(hooks_dir):\n # Avoid run a hooks for commit commands\n shutil.rmtree(hooks_dir)\n self.repo = self.env[\"runbot.repo\"].create({\"name\": self.git_dir})\n self.build = self.env[\"runbot.build\"]\n\n @self.addCleanup\n def remove_clone_dir():\n if os.path.isdir(self.repo.path):\n shutil.rmtree(self.repo.path)\n\n def git(self, *cmd):\n subprocess.call([\"git\"] + list(cmd), cwd=self.work_tree)\n\n def test_subject_skip_build(self):\n \"\"\"Test [ci skip] feature\"\"\"\n\n cimsg = \"Testing subject [ci skip]\"\n self.git(\"commit\", \"--allow-empty\", \"-m\", cimsg)\n self.repo._update_git()\n build = self.build.search([(\"subject\", \"=\", cimsg)])\n self.assertFalse(build)\n\n cimsg = \"Testing subject without ci skip\"\n self.git(\"commit\", \"--allow-empty\", \"-m\", cimsg)\n self.repo._update_git()\n build = self.build.search([(\"subject\", \"=\", cimsg)])\n self.assertTrue(build)\n\n cimsg = \"Testing body\\n\\n[ci skip]\\nother line\"\n self.git(\"commit\", \"--allow-empty\", \"-m\", cimsg)\n self.repo._update_git()\n build = self.build.search([(\"subject\", \"=\", cimsg.split(\"\\n\")[0])])\n self.assertFalse(build)\n\n cimsg = \"Testing body without\\n\\nci skip\\nother line\"\n self.git(\"commit\", \"--allow-empty\", \"-m\", cimsg)\n self.repo._update_git()\n build = self.build.search([(\"subject\", \"=\", cimsg.split(\"\\n\")[0])])\n self.assertTrue(build)\n","sub_path":"runbot/tests/test_subject_skip_build.py","file_name":"test_subject_skip_build.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"102629626","text":"import konlpy\nimport nltk\nfrom konlpy.tag import Twitter\n#stremmer를 형용사,동사에 붙여보자!!\n\n\ntwitter = Twitter()\n# POS tag a sentence\n\n# 문장단위로 끊어서 해봐!\n\n#sentence = u'좋은 사과'\n#sentence = u'다맘에들어요 다다다다다!!!!!!길이감도 좋고 핏도 좋고 배송도 빠르고 기분너무좋아요ㅋㅋㅋ감사합니당'\n#sentence = u'이 제품 가격대비완전좋아요^^~화질 좋고~~음향좋고~~^^.배송은 아주 만족스럽습니다~~^^'\nsentence = u'배송이 엄청빠르네요ㅎㅎ후기에서 결점이 있다고하던데..무결점으로 주문해서 그런지 결점이 하나도없네요ㅎㅎ가성비는 전 브랜드를 통합해서 최고인거같네요ㅎㅎ추천합니다.'\n#sentence = '양말이생각보다퀄리티도좋고~~그림도너무이쁘네요~~~다음에또사야겟어요~~잘신겠습니다~~~~'\n\n\n\n#sentence = '좋네요 번창하세여'\n# '만족합니다'\n#sentence = '금요일 오전 주문에 토요일 오전 배송도착 \\r\\n불량화소가 대충 몇개 있긴하지만 충분히 감안하고 쓸만합니다'\n#sentence = '아주마음에들어요'\n#sentence = '너무맘에들어요 ㅎㅂㅎ!!'\n# '잘 받았어요~ 잘 신을게요^♡^'\n\n#sentence = '양말싸게잘산듯'\n# '싼 가격에 잘샀네요~'\n# '양말 싸게 잘 구입한것 같습니딘ㅎㅎ'\n\n\n# '아주 만족합니다! 적극적으로 추천합니다 '\n# '만족합니다~~'\n# '잘받앗어여 ~'\n#sentence = '저렴하게 잘 샀어요'\n# '잘받았습니다 '\n# '잘받았습니다 제품 만족합니다!'\n# '대박 상품!!'\n# '모두 맘에 쏙들어요^^'\n# '고맙릅니다'\n# '양말 자체는 에쁜데.. 두번 신으니깐 빵꾸나요 ㅋㅋㅋㅋ'\n# '아직못받앗는데요ㅜㅜ'\n\n# '괜챦아요!!'\n# '싼가격에 잘샀어요'\n# '양말이 거기서 거기지 뭔 후기를 쓰래 ㅡ ㅡ'\n# '잘신을께요'\n# '마음에 들어요'\n# '굿ㅋㅋㅋㅈㅋ'\n# '색상 디자인 만족합니다요~~'\n# '완전 마음에 들어요/뿌뜻 ㅋㅋ'\n# '잘신을게영 ㅎㅎㅎ'\n# '양말 품질 만족합니다'\n# '만족합니다'\n# '네.잘받았습니다 .'\n\n# '잘받았어요'\n\n\n#words = konlpy.tag.Twitter().pos(sentence) #품사태깅\nwords = twitter.pos(sentence, False, True);\nprint(words)\nprint(\"\\n\")\n\n\nfile =open(\"/Users/lunjm/PycharmProjects/ReviewClustering/dic/Noun/ShoppingNoun.txt\")\nnounDic = {}\nscore = file.readline()\nfor x in file:\n x = x.replace('\\n', '')\n nounDic[x] = score\n\nfile.close()\n\nprint(nounDic.keys())\n# print(nounDic.count())\n\n\nprint(nounDic.get('양말'))\n\n\n\n\n\n# Define a chunk grammar, or chunking rules, then chunk\ngrammar = \"\"\"\nNP: {*?} # Noun phrase\nADV: {*}\nADJ: {*} # Adjective phrase\n\"\"\"\nparser = nltk.RegexpParser(grammar) #grammer 별로 구분\n\nchunks = parser.parse(words) #구문분석\nprint(\"# Print whole tree\")\nprint(chunks.pprint())\n\npurchaseReview = []\nbeforeLabel = \"\"\nbeforeWord = \"\"\ntemp = \"\"\nprint(\"\\n# Print Noun, Adjective phrases only\")\nfor subtree in chunks.subtrees():\n if subtree.label() == 'NP' :\n temp = ''.join(e[0] for e in list(subtree))\n if len(temp) != 0 :\n beforeLabel = 'NP'\n beforeWord = temp\n print(\"명사\", ''.join(e[0] for e in list(subtree))),\n elif subtree.label() == 'APJ' :\n word = ''.join(e[0] for e in list(subtree))\n if len(temp) != 0 and (beforeLabel == 'NP') :\n purchaseReview.append(beforeWord +' '+ word)\n else :\n purchaseReview.append(word)\n beforeLabel = 'AP'\n before = word\n print(\"형용사\", ''.join((e[0] for e in list(subtree))))\n # print(subtree.pprint())\n\nprint(purchaseReview)\n# Display the chunk tree\n#chunks.draw()\n","sub_path":"Test3.py","file_name":"Test3.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"339007191","text":"import json\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport random\r\nclass proxy():\r\n\r\n def __init__(self):\r\n self.ip_pool = []\r\n self.proxy_list = []\r\n self.reload()\r\n self.counter = 0\r\n self.restore_counter = 0\r\n # self.RR = Round_Robin(self.proxy_list)\r\n\r\n def reload(self):\r\n # Crawl from http://www2.waselproxy.com/\r\n for page in range(1, 5):\r\n get_url = \"http://www2.waselproxy.com/page/\" + str(page)\r\n p = requests.get(get_url)\r\n soup = BeautifulSoup(p.content, \"lxml\")\r\n ip_row = soup.find_all(\"tr\")\r\n\r\n for one in ip_row[1:]:\r\n try:\r\n x = one.find(\"progress\")\r\n value = int(x.get('value'))\r\n\r\n if value >= 50:\r\n content = one.text\r\n ip_context = content.strip().split(\".\")\r\n ip = \"http://\" + ip_context[0]+\".\"+ip_context[1]+\".\"+ip_context[2]+\".\"+ip_context[3][0:3] + \":\" + ip_context[3][3:7]\r\n if re.match(\".*[a-zA-Z]$\", ip) is None:\r\n self.ip_pool.append(ip)\r\n except Exception as e:\r\n continue\r\n\r\n for items in self.ip_pool:\r\n proxies = {\r\n \"http\": items,\r\n }\r\n self.proxy_list.append(proxies)\r\n\r\n def getproxy(self):\r\n\r\n # index = random.randint(0,len(self.proxy_list))\r\n if self.restore_counter > (len(self.proxy_list)):\r\n # proxy = None\r\n print(\"Refresh the proxy list......\")\r\n self.proxy_list = []\r\n self.reload()\r\n self.restore_counter = 0\r\n proxy = self.proxy_list[self.counter]\r\n elif self.counter < (len(self.proxy_list)):\r\n self.counter += 1\r\n proxy = self.proxy_list[self.counter]\r\n self.restore_counter += 1\r\n else:\r\n self.counter = 0\r\n proxy = self.proxy_list[self.counter]\r\n self.restore_counter += 1\r\n # proxy = self.RR.get_next()\r\n\r\n return proxy\r\n","sub_path":"Cache/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"347855483","text":"import requests\nimport re\nfrom selenium import webdriver\nimport pymongo\n\n\nMONGO_URL = 'localhost'\nMONGO_DB = '虎牙'\nMONGO_COLLECTION = '英雄联盟板块'\nclient = pymongo.MongoClient(MONGO_URL)\ndb = client[MONGO_DB]\n\n\ndef get_page(url):\n\n \"\"\" 获取页面 \"\"\"\n\n # 设置请求头\n header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 Edg/80.0.361.66',\n 'Referer': 'https://www.huya.com/'\n }\n response = requests.get(url, headers=header)\n\n if response.status_code == 200:\n return response.text\n return None\n\n\ndef parse_html(html):\n\n \"\"\" 利用正则表达式解析网页 \"\"\"\n\n pattern = re.compile(\n '\"gameFullName\".*?\"(.*?)\",.*?totalCount\".*?\"(.*?)\",.*?roomName\".*?\"(.*?)\",.*?nick\".*?\"(.*?)\",.*?introduction\".*?\"(.*?)\",.*?profileRoom\".*?\"(.*?)\"', re.S\n )\n items = re.findall(pattern, html)\n for index, item in enumerate(items):\n yield {\n '当前页序号': index,\n '类别': item[0].encode('utf-8').decode('unicode_escape'), # 解码encode('utf-8').decode('unicode_escape')\n '标题': item[2].encode('utf-8').decode('unicode_escape'),\n '主播': item[3].encode('utf-8').decode('unicode_escape'),\n '人气': item[1],\n '直播介绍': item[4].encode('utf-8').decode('unicode_escape'),\n '直播间地址': 'https://www.huya.com/' + str(item[5])\n }\n\n\ndef main(page_no):\n # url = 'https://www.huya.com/cache.php?m=LiveList&do=getLiveListByPage&gameId=2633&tagAll=0&page=' + str(page_no)\n url = 'https://www.huya.com/cache.php?m=LiveList&do=getLiveListByPage&gameId=1&tagAll=0&page=' + str(page_no)\n html = get_page(url)\n # 网页解析返回yield生成器类型\n for item in parse_html(html):\n\n \"\"\" 实现根据字典键找到对应值自动打开浏览器并跳转到直播间, if item['主播'] == 'xxxx' : get() \"\"\"\n if 'Uzi' in item['主播']:\n # print('UZI在直播')\n global browser\n # browser = webdriver.Edge() # 初始化浏览器对象\n # browser.get('https://www.huya.com/666888') #\n print(item)\n save_to_mongo(item)\n\n\ndef save_to_mongo(res):\n myList = []\n myList.append(res)\n try:\n if db[MONGO_COLLECTION].insert_many(myList):\n print('插入数据库成功')\n except Exception:\n print('插入数据库失败')\n\n\nif __name__ == '__main__':\n print('开始清洗历史数据')\n db[MONGO_COLLECTION].delete_many({})\n print('历史数据清洗完成')\n for i in range(1, 10):\n main(page_no=i)\n","sub_path":"reForHuya/getHuyaMsg.py","file_name":"getHuyaMsg.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"191518666","text":"# coding = utf-8\nimport os,sys\nimport zipfile\nfrom selenium import webdriver\nMyque=[]\noption = webdriver.ChromeOptions()\noption.add_argument(\"headless\")\nbrowser = webdriver.Chrome(options=option)\ndef zip_file(src_dir):\n zip_name = src_dir +'.zip'\n z = zipfile.ZipFile(zip_name,'w',zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(src_dir):\n fpath = dirpath.replace(src_dir,'')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename),fpath+filename)\n print ('==压缩成功==')\n z.close()\ndef removeFileInFirstDir(targetDir):\n for file in os.listdir(targetDir):\n targetFile = os.path.join(targetDir, file)\n if os.path.isfile(targetFile):\n os.remove(targetFile)\nusername=input()#读入账户\nuserpass=input()#读入密码\nbrowser.get(\"http://acm.hdu.edu.cn/status.php?user=\"+username)#取得当前用户题目集\nbrowser.find_element_by_name(\"username\").send_keys(username)\nbrowser.find_element_by_name(\"userpass\").send_keys(userpass)\nbrowser.find_element_by_name(\"login\").click()#模拟登录\nflag=True\nwhile flag:\n table=browser.find_element_by_id(\"fixed_table\")\n\n for i in range(2,17):\n try:\n xpath=\"//table/tbody/tr[\" + str(i) + \"]/td[3]/font\"\n status = table.find_element_by_xpath(xpath)\n except:\n try:\n xpath = \"//table/tbody/tr[\" + str(i) + \"]/td[3]/a/font\"\n status = table.find_element_by_xpath(xpath)\n except:\n flag=False\n if flag:\n if status.text==\"Accepted\":\n ans=table.find_element_by_xpath(\"//table/tbody/tr[\"+str(i)+\"]/td[7]/a\").get_attribute(\"href\")\n Myque.append(ans)\n browser.find_element_by_xpath(\"//*[@id=\\\"fixed_table\\\"]/p/a[3]\").click()\nMyque=set(Myque)\nfor quePath in Myque:\n browser.get(quePath)\n code = browser.find_element_by_xpath(\"/html/body/table/tbody/tr[4]/td/div/div[2]/pre\")\n queNum=browser.find_element_by_xpath(\"/html/body/table/tbody/tr[4]/td/div/div[1]/b/font/a[1]\").text\n print(code.text)\n filename=sys.path[0]+\"\\\\tmp\\\\\"+queNum.replace('==',' equals ').replace('?',\" wen \")\n f=open(filename,'w')\n f.write(code.text)\n f.close()\nbrowser.close()\nzip_file(sys.path[0]+\"\\\\tmp\")\nremoveFileInFirstDir(sys.path[0]+\"\\\\tmp\")\nbrowser.quit()","sub_path":"HDUSpider.py","file_name":"HDUSpider.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"490102400","text":"import tensorflow as tf\nimport numpy as np\nimport pdb\n\n# 우린 HiHelloRNNLab에서 Manually하게 one-hot으로 data creation을 했다.\n# 각각 문자에 대한 인덱스와 해당되는 번호를 손으로 만들었다.\n# 하지만 문자열이 길어��면 힘들다. 그래서 자동으로 해보자.\n\nsample = \"if you want you\"\nidx2char = list(set(sample)) # index -> char\nprint(\"idx2char:\", idx2char)\nchar2idx = {c: i for i, c in enumerate(idx2char)} # chat -> idx\nprint(\"char2idx:\", char2idx)\n\nsample_idx = [char2idx[c] for c in sample]\nprint(\"sample_idx:\", sample_idx)\nx_data = [sample_idx[:-1]] # X data sample (0~n-1) hello: hell\ny_data = [sample_idx[1:]] # Y data sample (1~n) hello: ello\n# hyper parameters\ndic_size = len(char2idx) # RNN input size (one hot size)\nrnn_hidden_size = len(char2idx) # RNN output size\nnum_classes = len(char2idx) # final output size (RNN or softmax, etc..)\nbatch_size = 1 # one sample data, one batch\nsequence_length = len(sample) - 1 # X에서 끝에서 마지막 1개까지만 하니까\nX = tf.placeholder(tf.int32, [None, sequence_length]) # X data\nY = tf.placeholder(tf.int32, [None, sequence_length]) # Y label\n\nnum_classes = len(char2idx)\n# 전체 몇개의 one-hot으로 만들어줄지를 정하는 num_classes\nX_one_hot = tf.one_hot(X, num_classes)\n# one hot: 1 -> 0 1 0 0 0 0 0 0 0 0\n# 한가지 주의할게 one-hot으로 만들때는 shape이 어떻게 변하는지를 살펴봐라\n# printing by khj\nsess = tf.Session()\nprint(\"x_data:\", x_data)\nprint(\"y_data:\", y_data)\nprint(\"X_one_hot:\", sess.run(X_one_hot, feed_dict={X: x_data}))\nprint(\"X_max:\", sess.run(tf.argmax(X_one_hot, -1), feed_dict={X: x_data})) # (= x_data)\nprint(\"X_one_hot(squeeze):\", np.squeeze(sess.run(X_one_hot, feed_dict={X: x_data})))\n# printing by khj ###\n\ncell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_hidden_size, state_is_tuple=True) # output size = one-hot size\ninitial_state = cell.zero_state(batch_size, tf.float32)\noutputs, _states = tf.nn.dynamic_rnn(cell, X_one_hot, initial_state=initial_state, dtype=tf.float32)\n# cell, X_one_hot,initial_state,dtype)\nweights = tf.ones([batch_size, sequence_length])\nprint(\"weights:\",sess.run(weights))\nsequence_loss = tf.contrib.seq2seq.sequence_loss(logits=outputs, targets=Y, weights=weights)\nloss = tf.reduce_mean(sequence_loss)\noptimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(sequence_loss)\nprediction = tf.argmax(outputs, axis=-1)\n\n# 이제 학습해야지.\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # for i in range(10):\n for i in range(20):\n l, _ = sess.run([loss, optimizer], feed_dict={X: x_data, Y: y_data})\n print(\"loss:\", l)\n result, outputs_res, weights_res = sess.run([prediction, outputs, weights], feed_dict={X: x_data})\n print(\"outputs_res:\", outputs_res)\n print(\"outputs_res.shape:\", outputs_res.shape)\n print(\"output_res -> result:\", result)\n print(\"result.shape:\", result.shape)\n print(\"np.squeeze(result):\", np.squeeze(result))\n print(\"weights_res:\", weights_res)\n result_str = [idx2char[c] for c in np.squeeze(result)]\n print(\"result_str:\", result_str)\n\n# 이건 잘 된다.\n\n","sub_path":"practice/15_RNNWithLongSequenceLab.py","file_name":"15_RNNWithLongSequenceLab.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"378592281","text":"import re\r\nimport json\r\nimport os\r\nimport MatchNum\r\nimport time\r\nstart = time.time()\r\nMatchNum.matchNum()\r\n\r\nk = 1\r\nwhile True:\r\n if os.path.exists('./needToSolve{}.txt'.format(k)):\r\n k += 1\r\n else:\r\n break\r\n\r\noutputFile = open('./needToSolve{}.txt'.format(k), 'w', encoding='utf-8')\r\n\r\nwith open('./replaceResult.txt', 'r') as f:\r\n for index, line in enumerate(f.readlines()):\r\n word = json.loads(line)['text']\r\n word = word.split(' ')\r\n for single in word:\r\n match_num = re.findall(r'\\d+', single)\r\n if len(match_num) == 0:\r\n continue\r\n else:\r\n # print(index, match_num, single)\r\n temp = str(index) + ' ' + str(match_num) + ' ' + str(single) + '\\n'\r\n outputFile.write(temp)\r\nend = time.time()\r\nprint('time:', end - start)\r\nprint('detect over')\r\n\r\n","sub_path":"TranslateNumToEnglish/detectNum.py","file_name":"detectNum.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"264079993","text":"import re\n\ncount = 0\nb = re.compile('b')\nm = b.finditer('abcdefabghkab')\n\nfor match in m:\n count +=1\n print(match.start(),'---',match.end(),'---',match.group())\nprint('no of occurence',count)\n\n\n","sub_path":"ex76.py","file_name":"ex76.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"561997927","text":"from dobot_gym.envs.real.straight import DobotStraightEnv\nimport numpy as np\n\ndobot_env = DobotStraightEnv()\n\ndobot_env.reset()\n\nrandom_action = dobot_env.action_space.sample()\nprint(\"Random action -- \", random_action)\n\nfixed_action = np.array([0, 1, 1])\nfor i in range(55):\n random_action = dobot_env.action_space.sample()\n # obs, reward, done, _ = dobot_env.step(random_action)\n obs, reward, done, _ = dobot_env.step(fixed_action)\n print(done)\n","sub_path":"dobot_gym/envs/tests/straight_env.py","file_name":"straight_env.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"588505517","text":"# 循环语句while循环\ni = 0\nwhile i < 3:\n i += 1\n print(i)\n# 1\n# 2\n# 3\n# while循环嵌套语句\ni, j = 0, 3\nwhile i < 3:\n while i < j:\n print((i+1)*j)\n j -= 1\n i += 1\n# 3\n# 4\n# for循环\nname = 'this, is, mis, spring'\ni = 0\nfor strName in name:\n if strName == 's':\n i = i+1\n print(i)\n# 1\n# 2\n# 3\n# 4\n# 利用内建范围函数range实现for循环\nfor i in range(19):\n if i % 2 == 0:\n print('%d是偶数' % (i))\n# 0是偶数\n# 2是偶数\n# 4是偶数\n# 6是偶数\n# 8是偶数\n# 10是偶数\n# 12是偶数\n# 14是偶数\n# 16是偶数\n# 18是偶数\n# 循环控制语句break,终止跳出循环\nstr_user = 'how are you'\nfor i in range(len(str_user)):\n print('for循环%d次' % (i+1))\n if str_user[i:i+3] == 'how':\n print('how is %d' % (i))\n break\n print('for是否执行这句代码')\n# for循环1次\n# how is 0\n# ------continue语句是控制循环方向,当满足条件之后contioue回到循环开始处进行下一次的循环\nfor i in range(1, 10):\n if i % 2 != 0:\n continue\n print('%d是偶数' % (i))\n# 2是偶数\n# 4是偶数\n# 6是偶数\n# 8是偶数\n","sub_path":"python_basis/03_Conditional_Branches_Loops/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"634147438","text":"#/usr/bin/env python3.7\n\nmatrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n]\n\n# becomes\n# matrix = [\n# [1, 5, 9],\n# [2, 6,10],\n# [3, 7,11],\n# [4, 8,12],\n#]\n\nprint(\"Matrix: \", matrix)\n# nested list comp is evaluated in the context of the for that follows it\n# [ 2 ][ 1 ]\ntransposed = [[row[i] for row in matrix] for i in range(4)]\nprint(\"Matrix transposed: \", transposed)\n\ntransposed.clear()\n\nfor i in range(4):\n transposed.append([row[i] for row in matrix])\nprint(\"Alterneate version of transposed:\", transposed)\n\ntransposed.clear()\n\nfor i in range(4):\n # the following 3 lines implement the nested listcomp\n transposed_row = []\n for row in matrix:\n transposed_row.append(row[i])\n transposed.append(transposed_row)\n\nprint(\"Transposed with a for loop: \", transposed)\n\n# \"in the real world, you should prefer built-in functions to complex flow\n# statements\"\n\ntransposed.clear()\n\ntransposed = list(zip(*matrix))\n\nprint(\"Transposed built in zip: \", transposed)\n","sub_path":"docs_5.data_structures/list_comprehensions7.py","file_name":"list_comprehensions7.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"104940626","text":"'''\nCreated on May 17, 2018\n\n@author: helrewaidy\n'''\n# models\n\nimport argparse\nimport os\n\nimport torch\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3,4\"\n\n\nclass Parameters():\n def __init__(self):\n super(Parameters, self).__init__()\n\n ## Hardware/GPU parameters =================================================\n self.Op_Node = 'spider' # 'alpha_V12' # 'myPC', 'O2', 'spider'\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.tbVisualize = False\n self.tbVisualize_kernels = False\n self.tbVisualize_featuremaps = False\n self.multi_GPU = True\n\n if self.Op_Node in ['myPC', 'alpha_V12']:\n self.device_ids = [0]\n elif self.Op_Node in ['spider', 'O2']:\n self.device_ids = range(0, torch.cuda.device_count())\n\n if self.Op_Node in ['spider', 'O2', 'alpha_V12']:\n self.data_loders_num_workers = 20\n else:\n self.data_loders_num_workers = 4\n\n ## Network/Model parameters =================================================\n if self.Op_Node in ['myPC', 'alpha_V12']:\n self.batch_size = 2\n elif self.Op_Node in ['spider', 'O2']:\n self.batch_size = 100 * len(self.device_ids) // 8\n\n print('-- # GPUs: ', len(self.device_ids))\n print('-- batch_size: ', self.batch_size)\n\n self.activation_func = 'CReLU' # 'CReLU' # 'modReLU' 'KAF2D' 'ZReLU'\n self.args.lr = 0.1\n self.dropout_ratio = 0.0\n self.epochs = 10\n self.training_percent = 0.7\n self.nIterations = 1\n self.magnitude_only = False\n self.Validation_Only = False\n self.Evaluation = False\n\n #########\n # self.MODEL = 0 # Original U-net implementation\n self.MODEL = 3 # Complex U-net (URUS)\n # self.MODEL = 3.1 # Complex stacked convolution layers\n # self.MODEL = 3.2 # Complex U-net with different kernel configuration\n # self.MODEL = 4 # Complex U-Net with residual connection\n # self.MODEL = 7 # Real shallow U-net layer [double size] (magNet)\n\n #########\n if self.MODEL in [2, 3, 3.1, 3.2, 4, 5, 6]:\n self.complex_net = True\n else:\n self.complex_net = False\n\n ## Dataset and paths =================================================\n\n self.ds_total_num_slices = 0\n self.patients = []\n self.Rate = 3\n self.input_slices = list()\n self.num_slices_per_patient = list()\n self.groundTruth_slices = list()\n self.training_patients_index = list()\n self.us_rates = list()\n self.saveVolumeData = False\n self.multiCoilInput = False\n self.coilCombinedInputTV = True\n self.img_size = [256, 256]\n self.n_channels = 1\n\n self.cropped_dataset64 = False\n if self.cropped_dataset64:\n crop_txt = '_cropped64'\n else:\n crop_txt = ''\n self.trialNum = '3.555'\n self.arch_name = 'Model_0' + str(\n self.MODEL) + '_R' + str(\n self.Rate) + 'Trial' + self.trialNum\n\n self.dir = {'./ReconData_coilCombTVDL/Rate_' + str(\n self.Rate) + crop_txt + '/',\n './ReconData_coilCombTVDL/Rate_' + str(\n self.Rate) + '/'\n }\n self.model_save_dir = './RecoNet-Model/' + self.arch_name + '/'\n self.net_save_dir = './MatData/'\n self.tensorboard_dir = './RecoNet-Model/' + self.arch_name + '_tensorboard/'\n\n self.args.model = self.model_save_dir + 'MODEL_EPOCH.pth'\n\n def parse_args(self):\n model_names = 'recoNet_Model1'\n parser = argparse.ArgumentParser(description='PyTorch MD-CNN Training')\n # parser.add_argument('data', metavar='DIR',\n # help='path to dataset')\n parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\n parser.add_argument('-j', 
'--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n parser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\n parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\n parser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\n parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n parser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n parser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\n parser.add_argument('--world-size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set Up distributed training')\n parser.add_argument('--dist-backend', default='gloo', type=str,\n help='distributed backend')\n parser.add_argument('--cpu', '-c', action='store_true',\n help='Do not use the cuda version of the model',\n default=False)\n parser.add_argument('--viz', '-v', action='store_true',\n help='Visualize the images as they are processed',\n default=False)\n parser.add_argument('--no-save', '-n', action='store_false',\n help='Do not save the output masks',\n default=False)\n parser.add_argument('--model', '-m', default='MODEL_EPOCH417.pth',\n metavar='FILE',\n help='Specify the file in which is stored the model'\n \" (default : 'MODEL.pth')\")\n self.args = parser.parse_args()\n","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"49462990","text":"import requests\n\n\ndef test_methods_availability(base_url, auth_availability):\n endpoint, method, expected_status, description = auth_availability\n response = requests.request(method, f\"{base_url}/auth/{endpoint}\")\n\n assert response.status_code == int(expected_status), \\\n f\"Wrong status code on auth {endpoint} url for {method} method\"\n\n assert response.json().get(\"description\") == description\n","sub_path":"example/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"91840890","text":"class Solution(object):\n def findComplement(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n rt = 0\n exponent = 0\n while num != 0:\n remainder = num % 2\n num //= 2\n rt += (1 - remainder) * 2 ** exponent\n exponent += 1\n return rt\n","sub_path":"src/476_Number_Complement.py","file_name":"476_Number_Complement.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"10473622","text":"class RightTriangle:\n def __init__(self, hyp, leg_1, leg_2):\n self.c = hyp\n self.a = leg_1\n self.b = leg_2\n # calculate the area here\n self.area = round(self.a * self.b / 2, 1)\n\n\n# triangle from the input\ninput_c, input_a, input_b = [int(x) for x in input().split()]\n\n# write your code here\nprint(RightTriangle(input_c, input_a, input_b).area\n if input_c * input_c == input_a * input_a + input_b * input_b else 'Not right')\n","sub_path":"Topics/Class instances/Right triangle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"256907808","text":"import cv2 \nimport numpy as np\nimport pandas as pd\nimport os\nimport pickle\n\ndef load_header_df():\n return pd.read_csv('data/full_df.csv')\n\ndef img_to_vector(fn, resize=None, imgdir='data/preprocessed_images'):\n '''\n Take input filename for preprocessed fundus image.\n Assumes resolution of 512 px by 512 px. \n Returns a 1 dimensional row vector\n\n Input - file name of 512x512 preprocessed image data\n imgdir - directory of preprocessed image data\n resize - None or tuple of integer dimensions (list like of len 2)\n '''\n #print(imgdir, fn)\n path = os.path.join(imgdir, fn)\n\n img = cv2.imread(path)\n\n if img is None: raise ValueError('Unable to open image file, Check the filename and directory')\n\n if resize is not None:\n #img = cv2.resize(img, resize)\n try:\n img = cv2.resize(img, resize)\n except:\n raise TypeError('Unable to resize, Ensure resize is a valid tuple of dimensions')\n try:\n img = img.reshape(1, resize[0]*resize[1]*3)\n except: \n raise ValueError('Unable to reshpae resized image.')\n else:\n try:\n img = img.reshape(1, 512*512*3)\n except:\n raise ValueError('Unable to reshape image. Ensure the image is 512 px by 512 px')\n\n return img\n\ndef header_to_img_matrix(df, resize=None, imgdir='data/preprocessed_images', save=None):\n '''\n Convert dataframe of image header data and converts into numpy array. \n Inputs - dataframe, directory of images, and optionaal reshape dimensions\n Outputs - numpy array of header data and numpy array of image data\n '''\n\n # Extract data from dataframe\n data = df.values\n data_cols = list(df.columns.values)\n # Make sure filename is passed\n if 'filename' not in data_cols: raise ValueError('Dataframe must contain a filename column.')\n \n # Iterate over each row of data an extract an image for each filename\n img_features = None\n for row in data:\n fn = row[-1]\n #print(fn)\n img = img_to_vector(fn, resize, imgdir)\n if img_features is None: img_features=img\n else: img_features=np.concatenate((img_features, img))\n\n img_features = np.array(img_features) \n\n if save is not None:\n pickle.dump((data, img_features), open(\"data/{}.pkl\".format(save), \"wb\"))\n\n\n return data, img_features\n","sub_path":"src/make_features.py","file_name":"make_features.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"20777517","text":"''' \nAashiq Adams, sick class\n'''\nfrom tkinter import *\nfrom tkinter import messagebox\n\n#Parent class of influenza and cancer classes\nclass sick: \n def __init__(self):\n #labels\n sickness_code = Label(root, text=\"Sickness Code:\").place(x=25,y=150,anchor=\"w\")\n treatment_duration = Label(root, text=\"Duration of Treatment:\").place(x=25,y=200,anchor=\"w\")\n duration_unit = Label(root, text=\"Weeks/Months\").place(x=390, y=188)\n doc_prac_num = Label(root, text=\"Doctor's Practice Number:\").place(x=25,y=250,anchor=\"w\")\n fee = Label(root, text=\"Scan/Consultation Fee:\").place(x=25,y=300,anchor=\"w\")\n amount_paid_label = Label(root, text=\"Amount paid for treatment:\").place(x=25,y=400)\n\n #entry boxes\n self.sick_id = Entry(root)\n self.duration = Entry(root, width=10)\n self.doc_id = Entry(root)\n self.scan_or_consult = Entry(root)\n\n self.sick_id.place(x=300, y=135)\n self.duration.place(x=300, y=185)\n self.doc_id.place(x=300, y=235)\n self.scan_or_consult.place(x=300, y=285)\n \n #radiobuttons\n self.v = IntVar()\n cancer_radio = Radiobutton(root, text=\"Cancer\", variable=self.v, value=1)\n influenza_radio = Radiobutton(root, text=\"Influenza\", variable=self.v, value=2)\n\n cancer_radio.place(x=20, y=330)\n influenza_radio.place(x=20, y=360)\n \n #calculate, clear and exit buttons\n calc_btn = Button(root, text=\"Calculate\",command=self.calculate)\n clear_btn = Button(root, text=\"Clear\",command=self.clear)\n exit_btn = Button(root, text=\"Exit\", command=root.destroy).place(x=425, y=450)\n\n calc_btn.place(x=25, y=450)\n clear_btn.place(x=225, y=450)\n\n def calculate(self): #This function is to redirect the calculation based on the radio button selected\n radio = self.v.get()\n if radio == 1:\n can = cancer(self.scan_or_consult.get())\n elif radio == 2:\n flu = influenza(self.scan_or_consult.get())\n\n def clear(self): #This function clears entry fields\n self.sick_id.delete(0, 'end')\n self.duration.delete(0, 'end')\n self.doc_id.delete(0, 'end')\n self.scan_or_consult.delete(0, 'end')\n\n#Child of sick class for cancer calculation and display\nclass cancer(sick): \n def __init__(self, scan):\n amount_paid_display = Label(root, text=\"\")\n amount_paid_display.place(x=225,y=400)\n medication = 400\n self.scan = scan\n if float(scan)>600:\n messagebox.showinfo(\"\", \"Sorry we cannot treat you\")\n else:\n amount_paid = float(scan) + medication\n amount_paid_display.config(text=\"R\"+str(round(amount_paid, 4)))\n\n#Child of sick class for influenza calculation and display\nclass influenza(sick): \n def __init__(self, consult):\n x = StringVar()\n amount_paid_display = Label(root, textvariable=x)\n amount_paid_display.place(x=225,y=400)\n medication = 350.50\n self.consult=consult\n consult=float(consult)\n if consult>600:\n consult = 0.98*consult\n amount_paid = float(consult) + medication\n x.set(\"R\"+str(round(amount_paid, 2))+\" \")\n else:\n amount_paid = float(consult) + medication\n x.set(\"R\"+str(round(amount_paid, 2))+\" \")\n\n#tkinter stuff\nroot=Tk()\nroot.title(\"Aashiq's Program\")\nroot.geometry(\"600x500\")\n\n#code for png logo\ntry:\n canvas = Canvas(root, width = 300, height = 100) \n canvas.place(x=0, y=0) \n logo = PhotoImage(file=\"sick.png\") \n canvas.create_image(10,10, anchor=NW, image=logo) \nexcept:\n canvas = Label(root, text=\"*logo file missing*\").place(x=20,y=55)\n\n#instantiating the program through the sick class\napp = 
sick()\n\nroot.mainloop()\n","sub_path":"oop+tkinter.py","file_name":"oop+tkinter.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"88383564","text":"import argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Linear as Lin\nfrom torch_geometric.nn import XConv, fps, global_mean_pool,knn_interpolate\nfrom torch_geometric.profile import rename_profile_file\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=200)\nparser.add_argument('--batch_size', type=int, default=32)\nparser.add_argument('--lr', type=float, default=0.001)\nparser.add_argument('--lr_decay_factor', type=float, default=0.5)\nparser.add_argument('--lr_decay_step_size', type=int, default=50)\nparser.add_argument('--weight_decay', type=float, default=0)\nparser.add_argument('--inference', action='store_true')\nparser.add_argument('--profile', action='store_true')\nargs = parser.parse_args()\n\n\n\ndef down_sample_layer(x ,pose,batch,ratio = 0.375 ):\n idx = fps(pose, batch, ratio=ratio)\n x, pose, batch = x[idx], pose[idx], batch[idx]\n return x,pose,batch \n\n\n\nclass Net(torch.nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n\n\n self.num_classes = num_classes\n\n forward_laye_down = [64,96,192,384]\n forward_laye_up = [384,192,96,96]\n hidden_down = [32,64,128,256]\n hidden_up = [256,128,64,64]\n kernel_size_down = [16,16,16,16]\n kernel_size_up = [16,16,16,16]\n self.Down_layers = nn.ModuleList()\n self.Up_layers = nn.ModuleList()\n\n self.down_sample = [1,0.375,0.375,1,1]\n\n prev = 0 \n for indx,layer in enumerate(forward_laye_down):\n self.Down_layers.append(XConv(prev,layer,kernel_size= kernel_size_down[0],hidden_channels = hidden_down[0]))\n if indx > 0 :\n prev = layer[indx-1]\n prev = 0 \n for indx,layer in enumerate(forward_laye_up):\n self.Up_layers.append(XConv(prev,layer,kernel_size= kernel_size_up[0],hidden_channels = hidden_up[0]))\n if indx > 0 :\n prev = layer[indx-1]\n\n \n # self.conv1 = XConv(0, 64, dim=3, kernel_size=16, hidden_channels=32)\n # self.conv2 = XConv(64, 96, dim=3, kernel_size=16, hidden_channels=64,dilation=2)\n # self.conv3 = XConv(96, 192, dim=3, kernel_size=16, hidden_channels=128,dilation=2)\n # self.conv4 = XConv(192, 384, dim=3, kernel_size=16,hidden_channels=256, dilation=2)\n # self.conv4_up = XConv(384 + 128 , 192 , dim=3, kernel_size=16,hidden_channels=320, dilation=2)\n # self.conv3_up = XConv(192 + 192 , 96 , dim=3, kernel_size=16,hidden_channels=256, dilation=2)\n # self.conv2_up = XConv(96 + 96 , 96 , dim=3, kernel_size=16,hidden_channels=125, dilation=2)\n # self.conv1_up = XConv(96 + 64 , 128 , dim=3, kernel_size=16,hidden_channels=120, dilation=2)\n \n\n self.lin1 = Lin(384, 256)\n self.lin2 = Lin(256, 128)\n\n self.down_sampler = down_sample_layer\n\n self.fc_lyaer = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=1, bias=False),\n nn.BatchNorm1d(128),\n nn.ReLU(True),\n nn.Dropout(0.5),\n nn.Conv1d(128, self.num_classes, kernel_size=1),\n )\n \n def forward(self, pos, batch):\n layer_features = []\n layer_pos = []\n layer_batch = []\n\n x1 = F.relu(self.conv1(None, pos, batch))\n layer_features.append(x1)\n layer_pos.append(pos)\n layer_batch.append(batch)\n\n x_loop = layer_features[0]\n pos_loop = layer_pos[0]\n batch_loop = layer_pos[0]\n\n for layer in range(1,len(self.Down_layers)):\n if self.down_sample[layer] != 1:\n x_loop,pos_loop,batch_loop = self.down_sampler(x_loop,pos_loop,batch_loop,ratio=self.down_sample[layer])\n x_loop = F.relu(self.Down_layers[layer])(x_loop, pos_loop, batch_loop)\n layer_features.append(x_loop)\n pos_loop.append(pos_loop)\n 
batch_loop.append(batch_loop)\n \n\n x_glob = global_mean_pool(layer_features[-1], batch_loop[-1])\n x_glob = F.relu(self.lin1(x_glob))\n x_glob = F.relu(self.lin2(x_glob))\n x_con_glob = x_glob[batch_loop[-1]]\n\n up_feature = torch.cat((x_con_glob,layer_features[-1]),1)\n\n\n for layer in range(len(self.Up_layers)):\n up_feature = torch.cat((up_feature,layer_features[-1-layer]),1)\n up_feature = F.relu(self.Up_layers[layer](up_feature, pos_loop[-1-layer], batch_loop[-1-layer]))\n \n if self.down_sample[-1-layer] != 1:\n up_feature = knn_interpolate(x = up_feature,pos_x=pos_loop[-1-layer],batch_x=batch_loop[-1-layer],k=3,pos_y=pos_loop[-2-layer],batch_y=batch_loop[-2-layer])\n\n out = torch.unsqueeze(up_feature.T, 0)\n out = self.fc_lyaer(out)\n # x1 = F.relu(self.conv1(None, pos, batch))\n # x2, pos1, batch1 = self.down_sample(x1, pos, batch)\n # x2 = F.relu(self.conv2(x2, pos1, batch1))\n # x3, pos2, batch2 = self.down_sample(x2, pos1, batch1)\n # x3 = F.relu(self.conv3(x3, pos2, batch2))\n # x4 = F.relu(self.conv4(x3, pos2, batch2))\n \n # x_glob = global_mean_pool(x4, batch2)\n # x_glob = F.relu(self.lin1(x_glob))\n # x_glob = F.relu(self.lin2(x_glob))\n \n # layer_up1 = torch.cat((x_con_glob,x4),1)\n # up4 = F.relu(self.conv4_up(layer_up1, pos2, batch2))\n # layer_up2 = torch.cat((up4,x3),1)\n # up3 = F.relu(self.conv3_up(layer_up2, pos2, batch2))\n # layer_up3 = torch.cat((knn_interpolate(x = up3,pos_x=pos2,batch_x=batch2,k=4,pos_y=pos1,batch_y=batch1),x2),1)\n\n # up2 = F.relu(self.conv2_up(layer_up3, pos1, batch1))\n # layer_up4 = torch.cat((knn_interpolate(x = up2,pos_x=pos1,batch_x=batch1,k=4,pos_y=pos,batch_y=batch),x1),1)\n # up1 = F.relu(self.conv1_up(layer_up4, pos, batch))\n \n\n # out = torch.unsqueeze(up1.T, 0)\n \n # out = self.fc_lyaer(out)\n \n # out_batch = torch.zeros(batch_size,point_number, self.num_classes)\n # out = out.squeeze(0).T\n # for b in range(batch_size):\n # out_batch[b,:,:] = out[batch == b]\n return out.squeeze(0)\n\n \n\n\n\n\n\n\n\n\n\n\n\n# print(\"runnig\")\n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# model = Net(num_classes=13).to(device)\n# pos = torch.load('/content/ss/tensor_pos.pt')\n# batch = torch.load('/content/ss/tensor_batch.pt')\n# print(pos.size())\n# print(batch.size())\n# model(pos,batch)\n# def get_n_params(model):\n# pp=0\n# for p in list(model.parameters()):\n# nn=1\n# # print(p)\n# for s in list(p.size()):\n# nn = nn*s\n# pp += nn\n# return pp\n# print(get_n_params(model))\n\n# torch.save(model.state_dict(), '/content/ss/model_state_dict.pt')\n","sub_path":"point_cnn.py","file_name":"point_cnn.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"521112106","text":"import itertools\n\n\ndef power_set(sequence):\n for size in range(len(sequence) + 1):\n for item in itertools.combinations(sequence, size):\n yield item\n\n\nfor res in power_set('abc'):\n print(res)\n","sub_path":"Codes/Chapter_06_generator_corutines/generators/08_generating_from_generators_power_set.py","file_name":"08_generating_from_generators_power_set.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"139618887","text":"from util import *\nfrom Process import Process \nfrom buildindex import buildIndex\nfrom Evaluation import Evaluation\nfrom TFIDF import TFIDF\nfrom BM import BM\n# Input compatibility for Python 2 and Python 3\nif version_info.major == 3:\n pass\nelif version_info.major == 2:\n try:\n input = raw_input\n except NameError:\n pass\nelse:\n print (\"Unknown python version - input function not safe\")\n\n\nif __name__ == \"__main__\":\n\n # Create an argument parser\n parser = argparse.ArgumentParser(description='main.py')\n\n # Tunable parameters as external arguments\n parser.add_argument('-dataset', default = \"cranfield/\", \n help = \"Path to the dataset folder\")\n parser.add_argument('-out_folder', default = \"output/\", \n help = \"Path to output folder\")\n parser.add_argument('-segmenter', default = \"punkt\",\n help = \"Sentence Segmenter Type [naive|punkt]\")\n parser.add_argument('-tokenizer', default = \"ptb\",\n help = \"Tokenizer Type [naive|ptb]\")\n parser.add_argument('-custom', action = \"store_true\", \n help = \"Take custom query as input\")\n\n # Parse the input arguments\n args = parser.parse_args()\n print(args)\n labels=[]\n precisions=[]\n recalls=[]\n fscores=[]\n maps=[]\n ndcgs=[]\n\n # BM25\n ind=1\n pchoice=0\n bchoice=0\n lsachoice=0\n searchEngine=BM(args)\n p,r,f,m,n=searchEngine.evaluateDataset(pchoice,bchoice,lsachoice)\n precisions.append(p)\n recalls.append(r)\n fscores.append(f)\n maps.append(m)\n ndcgs.append(n)\n label=''\n if(ind==0):\n label=label+'TFIDF'\n else:\n label=label+'BM25'\n if(pchoice==1):\n label+='+Wordnet'\n if(bchoice==1):\n label+='+Bigram'\n if(lsachoice==1):\n label+='+LSA'\n labels.append(label)\n\n\n # BM+LSA\n ind=1\n pchoice=0\n bchoice=0\n lsachoice=1\n searchEngine=BM(args)\n p,r,f,m,n=searchEngine.evaluateDataset(pchoice,bchoice,lsachoice)\n precisions.append(p)\n recalls.append(r)\n fscores.append(f)\n maps.append(m)\n ndcgs.append(n)\n label=''\n if(ind==0):\n label=label+'TFIDF'\n else:\n label=label+'BM25'\n if(pchoice==1):\n label+='+Wordnet'\n if(bchoice==1):\n label+='+Bigram'\n if(lsachoice==1):\n label+='+LSA'\n labels.append(label)\n\n\n # BM+Wordnet\n ind=1\n pchoice=1\n bchoice=0\n lsachoice=0\n searchEngine=BM(args)\n p,r,f,m,n=searchEngine.evaluateDataset(pchoice,bchoice,lsachoice)\n precisions.append(p)\n recalls.append(r)\n fscores.append(f)\n maps.append(m)\n ndcgs.append(n)\n label=''\n if(ind==0):\n label=label+'TFIDF'\n else:\n label=label+'BM25'\n if(pchoice==1):\n label+='+Wordnet'\n if(bchoice==1):\n label+='+Bigram'\n if(lsachoice==1):\n label+='+LSA'\n labels.append(label)\n\n #BM+Bigram\n ind=1\n pchoice=0\n bchoice=1\n lsachoice=0\n searchEngine=BM(args)\n p,r,f,m,n=searchEngine.evaluateDataset(pchoice,bchoice,lsachoice)\n precisions.append(p)\n recalls.append(r)\n fscores.append(f)\n maps.append(m)\n ndcgs.append(n)\n label=''\n if(ind==0):\n label=label+'TFIDF'\n else:\n label=label+'BM25'\n if(pchoice==1):\n label+='+Wordnet'\n if(bchoice==1):\n label+='+Bigram'\n if(lsachoice==1):\n label+='+LSA'\n labels.append(label)\n\n\n if(args.custom==0):\n plt.figure(1)\n plt.subplots(1,5,figsize=(20,5))\n plt.subplot(1,5,1)\n plt.plot(range(1, 11), precisions[0], label=labels[0])\n plt.plot(range(1, 11), precisions[1], label=labels[1])\n plt.plot(range(1, 11), precisions[2], label=labels[2])\n plt.plot(range(1, 11), precisions[3], label=labels[3])\n plt.title(\"Precision\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.grid(b=True, which='major', color='#666666', 
linestyle='-')\n plt.minorticks_on()\n plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n\n plt.subplot(1,5,2)\n plt.plot(range(1, 11), recalls[0], label=labels[0])\n plt.plot(range(1, 11), recalls[1], label=labels[1])\n plt.plot(range(1, 11), recalls[2], label=labels[2])\n plt.plot(range(1, 11), recalls[3], label=labels[3])\n plt.title(\"Recall\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.grid(b=True, which='major', color='#666666', linestyle='-')\n plt.minorticks_on()\n plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n\n\n plt.subplot(1,5,3)\n plt.plot(range(1, 11), fscores[0], label=labels[0])\n plt.plot(range(1, 11), fscores[1], label=labels[1])\n plt.plot(range(1, 11), fscores[2], label=labels[2])\n plt.plot(range(1, 11), fscores[3], label=labels[3])\n plt.title(\"Fscore\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.grid(b=True, which='major', color='#666666', linestyle='-')\n plt.minorticks_on()\n plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n\n\n plt.subplot(1,5,4)\n plt.plot(range(1, 11), maps[0], label=labels[0])\n plt.plot(range(1, 11), maps[1], label=labels[1])\n plt.plot(range(1, 11), maps[2], label=labels[2])\n plt.plot(range(1, 11), maps[3], label=labels[3])\n plt.title(\"MAP\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.grid(b=True, which='major', color='#666666', linestyle='-')\n plt.minorticks_on()\n plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n\n\n plt.subplot(1,5,5)\n plt.plot(range(1, 11), ndcgs[0], label=labels[0])\n plt.plot(range(1, 11), ndcgs[1], label=labels[1])\n plt.plot(range(1, 11), ndcgs[2], label=labels[2])\n plt.plot(range(1, 11), ndcgs[3], label=labels[3])\n plt.title(\"nDCG\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.grid(b=True, which='major', color='#666666', linestyle='-')\n plt.minorticks_on()\n plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n\n plt.savefig(args.out_folder + \"Experiment2.png\")\n\n\n \n if not os.path.exists('expt2csvfolder'):\n os.makedirs('expt2csvfolder')\n \n for j in range(4):\n filename='expt2csvfolder/'+labels[j]+\".csv\"\n with open(filename,'w',newline='') as file:\n writer=csv.writer(file)\n writer.writerow([\"Precision\",\"Recall\",\"F-Score\",\"MAP\",\"nDCG\"])\n for i in range(10):\n writer.writerow([precisions[j][i],recalls[j][i],fscores[j][i],maps[j][i],ndcgs[j][i]])\n","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":6735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"365867866","text":"from unittest.mock import patch\nfrom unittest.mock import call\nfrom mt_omzsh import step\nfrom mt_omzsh import config\nfrom mt_shared import mt_errors\n\n\n@patch('mt_omzsh.step.mt_subp')\n@patch('mt_omzsh.step._install_omzsh')\ndef test_mac_install(mock_inst_omzsh, mock_subprocess):\n\n step.mac()\n\n cmd = ['brew install zsh zsh-completions']\n mock_subprocess.run.assert_called_with(cmd)\n mock_inst_omzsh.assert_called_once()\n\n\n@patch('mt_omzsh.step.mt_subp')\n@patch('mt_omzsh.step._install_omzsh')\ndef test_deb_install(mock_inst_omzsh, mock_subp):\n\n step.deb()\n\n mock_inst_omzsh.assert_called_once()\n mock_subp.run.assert_called_with(([\n 'apt update',\n 'apt install zsh -y'\n ]))\n\n\n@patch('mt_omzsh.step.mt_subp')\n@patch('mt_omzsh.step._install_omzsh')\ndef test_install_mac(mock_inst, mock_subp):\n\n config = {'install': 'mac'}\n\n step.install(config, 'mac')\n\n mock_subp.run.assert_called_once()\n mock_inst.assert_called_once()\n\n\n@patch('mt_omzsh.step.mt_subp')\ndef test_install_other(mock_subp):\n\n config = {'install': 'mac'}\n\n try:\n step.install(config, 'mac')\n except mt_errors.InstallError as e:\n assert e.step == 'ZSH'\n assert e.message == 'Invalid installer option.'\n\n\n@patch('mt_omzsh.step.mt_subp')\n@patch('mt_omzsh.step._install_omzsh')\ndef test_install_deb(mock_inst_omzsh, mock_subp):\n\n config = {'install': 'deb'}\n\n step.install(config, 'deb')\n\n mock_subp.run.assert_called_once()\n mock_inst_omzsh.assert_called_once()\n\n\n@patch('urllib.request.urlretrieve')\n@patch('mt_omzsh.step.mt_subp')\ndef test_install_omzsh_downloads_installer(mock_subprocess, mock_urlretrieve):\n\n file_name = 'path/to/downloaded/install.sh'\n headers = {'headers': 'of the download'}\n\n mock_urlretrieve.return_value = (file_name, headers)\n\n step._install_omzsh()\n\n mock_urlretrieve.assert_called_with(config.OMZSH_URL)\n\n\n@patch('urllib.request.urlretrieve')\n@patch('mt_omzsh.step.mt_subp')\ndef test_install_execute_installer(mock_subprocess, mock_urlretrieve):\n\n file_name = 'path/to/downloaded/install.sh'\n headers = {'headers': 'of the download'}\n\n mock_subprocess.exe.return_value = '/usr/local/bin/zsh'\n mock_urlretrieve.return_value = (file_name, headers)\n\n step._install_omzsh()\n\n mock_subprocess.run.assert_called_with([\n 'sh {}'.format(file_name),\n 'chsh -s /usr/local/bin/zsh'\n ])\n\n\n@patch('mt_omzsh.step.mt_io.read_file')\n@patch('mt_omzsh.step.mt_io.replace_token')\n@patch('mt_omzsh.step.mt_io.write_file')\n@patch('mt_omzsh.step.mt_io.write_file')\ndef test_edit_zshrc(mock_e, mock_write, mock_replace_token, mock_file_reader):\n\n zshrc_file = 'the entire zshrc file'\n new_value = 'token replaced file'\n mock_file_reader.return_value = zshrc_file\n mock_replace_token.return_value = new_value\n\n step.edit_zshrc()\n\n calls = [\n call(*config.ZSHRC_TOKENS[0], zshrc_file),\n call(*config.ZSHRC_TOKENS[1], new_value),\n call(*config.ZSHRC_TOKENS[2], new_value)\n ]\n mock_replace_token.assert_has_calls(calls)\n\n\n@patch('mt_omzsh.step.mt_io.read_file')\n@patch('mt_omzsh.step.mt_io.replace_token')\n@patch('mt_omzsh.step.mt_io.write_file')\n@patch('mt_omzsh.step.mt_io.exists')\ndef test_edit_zshrc_writes(mock_e, mock_writer, mock_replace_token, mock_file_reader):\n\n zshrc_file = 'the entire zshrc file'\n new_value = 'token replaced file'\n mock_file_reader.return_value = zshrc_file\n mock_replace_token.return_value = new_value\n\n step.edit_zshrc()\n\n mock_writer.assert_called_with(config.ZSHRC, 
new_value)\n\n\n@patch('mt_omzsh.step.mt_io')\ndef test_edit_zshrc_without_zshrc_file(mock_io):\n\n mock_io.exists.return_value = False\n error = None\n\n try:\n step.edit_zshrc()\n except mt_errors.InstallError as e:\n error = e\n\n assert error.step == 'ZSH'\n assert error.message == 'Unable to find .zshrc, make sure om-zsh is installed.'\n\n\n@patch('mt_omzsh.step.urllib.request')\n@patch('mt_omzsh.step.mt_io.read_file')\n@patch('mt_omzsh.step.mt_io.write_file')\ndef test_config_env(mock_writer, mock_reader, mock_request):\n\n fn, headers = 'tmp/filename', {'field': 12}\n env_file = 'the content of the .env file'\n mock_request.urlretrieve.return_value = (fn, headers)\n mock_reader.return_value = env_file\n\n step.config_env()\n\n mock_reader.assert_called_with(fn)\n mock_request.urlretrieve.assert_called_with(config.ENV_URL)\n mock_writer.assert_called_with(config.ENV_PATH, env_file)\n\n\n\n","sub_path":"mt-omzsh/tests/test_omzsh_step.py","file_name":"test_omzsh_step.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"79571036","text":"import re\n\n# -----------------------------------------------------------------------------\n\nIPPPSSOOT_RE = re.compile(r\"^[IJLOijlo][a-zA-Z0-9]{8,8}$\")\n\n# Note: only ACS, COS, STIS, and WFC3 are initially supported\nIPPPSSOOT_INSTR = {\n \"J\": \"acs\",\n \"U\": \"wfpc2\",\n \"V\": \"hsp\",\n \"W\": \"wfpc\",\n \"X\": \"foc\",\n \"Y\": \"fos\",\n \"Z\": \"hrs\",\n \"E\": \"eng\",\n \"F\": \"fgs\",\n \"I\": \"wfc3\",\n \"N\": \"nicmos\",\n \"O\": \"stis\",\n \"L\": \"cos\",\n}\n\nINSTRUMENTS = set(IPPPSSOOT_INSTR.values())\n\n\ndef get_instrument(ipppssoot):\n \"\"\"Given an `ipppssoot` ID, return the corresponding instrument name.\n\n Parameters\n ----------\n ipppssoot : str\n HST-style dataset name, 'i' character identifies instrument:\n J -- acs\n U -- wfpc2\n I -- wfc3\n O -- stis\n L -- cos\n\n Returns\n -------\n instrument : str\n Name of the instrument in lowercase corresponding to `ipppssoot`, e.g. 'acs'\n \"\"\"\n if ipppssoot.lower() in INSTRUMENTS:\n return ipppssoot.lower()\n else:\n return IPPPSSOOT_INSTR.get(ipppssoot.upper()[0])\n\n\n# -----------------------------------------------------------------------------\n\n\ndef get_output_path(output_uri, ipppssoot):\n \"\"\"Given an `output_uri` string which nominally defines an S3 bucket and\n directory base path, and an `ipppssoot` dataset name, generate a full\n S3 output path where outputs from processing `ipppssoot` should be stored.\n\n Parameters\n ----------\n output_uri : str\n A combination of S3 bucket and object directory prefix\n ipppssoot : str\n HST-style dataset name for which outputs will be stored.\n\n Returns\n -------\n full_s3_object_path : str\n A fully specified S3 object, including bucket, directory, and filename.\n\n >>> get_output_path(\"s3://temp/batch-2020-02-13T10:33:00\", \"IC0B02020\")\n 's3://temp/batch-2020-02-13T10:33:00/wfc3/IC0B02020'\n \"\"\"\n return output_uri + \"/\" + get_instrument(ipppssoot) + \"/\" + ipppssoot\n","sub_path":"calcloud/hst.py","file_name":"hst.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"436857206","text":"\"\"\"Access classess and identity for partitions. \n\nCopyright (c) 2013 Clarinova. This file is licensed under the terms of the\nRevised BSD License, included in this distribution as LICENSE.txt\n\"\"\"\n\nimport os\n\nfrom databundles.identity import PartitionIdentity\nfrom sqlalchemy.orm.exc import NoResultFound\n\n \nclass Partition(object):\n '''Represents a bundle partition, part of the bundle data broken out in \n time, space, or by table. '''\n \n def __init__(self, bundle, record):\n from databundles.database import PartitionDb\n \n self.bundle = bundle\n self.record = record\n \n self._db_class = PartitionDb\n self._database = None\n self._hd5file = None\n self._tempfile_cache = {}\n \n def init(self):\n '''Initialize the partition, loading in any SQL, etc. '''\n \n @property\n def name(self):\n return self.identity.name\n \n @property\n def identity(self):\n return self.record.identity\n \n def _path_parts(self):\n\n name_parts = self.bundle.identity.name_parts(self.bundle.identity)\n \n source = name_parts.pop(0)\n p = self.identity\n # HACK HACK HACK!\n # The table,space,time,grain order must match up with PartitionIdentity._path_str\n partition_path = [ str(i) for i in [p.table,p.space,p.time,p.grain, p.format] if i is not None]\n \n return source, name_parts, partition_path \n \n @property\n def path(self):\n '''Return a pathname for the partition, relative to the containing \n directory of the bundle. '''\n source, name_parts, partition_path = self._path_parts()\n\n return os.path.join(self.bundle.database.base_path, *partition_path )\n\n def sub_dir(self, *args):\n \"\"\"Return a subdirectory relative to the partition path\"\"\"\n return os.path.join(self.path,*args)\n\n @property\n def database(self):\n if self._database is None:\n \n \n source, name_parts, partition_path = self._path_parts() #@UnusedVariable\n\n self._database = self._db_class(self.bundle, self, base_path=self.path)\n \n def add_type(database):\n from databundles.bundle import BundleDbConfig\n config = BundleDbConfig(self.database)\n config.set_value('info','type','partition')\n \n self._database.add_post_create(add_type) \n \n return self._database\n\n def query(self,*args, **kwargs):\n \"\"\"Convience function for self.database.query()\"\"\"\n \n return self.database.query(*args, **kwargs)\n \n\n def tempfile(self, table=None, suffix=None,ignore_first=False):\n '''Return a tempfile object for this partition'''\n \n ckey = (table,suffix)\n\n tf = self._tempfile_cache.get(ckey, None) \n if tf:\n return tf\n else: \n if table is None and self.table:\n table = self.table;\n tf = self.database.tempfile(table, suffix=suffix, ignore_first=ignore_first)\n self._tempfile_cache[ckey] = tf\n return tf\n \n @property\n def hdf5file(self):\n from databundles.hdf5 import Hdf5File\n if self._hd5file is None:\n self._hd5file = Hdf5File(self)\n \n return self._hd5file\n\n @property\n def data(self):\n return self.record.data\n \n \n @property\n def table(self):\n '''Return the orm table for this partition, or None if\n no table is specified. \n '''\n \n table_spec = self.identity.table\n \n if table_spec is None:\n return None\n \n return self.bundle.schema.table(table_spec)\n \n def create_with_tables(self, tables=None, clean=False):\n '''Create, or re-create, the partition, possibly copying tables\n from the main bundle\n \n Args:\n tables. String or Array of Strings. Specifies the names of tables to \n copy from the main bundle. \n \n clean. If True, delete the database first. 
Defaults to true. \n \n '''\n\n if not tables: \n raise ValueError(\"'tables' cannot be empty\")\n\n if not isinstance(tables, (list, tuple)):\n tables = [tables]\n\n if clean:\n self.database.delete()\n\n self.database.create(copy_tables = False)\n\n self.add_tables(tables)\n\n def add_tables(self,tables):\n\n for t in tables:\n if not t in self.database.inspector.get_table_names():\n t_meta, table = self.bundle.schema.get_table_meta(t) #@UnusedVariable\n t_meta.create_all(bind=self.database.engine) \n\n def create(self):\n\n tables = self.data.get('tables',[])\n\n if tables:\n self.create_with_tables(tables=tables)\n else:\n self.database.create(copy_tables = False)\n\n\n @property\n def extents(self, where=None):\n '''Return the bounding box for the dataset. The partition must specify \n a table\n \n '''\n import geo.util\n return geo.util.extents(self.database,self.table.name, where=where)\n \n def inserter(self, table_or_name=None,**kwargs):\n \n if not self.database.exists():\n self.create()\n\n return self.database.inserter(table_or_name,**kwargs)\n\n def __repr__(self):\n return \"\".format(self.name)\n\n\nclass HdfPartition(Partition):\n '''A Partition that hosts a Spatialite for geographic data'''\n \n def __init__(self, bundle, record):\n super(HdfPartition, self).__init__(bundle, record)\n \n from .database import HdfDb\n\n self._db_class = HdfDb\n\n @property\n def database(self):\n from .database import HdfDb\n if self._database is None:\n self._database = HdfDb(self)\n \n return self._database\n\n\nclass GeoPartition(Partition):\n '''A Partition that hosts a Spatialite for geographic data'''\n \n def __init__(self, bundle, record):\n super(GeoPartition, self).__init__(bundle, record)\n from .database import GeoDb\n\n self._db_class = GeoDb\n\n def get_srs_wkt(self):\n \n #\n # !! 
Assumes only one layer!\n \n try:\n q =\"select srs_wkt from geometry_columns, spatial_ref_sys where spatial_ref_sys.srid == geometry_columns.srid;\"\n return self.database.query(q).first()[0]\n except:\n q =\"select srtext from geometry_columns, spatial_ref_sys where spatial_ref_sys.srid == geometry_columns.srid;\"\n return self.database.query(q).first()[0]\n\n def get_srs(self):\n import ogr \n \n srs = ogr.osr.SpatialReference()\n srs.ImportFromWkt(self.get_srs_wkt())\n return srs\n\n @property\n def srs(self):\n return self.get_srs()\n\n def get_transform(self, dest_srs=4326):\n \"\"\"Get an ogr transform object to convert from the SRS of this partition \n to another\"\"\"\n import ogr, osr\n \n \n srs2 = ogr.osr.SpatialReference()\n srs2.ImportFromEPSG(dest_srs) \n transform = osr.CoordinateTransformation(self.get_srs(), srs2)\n\n return transform\n\n def create(self, dest_srs=4326, source_srs=None):\n\n from databundles.geo.sfschema import TableShapefile\n \n tsf = TableShapefile(self.bundle, self._db_class.make_path(self), self.identity.table,\n dest_srs = dest_srs, source_srs = source_srs )\n \n tsf.close()\n \n self.add_tables(self.data.get('tables',None))\n\n def convert(self, table_name, progress_f=None):\n \"\"\"Convert a spatialite geopartition to a regular arg\n by extracting the geometry and re-projecting it to WGS84\n \n :param config: a `RunConfig` object\n :rtype: a `LibraryDb` object\n \n :param config: a `RunConfig` object\n :rtype: a `LibraryDb` object\n \n \"\"\"\n import subprocess, csv\n from databundles.orm import Column\n from databundles.dbexceptions import ConfigurationError\n\n #\n # Duplicate the geo arg table for the new arg\n # Then make the new arg\n #\n\n t = self.bundle.schema.add_table(table_name)\n \n ot = self.table\n \n for c in ot.columns:\n self.bundle.schema.add_column(t,c.name,datatype=c.datatype)\n \n \n #\n # Open a connection to spatialite and run the query to \n # extract CSV. \n #\n # It would be a lot more efficient to connect to the \n # Spatialite procss, attach the new database, the copt the \n # records in SQL. \n #\n \n try:\n subprocess.check_output('spatialite -version', shell=True)\n except:\n raise ConfigurationError('Did not find spatialite on path. Install spatialite')\n \n # Check the type of geometry:\n p = subprocess.Popen(('spatialite {file} \"select GeometryType(geometry) FROM {table} LIMIT 1;\"'\n .format(file=self.database.path,table = self.identity.table)), \n stdout = subprocess.PIPE, shell=True)\n \n out, _ = p.communicate()\n out = out.strip()\n \n if out == 'POINT':\n self.bundle.schema.add_column(t,'_db_lon',datatype=Column.DATATYPE_REAL)\n self.bundle.schema.add_column(t,'_db_lat',datatype=Column.DATATYPE_REAL)\n \n command_template = \"\"\"spatialite -csv -header {file} \"select *, \n X(Transform(geometry, 4326)) AS _db_lon, Y(Transform(geometry, 4326)) AS _db_lat \n FROM {table}\" \"\"\" \n else:\n self.bundle.schema.add_column(t,'_wkb',datatype=Column.DATATYPE_TEXT)\n \n command_template = \"\"\"spatialite -csv -header {file} \"select *, \n AsBinary(Transform(geometry, 4326)) AS _wkb\n FROM {table}\" \"\"\" \n\n self.bundle.database.commit()\n\n pid = self.identity\n pid.table = table_name\n arg = self.bundle.partitions.new_partition(pid)\n arg.create_with_tables()\n\n #\n # Now extract the data into a new database. 
\n #\n\n command = command_template.format(file=self.database.path,\n table = self.identity.table)\n\n \n self.bundle.log(\"Running: {}\".format(command))\n \n p = subprocess.Popen(command, stdout = subprocess.PIPE, shell=True)\n stdout, stderr = p.communicate()\n \n #\n # Finally we can copy the data. \n #\n \n reader = csv.reader(stdout.decode('ascii').splitlines())\n header = reader.next()\n \n if not progress_f:\n progress_f = lambda x: x\n \n with arg.database.inserter(table_name) as ins:\n for i, line in enumerate(reader):\n ins.insert(line)\n progress_f(i)\n\n\nclass Partitions(object):\n '''Continer and manager for the set of partitions. \n \n This object is always accessed from Bundle.partitions\"\"\n '''\n \n def __init__(self, bundle):\n self.bundle = bundle\n\n def partition(self, arg, db_type=None):\n '''Get a local partition object from either a Partion ORM object, or\n a partition name\n \n Arguments:\n arg -- a orm.Partition or Partition object. \n \n '''\n\n from databundles.orm import Partition as OrmPartition\n from databundles.identity import PartitionNumber, PartitionIdentity\n \n if isinstance(arg,OrmPartition):\n orm_partition = arg\n elif isinstance(arg, str):\n s = self.bundle.database.session \n orm_partition = s.query(OrmPartition).filter(OrmPartition.id_==arg ).one()\n elif isinstance(arg, PartitionNumber):\n s = self.bundle.database.session \n orm_partition = s.query(OrmPartition).filter(OrmPartition.id_==str(arg) ).one()\n elif isinstance(arg, PartitionIdentity): \n s = self.bundle.database.session \n orm_partition = s.query(OrmPartition).filter(OrmPartition.id_==str(arg.id_) ).one() \n else:\n raise ValueError(\"Arg must be a Partition or PartitionNumber\")\n\n if orm_partition.data.get('db_type', False):\n db_type = orm_partition.data.get('db_type')\n elif db_type:\n orm_partition.data['db_type'] = db_type\n s = self.bundle.database.session \n s.merge(orm_partition)\n s.commit()\n\n if db_type == 'geo':\n return GeoPartition(self.bundle, orm_partition)\n elif db_type == 'hdf':\n return HdfPartition(self.bundle, orm_partition)\n else:\n return Partition(self.bundle, orm_partition)\n\n @property\n def count(self):\n from databundles.orm import Partition as OrmPartition\n \n s = self.bundle.database.session\n return s.query(OrmPartition).count()\n \n @property \n def all(self): #@ReservedAssignment\n '''Return an iterator of all partitions'''\n from databundles.orm import Partition as OrmPartition\n import sqlalchemy.exc\n try:\n s = self.bundle.database.session \n return [self.partition(op) for op in s.query(OrmPartition).all()]\n except sqlalchemy.exc.OperationalError:\n return []\n \n \n def __iter__(self):\n return iter(self.all)\n\n \n @property\n def query(self):\n from databundles.orm import Partition as OrmPartition\n \n s = self.bundle.database.session\n \n return s.query(OrmPartition)\n \n \n def get(self, id_):\n '''Get a partition by the id number \n \n Arguments:\n id_ -- a partition id value\n \n Returns:\n A partitions.Partition object\n \n Throws:\n a Sqlalchemy exception if the partition either does not exist or\n is not unique\n ''' \n from databundles.orm import Partition as OrmPartition\n \n # This is needed to flush newly created partitions, I think ... 
\n self.bundle.database.session.close()\n \n if isinstance(id_, PartitionIdentity):\n id_ = id_.identity.id_\n \n \n q = (self.bundle.database.session\n .query(OrmPartition)\n .filter(OrmPartition.id_==str(id_).encode('ascii')))\n \n try:\n orm_partition = q.one()\n \n return self.partition(orm_partition)\n except NoResultFound:\n orm_partition = None\n \n if not orm_partition:\n q = (self.bundle.database.session\n .query(OrmPartition)\n .filter(OrmPartition.name==id_.encode('ascii')))\n \n try:\n orm_partition = q.one()\n \n return self.partition(orm_partition)\n except NoResultFound:\n orm_partition = None\n \n return orm_partition\n\n def find_table(self, table_name):\n '''Return the first partition that has the given table name'''\n \n for partition in self.all:\n if partition.table and partition.table.name == table_name:\n return partition\n \n return None\n\n def find(self, pid=None, **kwargs):\n '''Return a Partition object from the database based on a PartitionId.\n The object returned is immutable; changes are not persisted'''\n import sqlalchemy.orm.exc\n try:\n \n partitions = [ self.partition(op) for op in self.find_orm(pid, **kwargs).all()];\n \n if len(partitions) == 1:\n return partitions.pop()\n elif len(partitions) > 1 :\n from databundles.dbexceptions import ResultCountError\n \n rl = \"; \".join([p.identity.name for p in partitions])\n \n raise ResultCountError(\"Got too many results: {}\".format(rl)) \n else:\n return None\n \n except sqlalchemy.orm.exc.NoResultFound: \n return None\n \n \n def find_all(self, pid=None, **kwargs):\n '''Return a Partition object from the database based on a PartitionId.\n The object returned is immutable; changes are not persisted'''\n ops = self.find_orm(pid, **kwargs).all()\n \n return [ self.partition(op) for op in ops]\n\n def _pid_or_args_to_pid(self, bundle, pid, args):\n from databundles.identity import Identity, new_identity\n \n\n if isinstance(pid, Identity):\n return pid, None\n elif isinstance(pid,basestring):\n return None, pid # pid is actually the name\n elif args.get('name', False):\n return None, args.get('name', None)\n else:\n return new_identity(args, bundle=bundle), None\n\n \n def find_orm(self, pid=None, **kwargs):\n '''Return a Partition object from the database based on a PartitionId.\n An ORM object is returned, so changes can be persisted. 
'''\n import sqlalchemy.orm.exc\n\n pid, name = self._pid_or_args_to_pid(self.bundle, pid, kwargs)\n \n from databundles.orm import Partition as OrmPartition\n q = self.query\n \n if name is not None:\n q = q.filter(OrmPartition.name==name)\n else: \n if pid.time is not None:\n q = q.filter(OrmPartition.time==pid.time)\n \n if pid.space is not None:\n q = q.filter(OrmPartition.space==pid.space)\n \n if pid.grain is not None:\n q = q.filter(OrmPartition.grain==pid.grain)\n \n #if format is not None:\n # q = q.filter(OrmPartition.format==pid.format)\n \n if pid.table is not None:\n \n tr = self.bundle.schema.table(pid.table)\n \n if not tr:\n raise ValueError(\"Didn't find table named {} \".format(pid.table))\n \n q = q.filter(OrmPartition.t_id==tr.id_)\n\n return q\n \n \n def new_orm_partition(self, pid, **kwargs):\n '''Create a new ORM Partrition object, or return one if\n it already exists '''\n from databundles.orm import Partition as OrmPartition, Table\n \n s = self.bundle.database.session\n \n if pid.table:\n q =s.query(Table).filter( (Table.name==pid.table) | (Table.id_==pid.table) )\n table = q.one()\n else:\n table = None\n \n # 'tables' are additional tables that are part of the partion ,beyond the one in the identity\n # Probably a bad idea. \n tables = kwargs.get('tables',kwargs.get('table',pid.table if pid else None))\n \n if tables and not isinstance(tables, (list,tuple)):\n tables = [tables]\n \n if tables and pid and pid.table and pid.table not in tables:\n tables = list(tables)\n tables.append(pid.table)\n \n data=kwargs.get('data',{})\n \n data['tables'] = tables\n \n if kwargs.get('db_type'):\n data['db_type'] = kwargs.get('db_type')\n \n \n d = pid.to_dict()\n \n try: del d['table'] # OrmPartition requires t_id instead\n except: pass\n\n \n op = OrmPartition(\n t_id = table.id_ if table else None,\n d_id = self.bundle.identity.id_,\n data=data,\n state=kwargs.get('state',None),\n **d\n ) \n\n return op\n\n def clean(self):\n from databundles.orm import Partition as OrmPartition\n \n s = self.bundle.database.session\n s.query(OrmPartition).delete()\n \n def new_partition(self, pid=None, **kwargs):\n \n pid, _ = self._pid_or_args_to_pid(self.bundle, pid, kwargs)\n \n extant = self.find_orm(pid, **kwargs).all()\n \n for p in extant:\n if p.name == pid.name:\n return self.partition(p, db_type=kwargs.get('db_type',None))\n \n op = self.new_orm_partition(pid, **kwargs)\n s = self.bundle.database.session\n s.add(op) \n s.commit() \n \n p = self.partition(op, db_type=kwargs.get('db_type',None))\n return p\n\n def new_geo_partition(self, pid=None, **kwargs):\n \n if kwargs.get('shape_file'):\n return self._new_geo_partition_from_shape( pid, **kwargs)\n else:\n kwargs['db_type'] = 'geo'\n return self.new_partition(pid, **kwargs)\n \n def new_hdf_partition(self, pid=None, **kwargs):\n \n if pid:\n pid.format = 'hdf'\n \n kwargs['db_type'] = 'hdf'\n return self.new_partition(pid, **kwargs)\n \n def _new_geo_partition_from_shape(self, pid=None, **kwargs):\n \"\"\"Load a shape file into a partition as a spatialite database. 
\n \n Will also create a schema entry for the table speficified in the \n table parameter of the pid, using the fields from the table in the\n shapefile\n \"\"\"\n import subprocess\n from databundles.dbexceptions import ConfigurationError\n from databundles.geo.util import get_shapefile_geometry_types\n \n shape_file=kwargs.get('shape_file')\n \n t_srs=kwargs.get('t_srs')\n \n if t_srs:\n t_srs_opt = '-t_srs EPSG:{}'.format(t_srs)\n else:\n t_srs_opt = ''\n \n pid, name = self._pid_or_args_to_pid(self.bundle, pid, kwargs)\n \n try: extant = self.partitions.find(pid)\n except: extant = None # Fails with ValueError because table does not exist. \n \n if extant:\n raise Exception('Geo partition already exists for pid: {}'.format(pid.name))\n \n if shape_file.startswith('http'):\n shape_url = shape_file\n shape_file = self.bundle.filesystem.download_shapefile(shape_url)\n \n try:\n subprocess.check_output('ogr2ogr --help-general', shell=True)\n except:\n raise ConfigurationError('Did not find ogr2ogr on path. Install gdal/ogr')\n \n self.bundle.log(\"Checking types in file\")\n types, type = get_shapefile_geometry_types(shape_file)\n \n #ogr_create=\"ogr2ogr -explodecollections -skipfailures -f SQLite {output} -nlt {type} -nln \\\"{table}\\\" {input} -dsco SPATIALITE=yes\"\n \n ogr_create=\"ogr2ogr -progress -skipfailures -f SQLite {output} -gt 65536 {t_srs} -nlt {type} -nln \\\"{table}\\\" {input} -dsco SPATIALITE=yes\"\n \n if not pid.table:\n raise ValueError(\"Pid must have a table name\")\n \n table_name = pid.table\n \n t = self.bundle.schema.add_table(pid.table)\n self.bundle.database.commit()\n \n partition = self.new_partition(pid, db_type='geo')\n \n dir_ = os.path.dirname(partition.database.path)\n if not os.path.exists(dir_):\n self.bundle.log(\"Make dir: \"+dir_)\n os.makedirs(dir_)\n \n cmd = ogr_create.format(input = shape_file,\n output = partition.database.path,\n table = table_name,\n type = type,\n t_srs = t_srs_opt\n )\n \n self.bundle.log(\"Running: \"+ cmd)\n \n output = subprocess.check_output(cmd, shell=True)\n\n for row in partition.database.connection.execute(\"pragma table_info('{}')\".format(table_name)):\n self.bundle.schema.add_column(t,row[1],datatype = row[2].lower())\n\n return partition\n\n\n def find_or_new(self, pid=None, **kwargs):\n '''Find a partition identified by pid, and if it does not exist, create it. \n \n Args:\n pid A partition Identity\n tables String or array of tables to copy form the main partition\n '''\n \n try: partition = self.find(pid, **kwargs)\n except: partition = None\n \n if partition:\n return partition\n \n tables = kwargs.get('tables',kwargs.get('table',pid.table if pid else None))\n \n if tables and not isinstance(tables, (list,tuple)):\n tables = [tables]\n \n if tables and pid and pid.table and pid.table not in tables:\n tables.append(partition.identity.table)\n\n partition = self.new_partition(pid, **kwargs)\n \n if tables: \n partition.create_with_tables(tables) \n\n return partition;\n \n def find_or_new_geo(self, pid=None, **kwargs):\n '''Find a partition identified by pid, and if it does not exist, create it. 
\n \n Args:\n pid A partition Identity\n tables String or array of tables to copy form the main partition\n '''\n \n try: partition = self.find(pid, **kwargs)\n except: partition = None\n \n if partition:\n return partition\n\n partition = self.new_geo_partition(pid, **kwargs)\n\n return partition;\n \n def find_or_new_hdf(self, pid=None, **kwargs):\n '''Find a partition identified by pid, and if it does not exist, create it. \n \n Args:\n pid A partition Identity\n tables String or array of tables to copy form the main partition\n '''\n\n try: partition = self.find(pid, **kwargs)\n except: partition = None\n \n if partition:\n return partition\n\n partition = self.new_hdf_partition(pid, **kwargs)\n\n return partition;\n \n def delete(self, partition):\n from databundles.orm import Partition as OrmPartition\n\n q = (self.bundle.database.session\n .query(OrmPartition)\n .filter(OrmPartition.id_==partition.identity.id_))\n \n q.delete()\n\n","sub_path":"databundles/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":26474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"145204389","text":"\"\"\"\n-*- coding: utf-8 -*-\n@author: yangyd\n@file: 创建子进程并传递参数.py\n@time: 2019/10/16 0016 10:40\n\"\"\"\n\nfrom multiprocessing import Process\nfrom time import sleep\n\n\ndef run_test(name, **kwargs):\n print(f'子进程运行name为{name}')\n print(f'字典的值为:{kwargs}')\n\n\nif __name__ == '__main__':\n print('zhu')\n p = Process(target=run_test, args=(\"test\",), kwargs={\"age\": 12})\n p.start()\n","sub_path":"study_code/并发编程/创建子进程并传递参数.py","file_name":"创建子进程并传递参数.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"38141917","text":"try:\n from unittest2 import TestCase\nexcept ImportError:\n from unittest import TestCase\n\nfrom scheme import formats\n\nfrom mesh.address import *\nfrom mesh.constants import *\nfrom mesh.transport.base import *\n\nfrom tests.fixtures import *\n\nclass TestServer(TestCase):\n def test_construction(self):\n server = Server([ExampleBundle])\n\n self.assertEqual(server.bundles, {'examples': ExampleBundle})\n self.assertIsNone(server.default_format)\n self.assertIsNone(server.mediators)\n self.assertEqual(server.formats, {'json': formats.Json, 'application/json': formats.Json})\n\n server = Server([ExampleBundle], formats.Json, (formats.Json, formats.Yaml))\n\n self.assertIs(server.default_format, formats.Json)\n self.assertEqual(set(server.formats.keys()),\n set(['json', 'application/json', 'yaml', 'application/x-yaml']))\n\n def test_duplicate_bundle(self):\n with self.assertRaises(ValueError):\n Server([ExampleBundle, ExampleBundle])\n\n def test_invalid_bundle(self):\n with self.assertRaises(TypeError):\n Server([True])\n\nclass TestClient(TestCase):\n def test_construction(self):\n client = Client()\n\n self.assertEqual(client.context, {})\n self.assertIsNone(client.format, None)\n self.assertIsNone(client.name, None)\n\n def test_instantiation_with_bundle(self):\n client = Client(ExampleBundle)\n self.assertEqual(client.name, 'examples')\n\n def test_instantiation_with_specification(self):\n client = Client(ExampleBundle.specify())\n self.assertEqual(client.name, 'examples')\n\n def test_instantiation_with_description(self):\n client = Client(ExampleBundle.describe())\n self.assertEqual(client.name, 'examples')\n \n def test_client_registration(self):\n specification = ExampleBundle.specify()\n client = Client(specification)\n\n for arg in ('examples', specification):\n self.assertIsNone(Client.get_client(arg))\n\n returned = client.register()\n self.assertIs(returned, client)\n\n for arg in ('examples', specification):\n self.assertIs(Client.get_client(arg), client)\n\n returned = client.unregister()\n self.assertIs(returned, client)\n\n for arg in ('examples', specification):\n self.assertIsNone(Client.get_client(arg))\n\n returned = client.unregister()\n self.assertIs(returned, client)\n\n def test_get_endpoint(self):\n specification = ExampleBundle.specify()\n client = Client(specification)\n operation = client.get_endpoint('operation::/examples/1.0/example')\n\n self.assertIsInstance(operation, dict)\n self.assertEqual(set(operation.keys()),\n set(['address', 'method', 'specific', 'path', 'responses', 'name', 'schema']))\n","sub_path":"tests/test_transport.py","file_name":"test_transport.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"180690968","text":"# -*- coding: utf-8 -*-\r\n# \r\nfrom __future__ import division\r\nimport json\r\nfrom otree.db import models\r\nimport otree.models\r\nfrom otree import widgets\r\nfrom otree.common import Currency as c, currency_range\r\nimport random\r\nimport itertools\r\n# \r\n\r\nimport os\r\nfrom django.conf import settings\r\nfrom collections import defaultdict, Counter\r\n\r\nfrom section2.rrobin import round_robin_by_role\r\n\r\n\r\ndoc = \"\"\"\r\nFoo\r\n\"\"\"\r\n\r\n\r\nsource_code = \"\"\r\n\r\n\r\nbibliography = ()\r\n\r\n\r\nlinks = {}\r\n\r\n\r\nkeywords = ()\r\n\r\nclass Constants:\r\n name_in_url = 'section2'\r\n players_per_group = 2\r\n num_rounds = 16\r\n\r\n n_simple = u\"Negociación Simple\"\r\n n_simple_rounds = [1, 3, 5, 7, 9, 11, 13]\r\n proponente, respondente = \"Proponente\", \"Respondente\"\r\n\r\n n_simple_virtual = u\"Negociación Simple Virtual\"\r\n n_simple_virtual_rounds = [15]\r\n\r\n n_empresa_trabajador = u\"Negociación Empresa Trabajador\"\r\n n_empresa_trabajador_rounds = [2, 4, 6, 8, 10, 12, 14]\r\n empresa, trabajador = \"Empresa\", \"Trabajador\"\r\n\r\n n_empresa_trabajador_virtual = u\"Negociación Empresa Trabajador Virtual\"\r\n n_empresa_trabajador_virtual_rounds = [16]\r\n\r\n hombre_blanco, hombre_oscuro = \"hombre_blanco.jpg\", \"hombre_oscuro.jpg\"\r\n mujer_blanca, mujer_oscura = \"mujer_blanca.jpg\", \"mujer_oscura.jpg\"\r\n\r\n virtual_comb = {\r\n \"hombre_blanco_mujer_oscura\": [hombre_blanco, mujer_oscura],\r\n \"hombre_oscuro_mujer_blanca\": [hombre_oscuro, mujer_blanca],\r\n \"mujer_blanca_hombre_oscuro\": [mujer_blanca, hombre_oscuro],\r\n \"mujer_oscura_hombre_blanco\": [mujer_oscura, hombre_blanco],\r\n }\r\n\r\n initial_payoff = c(100)\r\n\r\n default_avatar = os.path.join(settings.BASE_DIR, \"_static\", \"global\", \"default_avatar.png\")\r\n default_avatar_b64 = \"data:image/{};base64,{}\".format(\r\n default_avatar.rsplit(\".\", 1)[1].lower(),\r\n open(default_avatar).read().encode(\"base64\")\r\n )\r\n\r\n finalizar_muestra = ([True] * 20 + [False] * 80)\r\n\r\n\r\nclass Subsession(otree.models.BaseSubsession):\r\n\r\n def before_session_starts(self):\r\n players = self.get_players()\r\n if self.round_number == 1:\r\n virtual_comb = list(Constants.virtual_comb.keys())\r\n random.shuffle(virtual_comb)\r\n repeat = itertools.cycle(virtual_comb)\r\n for p in players:\r\n p.virtual_oponent = next(repeat)\r\n p.id_in_group_1()\r\n else:\r\n for ply in players:\r\n in_round_1 = ply.in_round(1)\r\n ply.virtual_oponent = in_round_1.virtual_oponent\r\n\r\n selected = round_robin_by_role(self)\r\n self.set_groups(selected)\r\n\r\n\r\n def get_current_game(self):\r\n if self.round_number in Constants.n_simple_rounds:\r\n return Constants.n_simple\r\n elif self.round_number in Constants.n_simple_virtual_rounds:\r\n return Constants.n_simple_virtual\r\n elif self.round_number in Constants.n_empresa_trabajador_rounds:\r\n return Constants.n_empresa_trabajador\r\n return Constants.n_empresa_trabajador_virtual\r\n\r\n def show_avatar(self):\r\n return self.round_number > 8\r\n\r\n def tipo_oponente(self, player):\r\n current_game = self.get_current_game()\r\n role = player.role()\r\n if current_game == Constants.n_simple:\r\n return Constants.respondente if role == Constants.proponente else Constants.proponente\r\n elif current_game == Constants.n_empresa_trabajador:\r\n return Constants.trabajador if role == Constants.empresa else Constants.empresa\r\n\r\n def get_result_timeout(self):\r\n if self.round_number <= 3:\r\n 
return 30\r\n elif self.round_number <= 6:\r\n return 20\r\n return 15\r\n\r\n\r\nclass Group(otree.models.BaseGroup):\r\n\r\n # \r\n subsession = models.ForeignKey(Subsession)\r\n # \r\n\r\n n_empresa_trabajador_finalizacion_forzada = models.BooleanField(default=False)\r\n n_empresa_trabajador_fin_ciclo = models.BooleanField(default=False)\r\n\r\n def set_negociacion_simple_payoff(self):\r\n proponente = self.get_player_by_role(Constants.proponente)\r\n respondente = self.get_player_by_role(Constants.respondente)\r\n if respondente.n_simple_respuesta == \"Aceptar\":\r\n proponente.payoff = 200 - proponente.n_simple_propuesta\r\n respondente.payoff = proponente.n_simple_propuesta\r\n else:\r\n proponente.payoff = 0\r\n respondente.payoff = 0\r\n\r\n def set_negociacion_simple_virtual_payoff(self, proponente):\r\n proponente.payoff = 200 - proponente.n_simple_propuesta\r\n\r\n def set_negociacion_empresa_trabajador_payoff(self):\r\n empresa = self.get_player_by_role(Constants.empresa)\r\n trabajador = self.get_player_by_role(Constants.trabajador)\r\n if self.n_empresa_trabajador_finalizacion_forzada:\r\n empresa.payoff = c(50)\r\n trabajador.payoff = 0\r\n else:\r\n propuestas = empresa.all_propuestas()\r\n contrapropuestas = trabajador.all_contrapropuestas()\r\n X = c(\r\n contrapropuestas[-1]\r\n if len(propuestas) == len(contrapropuestas) else\r\n propuestas[-1])\r\n trabajador.payoff = X\r\n empresa.payoff = 200 - X\r\n\r\n def set_negociacion_empresa_trabajador_virtual_payoff(self, empresa):\r\n empresa.payoff = 200 - empresa.n_empresa_trabajador_propuesta\r\n\r\n def forzar_finalizacion_empresa_trabajador(self):\r\n finalizar = random.choice(Constants.finalizar_muestra)\r\n if finalizar:\r\n self.n_empresa_trabajador_finalizacion_forzada = True\r\n self.n_empresa_trabajador_fin_ciclo = True\r\n\r\n\r\nclass Player(otree.models.BasePlayer):\r\n\r\n # \r\n group = models.ForeignKey(Group, null=True)\r\n subsession = models.ForeignKey(Subsession)\r\n # \r\n\r\n player_name = models.CharField(max_length=255)\r\n avatar = models.CharField(max_length=255)\r\n\r\n virtual_oponent = models.CharField(max_length=255, choices=list(Constants.virtual_comb.keys()))\r\n\r\n n_simple_propuesta = models.CurrencyField(\r\n choices=range(0, 201), verbose_name=\"¿Cuánto le gustaría ofrecer?\", default=0)\r\n n_simple_respuesta = models.CharField(\r\n widget=widgets.RadioSelectHorizontal(),\r\n max_length=250, choices=[\"Aceptar\", \"Rechazar\"], default=\"Aceptar\")\r\n\r\n n_empresa_trabajador_propuesta = models.CurrencyField(\r\n verbose_name=\"¿Cuánto le gustaría ofrecer como salario?\", choices=range(0, 201), default=0)\r\n n_empresa_trabajador_propuestas = models.TextField(default=\"[]\")\r\n\r\n n_empresa_trabajador_respuesta = models.CharField(\r\n widget=widgets.RadioSelectHorizontal(),\r\n max_length=250, choices=[\"Aceptar\", \"Rechazar\"], default=\"Aceptar\")\r\n n_empresa_trabajador_contrapropuesta = models.CurrencyField(\r\n verbose_name=\"Contraoferta de salario:\", choices=range(0, 201), default=0)\r\n n_empresa_trabajador_contrapropuestas = models.TextField(default=\"[]\")\r\n\r\n n_empresa_trabajador_finalizacion_forzada = models.BooleanField(default=False)\r\n\r\n def all_propuestas(self):\r\n return json.loads(self.n_empresa_trabajador_propuestas)\r\n\r\n def add_propuesta(self, v):\r\n lista = self.all_propuestas()\r\n lista.append(int(v))\r\n self.n_empresa_trabajador_propuestas = json.dumps(lista)\r\n\r\n def all_contrapropuestas(self):\r\n return 
json.loads(self.n_empresa_trabajador_contrapropuestas)\r\n\r\n def add_contrapropuesta(self, v):\r\n lista = self.all_contrapropuestas()\r\n lista.append(int(v))\r\n self.n_empresa_trabajador_contrapropuestas = json.dumps(lista)\r\n\r\n def id_in_group_1(self):\r\n in_round_1 = self.in_round(1)\r\n return in_round_1.id_in_group\r\n\r\n def role(self):\r\n role_id = self.id_in_group_1()\r\n if self.subsession.get_current_game() == Constants.n_simple:\r\n if role_id == 1:\r\n return Constants.proponente\r\n elif role_id == 2:\r\n return Constants.respondente\r\n elif self.subsession.get_current_game() == Constants.n_empresa_trabajador:\r\n if role_id == 1:\r\n return Constants.empresa\r\n elif role_id == 2:\r\n return Constants.trabajador\r\n\r\n def avatarb64(self):\r\n if not self.avatar:\r\n return Constants.default_avatar_b64\r\n if not hasattr(self, \"_avatarb64\"):\r\n path = os.path.join(settings.BASE_DIR, \"participants_conf\", self.avatar)\r\n with open(path) as fp:\r\n self._avatarb64 = \"data:image/{};base64,{}\".format(\r\n self.avatar.rsplit(\".\", 1)[1].lower(),\r\n fp.read().encode(\"base64\")\r\n )\r\n return self._avatarb64\r\n","sub_path":"section2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"432432672","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport sys\nimport time\n\nfrom distributor import client\nfrom distributor import collector, conf\nimport schedule\n\nCHAN = '600340' # redis KEY\nINTERVAL = 3 # seconds\nDATA_FILE = sys.path[0] + '/conf/testdata.csv'\nCONFIG_FILE = sys.path[0] + '/conf/app.conf'\n\n\ndef job():\n \"\"\"\n pub 任务\n 获取距当前时间最近时间的行情\n \"\"\"\n now = datetime.datetime.now()\n print('pub job start at ' + now.strftime('%Y-%m-%d %H:%M:%S'))\n val = collector.collect(now.strftime('%H%M%S'), DATA_FILE)\n client.publish(CHAN, val)\n print('pub end')\n\n\nif __name__ == '__main__':\n # 定时调度任务\n conf.conf_redis()\n schedule.every(INTERVAL).seconds.do(job).run()\n while True:\n schedule.run_pending()\n time.sleep(1)\n","sub_path":"publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"460242243","text":"import six\n\nimport pytool\nfrom .util import eq_, ok_, raises, SkipTest\n\n\ndef cmp(a, b):\n # This is missing from Python 3\n return (a > b) - (a < b)\n\n\ndef test_list_proxy_instantiates_ok():\n a_list = []\n pytool.proxy.ListProxy(a_list)\n\n\ndef test_list_proxy_proxies_repr():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n eq_(repr(c), repr(p))\n\n\ndef test_list_proxy_proxies_comparisons():\n c = [1, 2]\n a = [1, 2]\n b = [3, 4]\n p = pytool.proxy.ListProxy(c)\n eq_(c == a, p == a)\n eq_(c == b, p == b)\n eq_(c <= a, p <= a)\n eq_(c <= b, p <= b)\n eq_(c >= a, p >= a)\n eq_(c >= b, p >= b)\n eq_(c < a, p < a)\n eq_(c < b, p < b)\n eq_(c > a, p > a)\n eq_(c > b, p > b)\n eq_(c != a, p != a)\n eq_(c != b, p != b)\n\n\ndef test_list_proxy_comparison_operator():\n if six.PY3:\n raise SkipTest('Python 2')\n c = [1, 2]\n a = [1, 2]\n b = [3, 4]\n p = pytool.proxy.ListProxy(c)\n eq_(cmp(c, a), cmp(p, a))\n eq_(cmp(c, b), cmp(p, b))\n eq_(cmp(c, 'foo'), cmp(p, 'foo'))\n eq_(cmp(p, a), p.__cmp__(a))\n\n\ndef test_list_proxy_comparison_operator_again():\n c = [1, 2]\n a = [1, 2]\n b = [3, 4]\n p = pytool.proxy.ListProxy(c)\n eq_(c, a)\n eq_(p, a)\n eq_(p, c)\n eq_(p, p)\n ok_(p == p)\n ok_(not p == b)\n ok_(not p != a)\n ok_(p < b)\n\n\ndef test_list_proxy_contains_operator():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n for i in range(4):\n eq_(i in c, i in p)\n\n\ndef test_list_proxy_length_operator():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n eq_(len(c), len(p))\n\n\ndef test_list_proxy_set_get_and_delete_items():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n eq_(c[0], p[0])\n eq_(c[1], p[1])\n p[0] = 3\n eq_(c, p)\n eq_(c[0], 3)\n del p[0]\n eq_(c, p)\n eq_(c[0], 2)\n\n\ndef test_list_proxy_slicing():\n c = [i for i in range(5)]\n p = pytool.proxy.ListProxy(c)\n s = p[1:3]\n eq_(s, [1, 2])\n ok_(isinstance(s, list))\n p[1:3] = [5, 10]\n eq_(p, c)\n eq_(c[1:3], [5, 10])\n p[1:3] = p\n eq_(p, c)\n p[1:3] = range(5)\n eq_(p, c)\n eq_(p[1:5], [0, 1, 2, 3])\n del p[1:]\n eq_(p, c)\n eq_(p, [0])\n\n\ndef test_list_proxy_addition():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n n = p + p\n eq_(n, [1, 2, 1, 2])\n ok_(isinstance(n, list))\n n = p + [3, 4]\n eq_(n, [1, 2, 3, 4])\n ok_(isinstance(n, list))\n n = p + range(2)\n eq_(n, [1, 2, 0, 1])\n ok_(isinstance(n, list))\n n = [3, 4] + p\n eq_(n, [3, 4, 1, 2])\n ok_(isinstance(n, list))\n n = range(2) + p\n eq_(n, [0, 1, 1, 2])\n ok_(isinstance(n, list))\n n = p.__radd__(p)\n eq_(n, [1, 2, 1, 2])\n ok_(isinstance(n, list))\n p += [3, 4]\n eq_(p, [1, 2, 3, 4])\n p += range(2)\n eq_(p, [1, 2, 3, 4, 0, 1])\n p += p\n eq_(p, [1, 2, 3, 4, 0, 1, 1, 2, 3, 4, 0, 1])\n eq_(c, p)\n\n\ndef test_list_proxy_as_json():\n c = pytool.proxy.ListProxy([])\n c.append('foo')\n eq_(pytool.json.as_json(c), '[\"foo\"]')\n\n\ndef test_list_proxy_multiplication():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n n = p * 2\n eq_(n, c * 2)\n eq_(n, [1, 2, 1, 2])\n p *= 2\n eq_(p, c)\n eq_(p, [1, 2, 1, 2])\n\n\ndef test_list_proxy_mutable_methods():\n c = [1, 2]\n p = pytool.proxy.ListProxy(c)\n p.append(3)\n eq_(c, p)\n eq_(p, [1, 2, 3])\n p.insert(0, 0)\n eq_(c, p)\n eq_(p, [0, 1, 2, 3])\n p.pop()\n eq_(c, p)\n eq_(p, [0, 1, 2])\n p.extend(p)\n eq_(c, p)\n eq_(p, [0, 1, 2, 0, 1, 2])\n p.extend([1, 2])\n eq_(c, p)\n eq_(p, [0, 1, 2, 0, 1, 2, 1, 2])\n\n\ndef test_dict_proxy_instantiates():\n d = {}\n p = pytool.proxy.DictProxy(d)\n eq_(d, p)\n\n\ndef test_dict_proxy_repr():\n d = {'one': 1, 'two': 2}\n p = pytool.proxy.DictProxy(d)\n 
eq_(repr(d), repr(p))\n\n\ndef test_dict_proxy_compare():\n if six.PY3:\n raise SkipTest(\"Python 2\")\n d = {'one': 1, 'two': 2}\n p = pytool.proxy.DictProxy(d)\n eq_(cmp(p, d), cmp(d, d))\n eq_(p.__cmp__(d), cmp(d, d))\n eq_(p.__cmp__(p), cmp(p, p))\n\n\ndef test_dict_proxy_compare_again():\n d = {'one': 1, 'two': 2}\n p = pytool.proxy.DictProxy(d)\n eq_(d, p)\n eq_(p, p)\n\n\ndef test_dict_proxy_get_set_del_item():\n d = {'one': 1}\n p = pytool.proxy.DictProxy(d)\n eq_(d['one'], p['one'])\n p['one'] = 1\n eq_(d['one'], p['one'])\n eq_(d['one'], 1)\n del p['one']\n eq_(d, p)\n eq_(d, {})\n eq_(p, {})\n\n\ndef test_dict_proxy_missing_handling():\n class MissingProxy(pytool.proxy.DictProxy):\n def __missing__(self, key):\n return pytool.lang.UNSET\n d = {'one': 1, 'two': 2}\n m = MissingProxy(d)\n eq_(m['three'], pytool.lang.UNSET)\n\n\ndef test_dict_proxy_clear():\n d = {'one': 1, 'two': 2}\n p = pytool.proxy.DictProxy(d)\n p.clear()\n eq_(d, {})\n eq_(p, {})\n\n\ndef test_dict_proxy_copy():\n d = {'one': 1, 'two': 2}\n p = pytool.proxy.DictProxy(d)\n c = p.copy()\n ok_(c is not d)\n eq_(d, p)\n eq_(d, c)\n\n class Subclass(pytool.proxy.DictProxy):\n pass\n\n p = Subclass(d)\n c = p.copy()\n ok_(c is not d)\n eq_(d, p)\n eq_(d, c)\n\n\ndef test_dict_proxy_update():\n d = {'one': 1, 'two': 2}\n u = {'three': 3}\n p = pytool.proxy.DictProxy(d)\n p.update(p)\n eq_(d, p)\n p.update(u)\n eq_(d, p)\n eq_(p, {'one': 1, 'two': 2, 'three': 3})\n p.update(None)\n p.update([('four', 4)])\n eq_(d, p)\n eq_(p, {'one': 1, 'two': 2, 'three': 3, 'four': 4})\n\n class Items(object):\n def items(self):\n return [('five', 5)]\n\n p.update(Items())\n eq_(d, p)\n eq_(p, {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5})\n p.update(six=6)\n eq_(d, p)\n eq_(p, {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6})\n\n\ndef test_dict_proxy_get():\n d = {}\n p = pytool.proxy.DictProxy(d)\n eq_(p.get('none'), None)\n eq_(p.get('none', 1), 1)\n eq_(p.setdefault('some', 'hun'), 'hun')\n eq_(p.get('some'), 'hun')\n\n\ndef test_dict_proxy_pop():\n d = {'one': 1}\n p = pytool.proxy.DictProxy(d)\n eq_(p.pop('one'), 1)\n eq_(p.pop('one', 2), 2)\n d['one'] = 1\n eq_(p.popitem(), ('one', 1))\n eq_(d, {})\n eq_(d, p)\n\n\ndef test_dict_proxy_iter():\n d = {'one': 1, 'two': 2}\n p = pytool.proxy.DictProxy(d)\n eq_(sorted(list(iter(p))), ['one', 'two'])\n\n\ndef test_dict_proxy_as_json():\n d = pytool.proxy.DictProxy({})\n d['foo'] = 'bar'\n eq_(pytool.json.as_json(d), '{\"foo\": \"bar\"}')\n\n\n@raises(KeyError)\ndef test_dict_proxy_raises_key_error():\n d = pytool.proxy.DictProxy({})\n d['foo']\n","sub_path":"test/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
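In brief, what the `ListProxy` tests above assert: writes to the proxy pass through to the wrapped list. A tiny demonstration using the same pytool API the record exercises:

```python
import pytool

backing = [1, 2]
p = pytool.proxy.ListProxy(backing)
p.append(3)          # mutate through the proxy
print(backing)       # [1, 2, 3] -- the underlying list changed too
print(p == backing)  # True -- comparisons proxy to the wrapped list
```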
+{"seq_id":"325780945","text":"\"\"\"Adapted from a utility used in the Google Deep Learning Udacity\nCourse, available at https://www.udacity.com/course/deep-learning--ud730.\"\"\"\n\n\nimport os\nimport sys\nfrom six.moves.urllib.request import urlretrieve\n\nurl = 'https://commondatastorage.googleapis.com/books1000/'\nlast_percent_reported = None\ndata_root = '.'\n\n\ndef download_progress_hook(count, blocksize, totalSize):\n global last_percent_reported\n percent = int(count * blocksize * 100 / totalSize)\n\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write('{}%'.format(percent))\n sys.stdout.flush()\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n last_percent_reported = percent\n\ndef maybe_download(filename, expected_bytes, force=False):\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename\n\n\n#train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\ntest_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n","sub_path":"file_download.py","file_name":"file_download.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"292006565","text":"import turtle\nimport random\nfrom pygame import mixer # Load the required library\n\nmixer.init()\nmixer.music.load('hakouna')\nmixer.music.play()\n\n\n#Initialize lists\npos_list = []\nstamp_list = []\nfood_stamps = []\nfood_pos = []\n\nturtle.bgpic('rocki.gif')\nSIZE_X=1100\n\nSIZE_Y=1100\nturtle.setup(SIZE_X, SIZE_Y)\nmaze = turtle.clone()\n\n\nturtle.hideturtle()\nmaze.pensize(25)\nmaze.speed(300)\n\n\ndef new_stamp():\n maze_pos = maze.pos()\n pos_list.append(maze_pos)\n maze_stamp = maze.stamp()\n stamp_list.append(maze_stamp)\n \ndef move(x,y):\n while maze.xcor() != x:\n if maze.xcor()x:\n maze.goto((maze.xcor())-50,maze.ycor())\n new_stamp()\n while maze.ycor() != y:\n if maze.ycor()y:\n maze.goto((maze.xcor()),maze.ycor()-50)\n new_stamp()\n \ndef drawmaze():\n maze.penup()\n maze.goto(-500,100)\n maze.pendown()\n move(-500,500)\n move(100,500)\n move(100,300)\n maze.penup()\n maze.goto(100,150)\n maze.pendown()\n move(100,150)\n move(-100,150)\n move(-100,300)\n maze.penup()\n maze.goto(100,500)\n maze.pendown()\n move(500,500)\n move(500,300)\n move(300,300)\n move(500,300)\n move(500,0)\n move(300,0)\n move(500,0)\n move(500,-500)\n move(300,-500)\n move(300,-250)\n move(200,-250)\n move(200,-300)\n move(200,300)\n maze.penup()\n maze.goto(300,-500)\n maze.pendown()\n move(100,-500)\n maze.penup()\n maze.goto(100,-400)\n maze.pendown()\n move(100,0)\n move(-50,0)\n maze.penup()\n maze.goto(-200,0)\n maze.pendown()\n move(-200,-200)\n move(-100,-200)\n move(-100,-300)\n move(100,-300)\n move(100,-400)\n maze.penup()\n maze.goto(100,-500)\n maze.pendown()\n move(-200,-500)\n move(-200,-300)\n move(-300,-300)\n move(-300,-100)\n maze.penup()\n maze.goto(200,-500)\n maze.pendown()\n move(-500,-500)\n move(-500,100)\n move(-300,100)\n move(-300,300)\n move(-400,300)\n\ndrawmaze()\nturtle.showturtle()\nfood = turtle.clone()\nturtle.register_shape('chicken1.gif')\nfood.shape('chicken1.gif')\n\nturtle.hideturtle()\nfood.penup()\n\nturtle.showturtle()\n\n\nfood_stamps=[]\nfood_pos=[]\n\n\n\nfor this_food_pos in food_pos:\n food.goto(this_food_pos)\n food_stamp = food.stamp()\n food_stamps.append(food_stamp)\n\nfood.hideturtle()\n\n#this one\ndef make_food():\n min_x=-int(1000/2/50)+1\n max_x=int(1000/2/50)-1\n min_y=-int(1000/2/50)+1\n max_y=int(1000/2/50)-1\n\n food_x = random.randint(min_x,max_x)*50\n food_y = random.randint(min_y,max_y)*50\n food_xy=(food_x,food_y)\n if food_xy not in pos_list: \n food.goto(food_x,food_y)\n food_pos.append(food.pos())\n food_stamps.append(food.stamp())\n print('i added one food')\n\n\n\n\n\nscar = turtle.clone()\nturtle.register_shape(\"hunter1.gif\")\nscar.shape(\"hunter1.gif\")\n\n\n\n\n\nturtle.tracer()\n\nturtle.penup()\n#makes the turtle move more smoothly.\nsimba = turtle.clone()\n\n\nturtle.register_shape(\"simba2.gif\")\nsimba.shape(\"simba2.gif\")\nturtle.hideturtle()\nsimba.goto(400,-350)\nsimba.direction = 'Up'\n\n\nscore=0\n\n#that will make simba move using the arrows:\ndef move_simba():\n #global food_stamps\n my_pos = simba.pos()\n x_pos = my_pos[0]\n y_pos = my_pos[1]\n\n if simba.pos() in pos_list:\n print(\"you touched the wall\")\n simba.goto(450,-450)\n#if you press the up arrow simba will move forward\n elif simba.direction == 'Up' : \n simba.goto(x_pos , y_pos +50)\n print('you moved up')\n\n#if you press the down arrow simba will move down\n elif simba.direction == 'Down':\n simba.goto(x_pos, y_pos - 50)\n print('you moved down')\n\n elif simba.direction == 'Right':\n simba.goto(x_pos + 50, 
y_pos)\n print('you moved right')\n\n elif simba.direction == 'Left' :\n simba.goto(x_pos - 50,y_pos)\n print('you moved left')\n\n\n global score\n\n \n if simba.pos() in food_pos:\n food_index=food_pos.index(simba.pos())\n food.clearstamp(food_stamps[food_index])\n food_pos.pop(food_index)\n food_stamps.pop(food_index)\n print(\"You have eaten the food!\")\n turtle.undo()\n score=score+1\n turtle.penup()\n turtle.color('red')\n turtle.hideturtle()\n turtle.goto(-470,450)\n turtle.pendown()\n turtle.write('score : ' +str(score), False,align=\"left\", font=(\"Arial\",20,\"normal\"))\n\n\n \n#this one\n if len(food_stamps) < 1:\n make_food()\n \n \ndef up():\n simba.direction = 'Up'\n print('you pressed the up key!')\n move_simba()\nturtle.onkeypress(up, 'Up')\n\ndef down():\n simba.direction = 'Down'\n print('you pressed the down key!')\n move_simba()\nturtle.onkeypress(down, 'Down')\n\ndef right():\n simba.direction = 'Right'\n move_simba()\nturtle.onkeypress(right,'Right')\n\ndef left() :\n simba.direction = 'Left'\n move_simba()\nturtle.onkeypress(left,'Left')\n\nturtle.listen()\n\n\nsimba.speed(-2)\n \n\n \n\nmy_pos=simba.pos()\nscar_pos=scar.pos()\n\nx_pos1=scar.pos()[0]\ny_Pos1=scar.pos()[1]\n\nwhile 1==1:\n scar.penup()\n scar.speed(5)\n scar.goto((random.randint(-500,500)),(random.randint(-500,500)))\n\n\n\n\n\n\n\n\n\nprint(pos_List)\nmove_simba()\n\nturtle.mainloop()\n\n","sub_path":"lab_final-proj.py","file_name":"lab_final-proj.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"205938759","text":"#\n# Plotting ROC curve\n# 60-473 Assignment 2 Q5\n#\n# For the RBF SVM classifier, plot the ROC curve and determine the\n# AUC for each dataset.\n#\n\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import train_test_split\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# SVM Classifier using RBF\ndef svm_rbf(samples, labels):\n\n classifier = svm.SVC(kernel='rbf')\n\n # Use standard train/test split algorithm to obtain distinct sets\n X_train, X_test, y_train, y_test = train_test_split(samples, labels, test_size=.5, random_state=0)\n\n # Use classifier to fit model to training data, then score it with test data\n test_scores = classifier.fit(X_train, y_train).decision_function(X_test)\n\n # Find True and False Positive rate for ROC curve\n fpr, tpr, thresholds = roc_curve(y_test, test_scores, pos_label=2)\n\n # Calculate AUC\n roc_auc = auc(fpr, tpr)\n return fpr, tpr, roc_auc\n\n\n\n# Plotting function for ROC curve\ndef plot_roc(name, statistics):\n\n fpr, tpr, roc_auc = statistics\n\n plt.figure()\n\n # Plot ROC curve using FPR and TPR arrays\n # These arrays are of size n, where n is the number of samples\n plt.plot(fpr, tpr, color='green', lw=2,\\\n label='ROC curve for Positive class (AUC = %0.2f)' % roc_auc)\n\n plt.plot([0,1], [0,1], lw=2, linestyle='--')\n\n # Specify x and y axis range\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n\n # Plot labels\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('SVM RBF Classifier on ' + name)\n plt.legend(loc=\"lower right\")\n plt.show()\n\n\n\n# Return 5-tuple corresponding to the 5 calculated measures of efficiency\ndef calculate_efficiency(statistics):\n\n tn, fp, fn, tp = statistics\n\n # Score the training fit compared against the test samples\n return accuracy(tn, fp, fn, tp), sensitivity(tp, fn),\\\n specificity(tn, fp), ppv(tp, fp), npv(tn, fn)\n\n\n# Print all measures out nicely\ndef print_efficiency(acc, sens, spec, ppv, npv):\n\n print(\"Accuracy: \\t\", acc)\n print(\"Sensitivity:\\t\", sens)\n print(\"Specificity:\\t\", spec)\n print(\"PPV: \\t\", ppv)\n print(\"NPV: \\t\", npv)\n print()\n\n\n# Proportion of successful predictions\ndef accuracy(tn, fp, fn, tp):\n return ((tp+tn) / float(tp+fp+tn+fn))\n\n# Of all that should be classified as Positive, how many were?\ndef sensitivity(tp, fn):\n return (tp / float(tp+fn))\n\n# Of all that should be classified as Negative, how many were?\ndef specificity(tn, fp):\n return (tn / float(tn+fp))\n\n# Of all classified as Positive, how many were correctly classified?\ndef ppv(tp, fp):\n return (tp / float(tp+fp))\n\n# Of all classified as Negative, how many were correcly classified?\ndef npv(tn, fn):\n return (tn / float(tn+fn))\n\n\n\n\n# Read CSV and separate into samples and labels\ndef split_data(filename):\n\n # Read data into 2D array of samples eg. 
[[-1.9, 2.4, 1], [...], ...]\n data = pd.read_csv(filename, header=None).as_matrix()\n\n # Split input CSV into parts\n # s[0]-Empty , s[1]-2D array of sample data , s[2]-2D array of labels\n\n s = np.split(data, [0, 2, 3], axis=1)\n return s[1], np.reshape(s[2], np.size(s[2]))\n\n\n\ndef main():\n\n input_files = [\"twogaussians.csv\", \"halfkernel.csv\", \\\n \"twospirals.csv\", \"clusterincluster.csv\"]\n\n # Plot the ROC curve for the SVM RBF classifier on each dataset\n for filename in input_files:\n\n samples, labels = split_data(filename)\n statistics = svm_rbf(samples, labels)\n plot_roc(filename, statistics)\n\nmain()\n","sub_path":"Assignment2/question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
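As a cross-check on the `auc(fpr, tpr)` computation in the record above, sklearn's `roc_auc_score` gives the same number directly; the toy labels and scores below come from the sklearn documentation example, not from the record's CSV datasets:

```python
from sklearn.metrics import roc_curve, auc, roc_auc_score

y_true = [1, 1, 2, 2]
scores = [0.1, 0.4, 0.35, 0.8]
fpr, tpr, _ = roc_curve(y_true, scores, pos_label=2)
print(auc(fpr, tpr))                                    # 0.75
print(roc_auc_score([y == 2 for y in y_true], scores))  # 0.75, same value
```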
+{"seq_id":"510160121","text":"# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport ast\nimport unittest\n\nfrom pants.engine.selectors import Get\n\n\nclass AClass:\n pass\n\n\nclass BClass:\n def __eq__(self, other):\n return type(self) == type(other)\n\n\nclass SubBClass(BClass):\n pass\n\n\nclass GetTest(unittest.TestCase):\n def test_create(self):\n # Test the equivalence of the 2-arg and 3-arg versions.\n self.assertEqual(Get(AClass, BClass()), Get(AClass, BClass, BClass()))\n\n with self.assertRaises(TypeError) as cm:\n Get(AClass, BClass)\n self.assertEqual(\n \"\"\"\\\nThe two-argument form of Get does not accept a type as its second argument.\n\nargs were: Get(({a!r}, {b!r}))\n\nGet.create_statically_for_rule_graph() should be used to generate a Get() for\nthe `input_gets` field of a rule. If you are using a `await Get(...)` in a rule\nand a type was intended, use the 3-argument version:\nGet({a!r}, {t!r}, {b!r})\n\"\"\".format(\n a=AClass, t=type(BClass), b=BClass\n ),\n str(cm.exception),\n )\n\n with self.assertRaises(ValueError) as cm:\n Get(1)\n self.assertEqual(\n \"Expected either two or three arguments to Get; got (1,).\", str(cm.exception)\n )\n\n def _get_call_node(self, input_string):\n return ast.parse(input_string).body[0].value\n\n def test_extract_constraints(self):\n parsed_two_arg_call = self._get_call_node(\"Get(A, B(x))\")\n self.assertEqual((\"A\", \"B\"), Get.extract_constraints(parsed_two_arg_call))\n\n with self.assertRaises(ValueError) as cm:\n Get.extract_constraints(self._get_call_node(\"Get(1, 2)\"))\n\n # The name of the type of a number literal AST node changed from \"Num\" to \"Constant\" between Python 3.6 and Python 3.8.\n # This will compute the correct name for either version of Python.\n n = ast.parse(\"1\").body[0].value\n num_ty = getattr(n, \"id\", type(n).__name__)\n\n msg = f\"Two arg form of Get expected (product_type, subject_type(subject)), but got: ({num_ty}, {num_ty})\"\n assert str(cm.exception) == msg\n\n parsed_three_arg_call = self._get_call_node(\"Get(A, B, C(x))\")\n self.assertEqual((\"A\", \"B\"), Get.extract_constraints(parsed_three_arg_call))\n\n with self.assertRaises(ValueError) as cm:\n Get.extract_constraints(self._get_call_node(\"Get(A, 'asdf', C(x))\"))\n\n # The name of the type of a string literal AST node also changed between Python 3.6 and Python 3.8.\n n = ast.parse(\"'test'\").body[0].value\n str_ty = getattr(n, \"id\", type(n).__name__)\n msg = f\"Three arg form of Get expected (product_type, subject_declared_type, subject), but got: (A, {str_ty}, Call)\"\n assert str(cm.exception) == msg\n\n def test_create_statically_for_rule_graph(self):\n self.assertEqual(\n Get(AClass, BClass, None), Get.create_statically_for_rule_graph(AClass, BClass)\n )\n","sub_path":"src/python/pants/engine/selectors_test.py","file_name":"selectors_test.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"121874934","text":"# -*-coding:UTF-8-*\nfrom tkinter import Entry\n\nfrom .getroot import root, dimx_\nfrom .operations import *\n\n\ndef saisie_2matrices(window, canvas):\n canvas.create_text(400, 20, text=\"n1,m1\", font=\"/font/myfont 10 bold\", fill=\"black\")\n canvas.create_text(400, 60, text=\"n2,m2\", font=\"/font/myfont 10 bold\", fill=\"black\")\n ligne1 = Entry(window, relief=\"groove\", width=3, bg=\"#eee\")\n col1 = Entry(window, relief=\"groove\", width=3, bg=\"#eee\")\n ligne2 = Entry(window, relief=\"groove\", width=3, bg=\"#eee\")\n col2 = Entry(window, relief=\"groove\", width=3, bg=\"#eee\")\n ligne1.pack()\n ligne2.pack()\n col1.pack()\n col2.pack()\n bouton_ok = Button(window, text=\"Ok\", relief=\"groove\", font=\"/font/myfont 6 bold\",\n command=lambda: affiche_saisie_2matrices(window, canvas, ligne1.get(),\n col1.get(), ligne2.get(), col2.get(),\n bouton_ok), bg=\"#eee\", fg=\"black\",\n activebackground=\"#dcc\", width=2)\n canvas.create_window(445, 20, window=ligne1)\n canvas.create_window(485, 20, window=col1)\n canvas.create_window(445, 60, window=ligne2)\n canvas.create_window(485, 60, window=col2)\n canvas.create_window(540, 40, window=bouton_ok)\n canvas.pack()\n\n\ndef saisie_1matrice(window, canvas, saiz):\n canvas.create_text(400, 20, text=\"n \", font=\"/font/myfont 10 bold\", fill=\"black\")\n ligne = Entry(window, relief=\"groove\", width=3, bg=\"#eee\")\n ligne.pack()\n\n bouton_ok = Button(window, text=\"Ok\", relief=\"groove\", font=\"/font/myfont 6 bold\",\n command=lambda: saiz(window, canvas, ligne.get(), bouton_ok),\n bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\", width=2)\n canvas.create_window(425, 20, window=ligne)\n canvas.create_window(480, 20, window=bouton_ok)\n canvas.pack()\n\n\ndef affiche_saisie_1matrice(window, canvas, n, bouton):\n try:\n n = int(n)\n if n == 0:\n messagebox.showerror(\"Erreur\", \"Veuillez saisir un entier strictement positif\")\n return -1\n bouton.destroy()\n case = [[0] * n for _ in range(n)]\n for i in range(n):\n for j in range(n):\n case[i][j] = Entry(window, width=3, relief=\"groove\", font=\"/font/myfont 9\", bg=\"#eee\")\n canvas.create_window(70 + 35 * j, 100 + 35 * i, window=case[i][j])\n bouton_ok = Button(window, text=\"Ok\", bg=\"#eee\", font=\"/font/myfont 6 bold\", fg=\"black\",\n activebackground=\"#dcc\",\n width=2,\n relief=\"groove\", command=lambda: affiche_boutons_operations1matrice(canvas,\n case, n,\n bouton_ok))\n canvas.create_text(20, 100 + (35 * n / 2) - 20, text=\"A=\", font=\"/font/myfont 13\", fill=\"black\")\n canvas.create_window(50 + 35 * n + 30, 100 + (35 * n / 2) - 20, window=bouton_ok)\n except ValueError:\n messagebox.showerror(\"Erreur :-/\", \"Veuillez saisir un entier positif!\")\n\n\ndef affiche_saisie_2matrices(window, canvas, a, b, c, d, bouton):\n try:\n a = int(a)\n b = int(b)\n c = int(c)\n d = int(d)\n if (a, b, c, d) == (0, 0, 0, 0):\n messagebox.showerror(\"Erreur\", \"Veuillez saisir des entiers strictement positifs\")\n return -1\n bouton.destroy()\n case1 = [[0] * b for _ in range(a)]\n case2 = [[0] * d for _ in range(c)]\n for i in range(a):\n for j in range(b):\n case1[i][j] = Entry(window, width=3, relief=\"groove\", font=\"/font/myfont 8\", bg=\"#eee\")\n canvas.create_window(70 + 30 * j, 100 + 30 * i, window=case1[i][j])\n for k in range(c):\n for l in range(d):\n case2[k][l] = Entry(window, width=3, relief=\"groove\", font=\"/font/myfont 8\", bg=\"#eee\")\n canvas.create_window(dimx_ / 2 + 30 * l, 100 + 30 * k, window=case2[k][l])\n 
bouton_ok = Button(window, text=\"Ok\", relief=\"groove\",\n command=lambda: affiche_boutons_operation2matrices(canvas, case1, case2, a,\n b, c, d, bouton_ok),\n bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\",\n font=\"/font/myfont 6 bold\", width=2)\n canvas.create_text(20, 100 + (30 * a / 2) - 20, text=\"A=\", font=\"/font/myfont 13\", fill=\"black\")\n canvas.create_text(370, 100 + (30 * a / 2) - 20, text=\"B=\", font=\"/font/myfont 13\", fill=\"black\")\n canvas.create_window(400 + 50 + 30 * a, 100 + (30 * a / 2) - 20, window=bouton_ok)\n except ValueError:\n messagebox.showerror(\"Erreur :-/\", \"Veuillez saisir des entiers positifs!\")\n\n\ndef affiche_saisiesyslin(window, canvas, n1, bouton):\n try:\n n = int(n1)\n bouton.destroy()\n case = [[0] * n for _ in range(n)]\n vecteur = [0] * n\n for i in range(n):\n for j in range(n):\n case[i][j] = Entry(window, width=3, relief=\"groove\", font=\"/font/myfont 9\", bg=\"#eee\")\n canvas.create_window(70 + 35 * j, 100 + 35 * i, window=case[i][j])\n for i in range(n):\n vecteur[i] = Entry(window, width=3, relief=\"groove\", font=\"/font/myfont 9\", bg=\"#eee\")\n canvas.create_window(200 + 35 * (n - 1), 100 + 35 * i, window=vecteur[i])\n bouton_ok = Button(window, text=\"Ok\", bg=\"#eee\", font=\"/font/myfont 6 bold\", fg=\"black\",\n activebackground=\"#dcc\",\n width=3,\n relief=\"groove\",\n command=lambda: affiche_bouton_operationssyslin(canvas, case, vecteur, n,\n bouton_ok))\n canvas.create_text(20, 100 + (35 * n / 2) - 20, text=\"A=\", font=\"/font/myfont 13\", fill=\"black\")\n canvas.create_text(180 + 35 * (n - 1) - 20, 100 + (35 * n / 2) - 20, text=\"b=\",\n font=\"/font/myfont 13\",\n fill=\"black\")\n canvas.create_window(250 + 35 * n + 30, 100 + (35 * n / 2) - 20, window=bouton_ok)\n except ValueError:\n messagebox.showerror(\"Error\", \"Veuillez saisir un entier positif!\")\n\n\ndef affiche_bouton_operationssyslin(canvas, mat, vect, n, bouton):\n try:\n for a in range(n):\n for b in range(n):\n float(mat[a][b].get())\n float(vect[a].get())\n except ValueError:\n messagebox.showerror(\"Erreur\", \"Veuillez saisir des reels!\")\n bouton.destroy()\n bouton_gauss = Button(root, command=lambda: operation_gauss(mat, vect, n),\n text=\"Methode de GAUSS\",\n relief=\"groove\", font=\"/font/myfont 15 bold\", bg=\"#eee\", fg=\"black\",\n activebackground=\"#dcc\")\n bouton_lu = Button(root, command=lambda: operation_lu(mat, vect, n),\n text=\"Factorisation LU\",\n relief=\"groove\", font=\"/font/myfont 15 bold\", bg=\"#eee\", fg=\"black\",\n activebackground=\"#dcc\")\n canvas.create_window(dimx_ / 4 + 100, 100 + 35 * n + 30, window=bouton_gauss)\n canvas.create_window(dimx_ / 4 + 400, 100 + 35 * n + 30, window=bouton_lu)\n\n\ndef affiche_boutons_operations1matrice(canvas, mat, n, bouton):\n try:\n for a in range(n):\n for b in range(n):\n float(mat[a][b].get())\n except ValueError:\n messagebox.showerror(\"Erreur\", \"Veuillez saisir des réels!\")\n return -1\n bouton.destroy()\n morse = Button(root, text=\"Stockage morse matrice\", command=lambda: operation_stockmorse(mat, n),\n relief=\"groove\",\n font=\"/font/myfont 11 bold\",\n bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\")\n inverser = Button(root, text=\" Inverse d'une matrice\",\n command=lambda: operation_inverse(mat, n),\n relief=\"groove\",\n font=\"/font/myfont 11 bold\", bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\")\n determinant = Button(root, text=\" Determinant matrice \", command=lambda: operation_determinant(\n mat, n), relief=\"groove\", 
font=\"/font/myfont 11 bold\",\n bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\")\n trans = Button(root, text=\" Transposee matrice \",\n command=lambda: operation_transposee(mat, n), relief=\"groove\",\n font=\"/font/myfont 11 bold\",\n bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\")\n bouton_valeurpropre = Button(root, command=\"\",\n text=\"Valeurs Propres matrice\",\n relief=\"groove\", font=\"/font/myfont 11 bold\", bg=\"#eee\", fg=\"black\",\n activebackground=\"#dcc\")\n canvas.create_window(dimx_ / 4 + 100, 100 + 35 * n + 30, window=morse)\n canvas.create_window(dimx_ / 4 + 100, 100 + 35 * n + 80, window=inverser)\n canvas.create_window(dimx_ / 4 + 350, 100 + 35 * n + 30, window=determinant)\n canvas.create_window(dimx_ / 4 + 350, 100 + 35 * n + 80, window=trans)\n canvas.create_window(dimx_ / 4 + 100, 100 + 35 * n + 130, window=bouton_valeurpropre)\n\n\ndef affiche_boutons_operation2matrices(canvas, mat1, mat2, a, b, c, d, bouton):\n try:\n for i in range(a):\n for j in range(b):\n float(mat1[i][j].get())\n for x in range(c):\n for y in range(d):\n float(mat2[x][y].get())\n except ValueError:\n messagebox.showerror(\"Erreur\", \"Veuillez saisir des réels!\")\n bouton.destroy()\n produit = Button(root, text=\"Produit\", command=lambda: operation_produit(\n mat1, mat2, a, b, c, d), relief=\"groove\",\n font=\"/font/myfont 12 bold\", bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\")\n somme = Button(root, text=\"Somme\", command=lambda: operation_somme(mat1, mat2, a, b, c, d),\n relief=\"groove\", font=\"/font/myfont 12 bold\",\n bg=\"#eee\", fg=\"black\", activebackground=\"#dcc\")\n canvas.create_window(dimx_ / 4 + 50, 250 + 35 * a, window=somme)\n canvas.create_window(dimx_ / 4 + 50 + 250, 250 + 35 * a, window=produit)\n\n# M.TALL 2015-2016\n","sub_path":"old/src/interface/affiche_saisie.py","file_name":"affiche_saisie.py","file_ext":"py","file_size_in_byte":10557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"236382000","text":"#ecoding=utf-8\n__author__ = 'Administrator'\nimport pymongo\nimport datetime\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom conf import *\nbase_path = \"/home/huolibi/data/\"\nfile_in_path2 = \"G:/data/new_fileau\"\nerror_path = base_path + \"error.data\"\n\n#file_in_linux_path = \"/home/huolibi/data/user_order_test\"\nfile_in_linux_path = base_path + \"new_fileaa\"\n\n\n\n\nfile_in = open(file_in_linux_path)\nerrpr_file = open(error_path, \"a\")\n\ncon_local=pymongo.Connection(host=\"localhost\", port=27017)\ndb = con_local.admin\ndb.authenticate(\"gtgj\",\"gtgj\")\ndb = con_local.gtgj\ndict_list = []\n\nfile_lines = len(file_in.readlines())\n# print(file_lines)\nfile_in = open(file_in_linux_path)\n# uid,p_info,account,order_date,i_status,depart_date,depart_name,arrive_name,ticket_count,train_no,amount,create_time\nfor (count, line) in enumerate(file_in):\n # print(count)\n stringArr = line.strip().split(\"\\t\")\n # dict_list = []\n # print(count)\n try:\n uid = stringArr[0]\n p_info = stringArr[1]\n account = stringArr[2]\n order_date = stringArr[3]\n s_order_date = order_date[0:4]+order_date[5:7]+order_date[8:10]\n i_order_date = int(s_order_date)\n i_status = stringArr[4]\n depart_date = stringArr[5]\n i_depart_date = int(depart_date[0:4]+depart_date[5:7]+depart_date[8:10])\n\n depart_name = stringArr[6]\n arrive_name = stringArr[7]\n depart_code, depart_city_code = getCode(depart_name.encode('gbk'))\n arrive_code, arrive_city_code = getCode(arrive_name.encode('gbk'))\n\n ticket_count = int(stringArr[8])\n train_no = stringArr[9]\n amount = float(stringArr[10])\n create_time = datetime.datetime.strptime(stringArr[11], \"%Y-%m-%d %H:%M:%S\")\n t = {\"uid\": uid, \"p_info\": p_info, \"account\": account, \"order_date\": i_order_date,\n \"i_status\": i_status, \"depart_date\": i_depart_date, \"depart_name\": depart_name, \"depart_code\":depart_code, \"depart_city_code\":depart_city_code,\n \"arrive_name\": arrive_name, \"arrive_code\":arrive_code, \"arrive_city_code\":arrive_city_code,\n \"ticket_count\": ticket_count, \"train_no\": train_no, \"amount\": amount, \"create_time\": create_time}\n dict_list.append(t)\n # conf.order.insert(t)\n except Exception as e:\n errpr_file.write(line)\n # print(Exception)\n # print(line)\n if count % 10000 == 0 or (count == (file_lines-1)):\n # print dict_list\n print(db.order.insert(dict_list))\n dict_list = []\n\n # print(uid,p_info,account, order_date)\n","sub_path":"gt_order_mongodb/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"181030481","text":"high_income = False\r\ngood_credits = True\r\nstudent = True\r\n# HIGH_INCOME AND GOOD_CREDIT ARE ALREADY BOOLEAN SO IN CONDITIONAL\r\n# STATEMENT YOU DON'T HAVE TO WRITE == TRUE (IT IS UNPROFESSIONAL)\r\n\r\n\r\nif high_income and good_credits and not student:\r\n print(\"Elligible\")\r\nelse:\r\n print(\"Not Elligible\")\r\n","sub_path":"Control Flow/Short circuit.py","file_name":"Short circuit.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"186155274","text":"# Copyright 2015 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Test Class for Common Operations.\"\"\"\n\nimport time\nimport unittest\n\nimport mock\n\nfrom proliantutils import exception\nfrom proliantutils.ilo import common\nfrom proliantutils.ilo import ribcl\nfrom proliantutils.tests.ilo import ribcl_sample_outputs as ribcl_output\n\n\nclass IloCommonModuleTestCase(unittest.TestCase):\n\n def setUp(self):\n super(IloCommonModuleTestCase, self).setUp()\n self.ribcl = ribcl.RIBCLOperations(\"x.x.x.x\", \"admin\", \"Admin\",\n 60, 443)\n\n @mock.patch.object(time, 'sleep', lambda x: None)\n @mock.patch.object(ribcl.RIBCLOperations, 'get_product_name')\n def test_wait_for_ilo_after_reset_ribcl_ok(self, name_mock):\n name_mock.return_value = ribcl_output.GET_PRODUCT_NAME\n common.wait_for_ilo_after_reset(self.ribcl)\n name_mock.assert_called_once_with()\n\n @mock.patch.object(time, 'sleep')\n @mock.patch.object(ribcl.RIBCLOperations, 'get_product_name')\n def test_wait_for_ilo_after_reset_retry(self, name_mock, sleep_mock):\n exc = exception.IloError('error')\n name_mock.side_effect = [exc, ribcl_output.GET_PRODUCT_NAME]\n common.wait_for_ilo_after_reset(self.ribcl)\n self.assertEqual(2, name_mock.call_count)\n name_mock.assert_called_with()\n\n @mock.patch.object(time, 'sleep')\n @mock.patch.object(ribcl.RIBCLOperations, 'get_product_name')\n def test_wait_for_ilo_after_reset_fail(self, name_mock, time_mock):\n exc = exception.IloError('error')\n name_mock.side_effect = exc\n self.assertRaises(exception.IloConnectionError,\n common.wait_for_ilo_after_reset,\n self.ribcl)\n self.assertEqual(common.RETRY_COUNT, name_mock.call_count)\n name_mock.assert_called_with()\n","sub_path":"proliantutils/tests/ilo/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"512091180","text":"# 给定一个非负整数数组和一个整数 m,你需要将这个数组分成 m 个非空的连续子数组。设计一个算法使得这 m 个子数组各自和的最大值最小。\n\n# 注意:\n# 数组长度 n 满足以下条件:\n\n# 1 ≤ n ≤ 1000\n# 1 ≤ m ≤ min(50, n)\n# 示例:\n\n# 输入:\n# nums = [7,2,5,10,8]\n# m = 2\n\n# 输出:\n# 18\n\n# 解释:\n# 一共有四种方法将nums分割为2个子数组。\n# 其中最好的方式是将其分为[7,2,5] 和 [10,8],\n# 因为此时这两个子数组各自的和的最大值为18,在所有情况中最小。\n\n# 来源:力扣(LeetCode)\n# 链接:https://leetcode-cn.com/problems/split-array-largest-sum\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n\nclass Solution:\n def splitArray(self, nums, m: int) -> int:\n low = max(nums)\n high = sum(nums)\n while(low < high):\n mid = (low + high) // 2\n cnt = 0\n sub_sum = 0\n for num in nums:\n sub_sum += num\n if(sub_sum > mid):\n cnt += 1\n sub_sum = num\n if(sub_sum > 0):\n cnt += 1\n if(cnt > m):\n low = mid + 1\n else:\n high = mid\n return low\n\n\nif __name__ == '__main__':\n solution = Solution()\n nums = [7, 8, 8, 1]\n print(solution.splitArray(nums, 3))\n","sub_path":"LeetCode/410. 分割数组的最大值.py","file_name":"410. 分割数组的最大值.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"323266250","text":"#!/usr/bin/env python\n\nimport redis\nfrom celery import Celery\nfrom flask import Flask\nfrom flask import render_template\nfrom flask_cors import CORS\n\nREDIS_HOST = 'localhost'\nREDIS_PORT = 6379\nKITTEN_COUNTER_METRIC = 'kitten_counter'\n\napp = Flask(__name__)\nCORS(app, supports_credentials=True)\napp.config['SECRET_KEY'] = 'top-secret!'\n\n# Celery configuration\napp.config['broker_url'] = 'redis://{}:{}/0'.format(REDIS_HOST, REDIS_PORT)\napp.config['result_backend'] = 'redis://{}:{}/0'.format(REDIS_HOST, REDIS_PORT)\n\n\n# Initialize Celery\ncelery = Celery(app.name, broker=app.config['broker_url'])\ncelery.conf.update(app.config)\n\n# Initialize Redis\nr = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)\nr.set(KITTEN_COUNTER_METRIC, 0)\n\n\n@celery.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n # Calls count_kittens() every 1 seconds.\n sender.add_periodic_task(1.0, count_kittens.s(), name='Count a new kitten every second')\n\n\n@celery.task\ndef count_kittens():\n \"\"\"Background task to count kittens.\"\"\"\n with app.app_context():\n r.incr(KITTEN_COUNTER_METRIC, 1)\n return r.get(KITTEN_COUNTER_METRIC)\n\n\n@app.route('/', methods=['GET'])\ndef index():\n \"\"\"A very exciting landing page.\"\"\"\n return render_template('index.html')\n\n\n@app.route('/kittens', methods=['GET'])\n@app.route('/kittens/', methods=['GET'])\ndef view_kitten_count():\n \"\"\"View the kitten counter page.\"\"\"\n return render_template('kittens.html', kitten_count=r.get(KITTEN_COUNTER_METRIC))\n\n\n@app.route('/secrets', methods=['GET'])\n@app.route('/secrets/', methods=['GET'])\ndef view_secrets():\n \"\"\"View a super secret page that no one should see. Shhh.\"\"\"\n return render_template('secrets.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"kitten_counter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"653543531","text":"import pandas as pd\nimport numpy as np\n\n\"\"\"\nThis assignment includes data i/o using python and a bit of data wrangling\nThis will be helpful for your final mini project.\nAll the useful datasets are present inside data folder\n\"\"\"\n\n#1\n# read csv_sample1.csv using read_csv function and store in df1\ndf1 = pd.read_csv('data/csv_sample1.csv')\n\n#2\n# read csv_sample1.csv using read_table function and store in df2\ndf2 = pd.read_table('data/csv_sample1.csv')\n\n#3\n# read csv_sample2.csv using read_csv without header and store in df3\ndf3 = pd.read_csv('data/csv_sample2.csv', header=None)\n\n\n#4\n\"\"\"\nread csv_sample2.csv in df4\nrename the columns as \"new_col1\", \"new_col2\", \"new_col3\", \"new_col4\" and \"new_value\"\nassign new_value column as index\n\"\"\"\ndf4 = pd.read_csv('data/csv_sample2.csv', index_col='new_value', names = [\"new_col1\", \"new_col2\", \"new_col3\", \"new_col4\", \"new_value\"])\n\n#5\n# read space_sep.txt using read_table and store in df5\ndf5 = pd.read_table('data/space_sep.txt')\n\n#6\n# read skip_row.csv using read_csv while skipping the rows 0, 2, 3 and 6\ndf6 = pd.read_csv('data/skip_row.csv', skiprows=[0,2,3,6])\n\n#7\n# read null_data.csv in df7. Drop the rows with null values. store the resulting data frame in df8\ndf7 = pd.read_csv('data/null_data.csv')\ndf8 = df7.dropna()\n\n#8\n# fill the null values in df7 using with 1000 and name the new data frame df8\n\ndf8 = df7.fillna(value=1000)\n\n#9\n# use forward to fill the null values in df7 data frame\n\ndf8 = df7.fillna(method='ffill')\n\n\n#10\ndf9 = pd.DataFrame({'col1':[1,2,np.nan,4,5], 'col2':[9,np.nan,7,6,5]})\n# fill the nan values of df9 with mean of column\ndf9.fillna(df9.mean())\n","sub_path":"part4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"620349388","text":"# 引入必要的包\nimport csv\nimport os\nimport numpy as np\nimport pandas as pd\n\n# 指定数据集的路径\ndataset_path = \"../data\"\nreport_2015_datafile = os.path.join(dataset_path, \"2015.csv\")\nreport_2016_datafile = os.path.join(dataset_path, \"2016.csv\")\n#print(report_2015_datafile)\n#print(report_2016_datafile)\n\n# 读入数据\ndef load_data(data_file):\n \"\"\"\n 读取数据文件,加载数据。\n 返回的是,其中列表的每一个元素的元组(tuple),\n 包括Country , Region ,Happiness Rank , Happiness Score\n \"\"\"\n data = []\n with open(data_file,'r') as csvfile:\n data_reader = csv.DictReader(csvfile)\n for row in data_reader:\n \"\"\"\n 取出Country , Region ,Happiness Rank , Happiness Score\n \"\"\"\n data.append((row['Country'], row['Region'],row['Happiness Rank'], row['Happiness Score']))\n return data\n\nreport_2015_data = load_data(report_2015_datafile)\nreport_2016_data = load_data(report_2016_datafile)\n\n# 数据预览\nprint('2015年报告,前10条记录预览:')\nprint(report_2015_data[:10])\nprint('2016年报告,前10条记录预览:')\nprint(report_2016_data[:10])\n\n# 注意列表推导式的使用\nhappiness_2015_scores = [float(item[3]) for item in report_2015_data]\nhappiness_2016_scores = [float(item[3]) for item in report_2016_data]\n\n# 查看数据\nprint('2015年报告,前10条记录幸福指数:', happiness_2015_scores[:10])\nprint('2016年报告,前10条记录幸福指数:', happiness_2016_scores[:10])\nprint(\"===============================================\")\n\nhist_2015, hist_edge_2015 = np.histogram(happiness_2015_scores)\nhist_2016, hist_edge_2016 = np.histogram(happiness_2016_scores)\n\nprint('2015年报告,幸福指数直方图分布:{};直方图边界:{}。'.format(hist_2015, hist_edge_2015))\nprint('2016年报告,幸福指数直方图分布:{};直方图边界:{}。'.format(hist_2016, hist_edge_2016))\nprint(\"===============================================\")\n\n# 4.2 统计分析区域的幸福指数\ndef get_region_happiness_scores(report_data):\n \"\"\"\n 获取区域包含国家的幸福指数\n \"\"\"\n region_score_dict = {}\n for item in report_data:\n region = item[1] # 区域\n score = float(item[3])\n if region in region_score_dict:\n # 如果region_score_dict已经记录了该区域,则添加该区域的幸福指数到列表中\n region_score_dict[region].append(score)\n else:\n # 如果region_score_dict未记录该区域,则为该区域初始化一个空列表\n region_score_dict[region] = []\n\n return region_score_dict\n\nregion_2015_score_dict = get_region_happiness_scores(report_2015_data)\nregion_2016_score_dict = get_region_happiness_scores(report_2015_data)\n\n# 遍历数据字典,以2015年为例\nprint('2015报告:')\nfor region, scores in region_2015_score_dict.items():\n print('{}:最大值{},最小值{},平均值{},中间值{}'.format(region,\n np.max(scores), np.min(scores), np.mean(scores), np.median(scores)))\n\n\n# 将数据构建成字典,key是国家,value是其排名\n# 扩展:字典推导式\ncountry_2015_score_dict = {item[0] : int(item[2]) for item in report_2015_data}\ncountry_2016_score_dict = {item[0] : int(item[2]) for item in report_2016_data}\n\n# 2015年数据预览\nprint(country_2015_score_dict)\n\nser_2015 = pd.Series(country_2015_score_dict)\nser_2016 = pd.Series(country_2016_score_dict)\n\nprint('2015年,数据预览:')\nprint(ser_2015.head())\n\nprint('2016年,数据预览:')\nprint(ser_2016.head())\n\n# 将两年的记录相减,即得出排名变化情况\n# 注意Series在进行计算时,是按照index的顺序进行计算的,所以不需要担心顺序问题\n# NaN表示某一年的记录缺失,无法计算\nser_change = ser_2016 - ser_2015\nprint('2015-2016排名变化:')\nprint(ser_change)\n\n# 可查看某个国家的排名变化\nprint('中国的排名变化:')\nprint(ser_change['China'])\n\n# 查看上升最快的国家\nprint('2015-2016幸福指数上升最快的国家', ser_change.argmax())\n# 查看下降最快的国家\nprint('2015-2016幸福指数下降最快的国家', ser_change.argmin())\n","sub_path":"test/world_happiness_report.py","file_name":"world_happiness_report.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"519286635","text":"import re, logging, os, math, functools\ninputs = __import__(\"inputs\")\n\nINPUT = inputs.get_program(2020, 8)\nEXAMPLE = inputs.get_program(2020, 8, True)\n\nclass GameConsole:\n def __init__(self, program, verbose=False, loops=1):\n self.instructions = {\n 'nop': None,\n 'acc': lambda i: self.accumulator(i),\n 'jmp': lambda a: self.move(offset=a)\n }\n\n self.verbose = verbose\n self.program = program\n self.acc = 0\n self.ip = 0\n self.loops = loops\n self.loop = 0\n self.seen = set()\n\n def accumulator(self, inc=1):\n if (self.verbose): print(\"Accumulate {} + {}\".format(self.acc, inc))\n self.acc += inc\n\n def move(self, address=None, offset=1):\n if address is not None:\n addr = address\n else:\n addr = self.ip + offset\n if (self.verbose): print(\"{} {} -> {}\".format(\"Jump\" if abs(self.ip - addr) > 1 else \"Move\", self.ip, addr))\n self.ip = addr\n\n def run(self):\n try:\n while True:\n op = self.program[self.ip][0]\n value = self.program[self.ip][1]\n\n # increment loop if we're back there and stop if hit number of loops\n if self.ip in self.seen:\n self.loop += 1\n self.seen.clear()\n if self.loop == self.loops: break\n\n # run the instruction the instruction pointer points to\n self.seen.add(self.ip)\n instruction = self.instructions[op]\n if instruction is not None: instruction(value)\n # yield self.ip, self.acc\n\n if op != \"jmp\":\n self.move()\n except IndexError:\n if self.verbose: print('Instruction overflow: {}!'.format(self.ip))\n # yield self.ip, self.acc\n\n except KeyError:\n if self.verbose: print('Unknown instruction: {}!'.format(self.program[self.ip]))\n # yield self.ip, self.acc\n\ng = GameConsole(EXAMPLE, True)\n\ndef part_one(program, verbose=False):\n gc = GameConsole(program, verbose=verbose, loops=1)\n # for _ in gc.run():\n # print(\"Computing...\")\n gc.run()\n\n return gc.acc\n\ndef part_two(program, verbose=False):\n gc = GameConsole(program, verbose=verbose, loops=1)\n acc = None\n\n # brute force change program until the console runs without looping\n for i, op in enumerate(program):\n if op == \"acc\":\n continue\n # replace jmp with nop and nop with jmp\n new_op = \"jmp\" if op == \"nop\" else \"nop\"\n # splice in the replaced operation, keeping int value\n new_prog = program[:i] + [(new_op, program[i][1])] + program[i+1:]\n # run it\n gc = GameConsole(new_prog, verbose=verbose, loops=1)\n gc.run()\n new_prog.clear()\n # did it run and exit?\n if gc.loop == 0:\n acc = gc.acc\n break\n\n return acc\n\n\nprint(\"Part one example: {}\".format(part_one(EXAMPLE, verbose=True)))\nprint(\"Part one: {}\".format(part_one(INPUT, verbose=False)))\nprint(\"Part two example: {}\".format(part_two(EXAMPLE)))\nprint(\"Part two: {}\".format(part_two(INPUT)))\n","sub_path":"python/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"66118314","text":"from rest_framework import status\nfrom rest_framework.response import Response\n\nfrom ..models import Currency\nfrom ..serializers import CurrencySerializer\n\n\nclass CurrencyController:\n\n def create_new_currency(request):\n currency_data = request.data\n new_currency = Currency()\n try:\n name_upper = currency_data[\"name\"].upper()\n if CurrencyController.is_currency_exists(name_upper):\n return Response({'result': 'Currency already exixst'}, status=status.HTTP_400_BAD_REQUEST)\n else:\n new_currency.name = name_upper\n new_currency.exchange = currency_data[\"exchange\"]\n new_currency.fee_percentage = currency_data[\"fee_percentage\"]\n new_currency.quantity = currency_data[\"quantity\"]\n\n new_currency.save()\n serializer = CurrencySerializer(new_currency)\n except Exception as ex:\n return Response({'result': ex.args}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'result': 'success', 'new_currency': serializer.data}, status=status.HTTP_201_CREATED)\n\n def list_or_read_currency(request, name):\n if name:\n name_upper = name.upper()\n try:\n queryset = Currency.objects.get(name=name_upper)\n except Currency.DoesNotExist:\n return Response({'result': 'we couldn’t find the currency'}, status=status.HTTP_404_NOT_FOUND)\n serializer = CurrencySerializer(queryset)\n return Response(serializer.data)\n else:\n queryset = Currency.objects.all()\n serializer = CurrencySerializer(queryset, many=True)\n return Response(serializer.data)\n\n def is_currency_exists(name):\n try:\n Currency.objects.get(name=name)\n except Currency.DoesNotExist:\n return False\n return True\n","sub_path":"app/core/controllers/CurrencyController.py","file_name":"CurrencyController.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"13890398","text":"# -*- coding:utf-8 -*-\n\nimport requests\nfrom requests.exceptions import RequestException\nimport re\nimport json\nfrom multiprocessing import Pool\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef get_one_page(url):\n try:\n headers = {'Cookie': 'uuid=1A6E888B4A4B29B16FBA1299108DBE9CA19BF5FFE6115FF5DB33C57435722EFC; __mta=252493494.1508603211173.1508603308257.1508603314017.11; _lxsdk_s=727305baebd98f615050b97d4a5a%7C%7C26',\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n response = requests.get(url,headers=headers)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n print('请求异常')\n return None\n\ndef parse_one_page(html):\n pattern = re.compile('.*?board-index.*?>(\\d*).*? data-src=\"(.*?)\".*?name\">(.*?).*?star\">(.*?)
.*?\"releasetime\">(.*?).*?'\n +'\"integer\">(.*?).*?\"fraction\">(.*?).*?',re.S)\n items = re.findall(pattern,html)\n for item in items:\n yield {\n 'index':item[0],\n 'image':item[1],\n 'title':item[2].decode('utf-8'),\n 'actor':item[3],\n 'time':item[4].strip()[5:],\n 'score':item[5]+item[6]\n }\n\ndef save_to_file(content):\n with open('result.txt','a') as f:\n f.write(json.dumps(content,ensure_ascii=False)+'\\n')\n f.close()\n\n\ndef main(offset):\n url = 'http://maoyan.com/board/4?offset='+str(offset)\n html = get_one_page(url)\n for item in parse_one_page(html):\n save_to_file(item)\n\nif __name__ == '__main__':\n\n pool = Pool()\n pool.map(main,[i * 10 for i in range(10)])\n\n","sub_path":"maoyan/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"589974191","text":"'''\r\nRepräsentiert die Zinsrechentabelle.\r\n\r\n@author: Kamfor\r\n'''\r\nfrom enum import Enum\r\nfrom util.util import util as ut\r\n\r\nclass ZDATA():\r\n\r\n NAME = 'zdata'\r\n\r\n HIDE_COLUMNS = ['TFID']\r\n\r\n COLUMN_ORDER = [\r\n 'FORMELID ASC'\r\n ]\r\n\r\n # important data with their column names\r\n class COLUMN(Enum):\r\n DATUM='Datum'\r\n RESTSCHULD='Restschuld'\r\n ZKOSTEN='Zinskosten'\r\n\r\n\r\n COLUMNS = [\r\n 'Datum', 'Restschuld', 'Zinskosten'\r\n ]\r\n\r\n DATECOLUMNS = [\r\n 'Datum'\r\n ]\r\n\r\n FLOATCOLUMNS = [\r\n 'Restschuld', 'Zinskosten'\r\n ]\r\n\r\n COLUMNS_AND_TYPES = \"'Datum' DATE, 'Restschuld' REAL, 'Zinskosten' REAL\"\r\n\r\n\r\n\r\n def __init__(self, dictionary):\r\n\r\n self.finSumme = 0\r\n self.rate = 0\r\n self.effJZ = 0\r\n self.effJZZ = 0\r\n self.sollZB = 0\r\n self.beginn = 0\r\n\r\n for name, val in dictionary.items():\r\n if name == \"FinSumme\" and len(str(val)) > 0:\r\n setattr(self, 'finSumme', ut.str_to_int(val))\r\n if name == \"Rate\" and len(str(val)) > 0:\r\n setattr(self, 'rate', ut.str_to_int(val))\r\n if name == \"EffJZ\" and len(str(val)) > 0:\r\n setattr(self, 'effJZ', ut.str_to_float(val))\r\n if name == \"EffJZZ\" and len(str(val)) > 0:\r\n setattr(self, 'effJZZ', ut.str_to_float(val))\r\n if name == \"SollZB\" and len(str(val)) > 0:\r\n setattr(self, 'sollZB', ut.str_to_int(val))\r\n if name == \"Beginn\" and len(str(val)) > 0:\r\n setattr(self, 'beginn', ut.dat_konv(val))\r\n\r\n def print_para(self):\r\n out = [[\"param\" , \"wert\"],\\\r\n [\"------\", \"----------\"],\\\r\n [\"total\" , self.finSumme],\\\r\n [\"rat\" , self.rate],\\\r\n [\"effJZ\" , self.effJZ],\\\r\n [\"effJZZ\" , self.effJZZ],\\\r\n [\"sollZB\", self.sollZB],\\\r\n [\"beginn\", self.beginn],\\\r\n [\"----------------------\",\"\"]]\r\n ut.view_2d_list(out)\r\n\r\n def status_ok(self):\r\n try:\r\n if self.finSumme > 0 \\\r\n and self.rate > 0 \\\r\n and self.effJZ > 0 \\\r\n and self.effJZZ > 0 \\\r\n and self.sollZB > 0:\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False\r\n\r\n def forward_projection(self):\r\n\r\n out = []\r\n rest = self.finSumme\r\n kosten = 0.0\r\n maxDauer = 12*70 + 1\r\n tmpZins = self.effJZ\r\n\r\n if self.finSumme * (self.effJZ/12) / 100 > self.rate:\r\n print(\"Rate ist zu klein\")\r\n return\r\n\r\n for step in range(0, maxDauer):\r\n tmpDate = ut.add_month(self.beginn, step)\r\n out += [(tmpDate, rest, kosten)]\r\n\r\n # Zinanpassung nach Sollzinsbindungsdauer\r\n # -2, weil step bei Null anfängt und weil hier für den Folgemonat gerechnet wird\r\n if step > 12 * self.sollZB - 2:\r\n tmpZins = self.effJZZ\r\n#############################################\r\n# Hier ist des Pudels Kern\r\n#############################################\r\n kostenMonat = rest * (tmpZins/12) / 100\r\n kosten += kostenMonat\r\n rest += kostenMonat - self.rate\r\n if rest < 0:\r\n return out\r\n#############################################\r\n#############################################\r\n return out\r\n\r\n def eval_cost(self, val):\r\n dauerSZB = 12 * self.sollZB\r\n dauerTotal = len(val)\r\n\r\n restSchuld = 0\r\n kostenSZB = 0\r\n kostenTotal = val[-1][1]\r\n\r\n if dauerTotal >= dauerSZB:\r\n kostenSZB = val[dauerSZB][1]\r\n restSchuld = val[dauerSZB][0]\r\n else:\r\n kostenSZB = kostenTotal\r\n restSchuld = 0\r\n\r\n return (restSchuld, kostenSZB, kostenTotal, 
dauerTotal)\r\n","sub_path":"zinsrechner/model/tables/zinsdaten.py","file_name":"zinsdaten.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"235282962","text":"from math import sqrt\nfrom random import random\n\n\ndef point():\n \"\"\"\n Genera un punto aleatorio en el cuadrado comprendido entre los puntos\n (-1, 1), (1,1), (1,-1), (-1,-1) y luego chequea si el punto cayó dentro del\n círculo unitario.\n \"\"\"\n x, y = 2 * random() - 1, 2 * random() - 1\n return(int(x ** 2 + y ** 2 <= 1))\n\n\ndef interval(avg, Zalpha, S, n):\n return(avg - Zalpha * S / sqrt(n), avg + Zalpha * S / sqrt(n))\n\n\ndef experiment(d, const, Iter):\n d, sample, var, n = (d / const) ** 2, [point()], 0, 1\n mean_old = sample[0]\n\n while var / n > d or n < Iter:\n sample.append(point())\n mean_new = mean_old + (sample[n] - mean_old) / (n + 1)\n var = (1 - 1 / n) * var + (n + 1) * ((mean_new - mean_old) ** 2)\n mean_old = mean_new\n n += 1\n return(mean_new, var, n)\n\n\nif __name__ == '__main__':\n \"\"\"\n Enunciado: Utilizando la función 'point' obtener un intervalo de ancho\n menor que 0.1, el cual contenga a p con el 95% de confianza. ¿Cuántas\n ejecuciones son necesarias?\n \"\"\"\n d, const, Iter = 0.1, (2 * 1.96), 100\n mean, var, n = experiment(d, const, Iter)\n print(\"Estimación de PI: {}\".format(4 * mean))\n print(\"Cantidad de iteraciones: {}\".format(n))\n const = 1.96\n a, b = interval(mean, const, var, n)\n print(\"Intervalo de confianza: ({}, {})\".format(4 * a, 4 * b))\n","sub_path":"ModelosySimulacion/modysim-master/practical6/exerc5.py","file_name":"exerc5.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"2526562","text":"\"\"\"\nwhile循环案例\n随机获取50-100内10个互不相等的10个数\n\"\"\"\nimport random\n\n\n# 方式1:通过判断和字典存储数据和打印信息来校验参数生成数据\ndef get_numbers(start: int, end: int, number: int):\n \"\"\"\n 获取一定范围内互不相等的一定数量的值\n :param start:范围的起始值\n :param end:范围的结束值\n :param number:获取的数量\n :return:返回集合\n \"\"\"\n # 定义一个集合\n # numbers = []\n # 定义一个字典存储结果\n results = {\"data\": [], 'info': \"\"}\n if end - start < number:\n results['info'] = \"提供数字范围过小,不符合要求\"\n # 开始循环,数量没控制用while\n return results\n else:\n while True:\n # 获取一个随机数,randint是右闭区间,randrange是右开区间获取\n temp = random.randint(start, end)\n # 判断集合里是都有这个值\n if temp not in results[\"data\"]:\n results[\"data\"].append(temp)\n # 判断数量是否达到要求\n if len(results[\"data\"]) == number:\n break\n return results\n\n\n# 方式1:通过异常机制判断参数\ndef get_numbers1(start: int, end: int, number: int):\n \"\"\"\n 获取一定范围内互不相等的一定数量的值\n :param start:范围的起始值\n :param end:范围的结束值\n :param number:获取的数量\n :return:返回集合\n \"\"\"\n # 定义一个集合\n numbers = []\n # 定义一个字典存储结果\n if end - start < number:\n raise Exception('提供数字范围过小,不符合要求')\n\n while True:\n # 获取一个随机数,randint是右闭区间,randrange是右开区间获取\n temp = random.randint(start, end)\n # 判断集合里是都有这个值\n if temp not in numbers:\n numbers.append(temp)\n # 判断数量是否达到要求\n if len(numbers) == number:\n break\n return numbers\n\n\nif __name__ == '__main__':\n \"\"\"\n # 方法1\n result = get_numbers(11, 100, 100)\n if len(result['info']) == 0:\n print(\"获取的集合为: \", result['data'])\n else:\n print(result['info']) \n \"\"\"\n try:\n list01 = get_numbers1(50, 100, 10)\n print(\"获取的集合为:\", list01)\n except Exception as e:\n print(str(e))\n\n\n\n\n\n","sub_path":"demo/exe/Demo3.py","file_name":"Demo3.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"311087145","text":"import pprint\nfrom datetime import datetime, timedelta\nimport ast\nimport time as timee\nimport re\nimport os\n\nimport routing\nimport xbmcgui\nimport xbmc\nimport xbmcvfs\nimport xbmcaddon\nfrom xbmcgui import ListItem\nfrom xbmcplugin import addDirectoryItem, endOfDirectory, setResolvedUrl, setContent\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom resources.lib import tamilyogi\nfrom resources.lib import tamilrasigan\nfrom resources.lib import tamilgun\nfrom resources.lib import tamildbox\nfrom resources.lib import thiraimix\nfrom resources.lib import tamilarasan\nfrom resources.lib import tamildhool\nfrom resources.lib import yupptv\nfrom resources.lib import tamilan\nfrom resources.lib import utils\nfrom resources.lib.utils import MovieViewTypeEnum\n\nplugin = routing.Plugin()\nytv = yupptv.Yupptv(plugin)\ntamilan = tamilan.Tamilan()\n\n# SITE VIEW - MAIN VIEW\n@plugin.route(\"/\")\ndef index():\n \"\"\"\n All site here\n :return:\n \"\"\"\n\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(section_view, site_name=\"tamilyogi\"),\n ListItem(\"Tamil Yogi\"),\n True,\n )\n # addDirectoryItem(\n # plugin.handle,\n # plugin.url_for(section_view, site_name=\"yupptv\"),\n # ListItem(\"YuppTV\"),\n # True,\n # ),\n # addDirectoryItem(\n # plugin.handle,\n # plugin.url_for(section_view, site_name=\"tamilan\"),\n # ListItem(\"Tamilan.NET\"),\n # True,\n # )\n # addDirectoryItem(\n # plugin.handle,\n # plugin.url_for(section_view, site_name=\"tamilgun\"),\n # ListItem(\"Tamilgun\"),\n # True,\n # )\n # addDirectoryItem(\n # plugin.handle,\n # plugin.url_for(section_view, site_name=\"tamildbox\"),\n # ListItem(\"Tamildbox\"),\n # True,\n # )\n # # addDirectoryItem(plugin.handle, plugin.url_for(section_view, site_name=\"thiraimix\"), ListItem(\"ThiraiMix\"), True)\n # addDirectoryItem(\n # plugin.handle,\n # plugin.url_for(section_view, site_name=\"tamilarasan\"),\n # ListItem(\"Tamilarasan\"),\n # True,\n # )\n # addDirectoryItem(\n # plugin.handle,\n # plugin.url_for(section_view, site_name=\"tamildhool\"),\n # ListItem(\"TamilDhool\"),\n # True,\n # )\n\n # addDirectoryItem(plugin.handle, plugin.url_for(directplay), ListItem(\"Test\"), True)\n\n endOfDirectory(plugin.handle)\n\n\n# SECTION VIEW\n@plugin.route(\"/sections/\")\ndef section_view(site_name):\n \"\"\"\n all sections by sites. 
sections are getted from site api\n :param site_name:\n :return:\n \"\"\"\n\n setContent(plugin.handle, \"files\")\n\n if site_name == \"tamilyogi\":\n site_api = tamilyogi.TamilYogi(plugin)\n\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"tamilrasigan\":\n site_api = tamilrasigan.TamilRasigan(plugin)\n\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"thiraimix\":\n site_api = thiraimix.Thiraimix(plugin)\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"tamilgun\":\n site_api = tamilgun.Tamilgun(plugin)\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"tamildbox\":\n site_api = tamildbox.Tamildbox(plugin)\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"tamilarasan\":\n site_api = tamilarasan.Tamilarasan(plugin)\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"tamildhool\":\n site_api = tamildhool.TamilDhool(plugin)\n for section in site_api.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n programmes_view,\n site_name=site_name,\n url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"yupptv\" and ytv.hasCredentiels:\n for section in ytv.get_sections():\n if section[\"url\"] == \"reconnect\":\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n action_handler, site_name=site_name, action_type=\"reconnect\",\n ),\n ListItem(section[\"name\"]),\n True,\n )\n else:\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n channel_view,\n site_name=site_name,\n url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n\n elif site_name == \"tamilan\":\n for section in tamilan.get_sections():\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n movies_view,\n site_name=site_name,\n section_url=utils.encode_url(section[\"url\"]),\n ),\n ListItem(section[\"name\"]),\n True,\n )\n # elif site_name == \"yupptvlive\":\n # url = ytv.find_stream_url(\"https://www.yupptv.com/channels/sun-tv-hd/live\")\n # plugin.redirect(\"/playable/{}/{}\".format(\"playable\", utils.encode_url(url)))\n\n endOfDirectory(plugin.handle)\n\n\n@plugin.route(\"/action_handler//\")\ndef action_handler(site_name, action_type):\n print(\"#### ACTION HANDLER\")\n if site_name == \"yupptv\":\n if action_type == \"reconnect\":\n ytv.logout(ytv.box_id)\n 
ytv.login()\n\n\n@plugin.route(\"/channel//\")\ndef channel_view(site_name, url):\n url = utils.decode_url(url)\n\n if site_name == \"yupptv\":\n channels = ytv.get_channels(url)\n for channel in channels:\n listitem = ListItem(channel[\"name\"])\n listitem.setArt(channel[\"image\"])\n if url == \"live\":\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n episode_view,\n site_name=site_name,\n url=utils.encode_url(channel[\"url\"]),\n ),\n listitem,\n True,\n )\n # plugin.redirect(\n # \"/playable/{}/{}\".format(channel[\"name\"], utils.encode_url(url))\n # )\n\n elif url == \"catchup\":\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n day_view,\n site_name=site_name,\n url=utils.encode_url(channel[\"url\"]),\n ),\n listitem,\n True,\n )\n\n endOfDirectory(plugin.handle)\n\n\n@plugin.route(\"/day//\")\ndef day_view(site_name, url):\n url = utils.decode_url(url)\n\n if site_name == \"yupptv\":\n for idx in range(10):\n listitem = ListItem(\n (datetime.now() - timedelta(days=idx)).strftime(\"%a - %d %b\")\n )\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n programmes_view,\n site_name=site_name,\n url=utils.encode_url(\"{}/{}\".format(url, idx + 1)),\n ),\n listitem,\n True,\n )\n\n endOfDirectory(plugin.handle)\n\n\n@plugin.route(\"/programmes//\")\ndef programmes_view(site_name, url):\n\n url = utils.decode_url(url)\n\n if site_name == \"thiraimix\":\n site_api = thiraimix.Thiraimix(plugin)\n programmes = site_api.get_programmes(url)\n\n for programme in programmes:\n listitem = ListItem(programme[\"name\"])\n listitem.setArt(programme[\"image\"])\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n episode_view,\n site_name=site_name,\n url=utils.encode_url(programme[\"url\"]),\n ),\n listitem,\n True,\n )\n\n elif site_name == \"tamildhool\":\n site_api = tamildhool.TamilDhool(plugin)\n programmes = site_api.get_programmes(url)\n\n for programme in programmes:\n listitem = ListItem(programme[\"name\"])\n listitem.setArt(programme[\"image\"])\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n episode_view,\n site_name=site_name,\n url=utils.encode_url(programme[\"url\"]),\n ),\n listitem,\n True,\n )\n\n elif site_name == \"yupptv\":\n programmes = ytv.get_programmes(url)\n for programme in programmes:\n listitem = ListItem(programme[\"name\"])\n listitem.setArt(programme[\"image\"])\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n episode_view,\n site_name=site_name,\n url=utils.encode_url(programme[\"url\"]),\n ),\n listitem,\n True,\n )\n\n endOfDirectory(plugin.handle)\n\n\n@plugin.route(\"/episodes//\")\ndef episode_view(site_name, url):\n\n setContent(plugin.handle, \"videos\")\n url = utils.decode_url(url)\n\n if site_name == \"thiraimix\":\n site_api = thiraimix.Thiraimix(plugin)\n episodes = site_api.get_episodes(url)\n\n elif site_name == \"tamildhool\":\n site_api = tamildhool.TamilDhool(plugin)\n episodes = site_api.get_episodes(url)\n\n elif site_name == \"yupptv\":\n if \"live\" in url:\n url = ytv.find_stream_url(url)\n plugin.redirect(\n \"/playable/{}/{}\".format(\"Play Live\", utils.encode_url(url))\n )\n\n else:\n url = ytv.find_stream_url(url)\n plugin.redirect(\"/playable/{}/{}\".format(\"Play\", utils.encode_url(url)))\n\n return\n\n for episode in episodes:\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n stream_list_view,\n site_name=site_name,\n movie_name=episode[\"prog_name\"],\n movie_url=utils.encode_url(episode[\"url\"]),\n ),\n ListItem(episode[\"name\"]),\n True,\n )\n\n endOfDirectory(plugin.handle)\n 
xbmc.executebuiltin(\"Container.SetViewMode(508)\")\n\n\n@plugin.route(\"/movies//\")\ndef movies_view(site_name, section_url):\n \"\"\"\n show all movies from url (section_url).\n :param site_name:\n :param section_url:\n :return:\n \"\"\"\n\n setContent(plugin.handle, \"videos\")\n\n # window = xbmcgui.Window(xbmcgui.getCurrentWindowId())\n # print(window.getProperty(\"visuel-view\"))\n # print(plugin.getSetting(\"visuel-view\"))\n # xbmc.executebuiltin(\"Container.SetViewMode(55)\")\n # window = xbmcgui.Window(xbmcgui.getCurrentWindowId())\n # print(dir(window))\n # print('### PROP')\n # print(window.getProperty())\n\n section_url = utils.decode_url(section_url)\n\n if site_name == \"tamilyogi\":\n site_api = tamilyogi.TamilYogi(plugin)\n\n elif site_name == \"tamilrasigan\":\n site_api = tamilrasigan.TamilRasigan(plugin)\n\n elif site_name == \"tamilgun\":\n site_api = tamilgun.Tamilgun(plugin)\n\n elif site_name == \"tamildbox\":\n site_api = tamildbox.Tamildbox(plugin)\n\n elif site_name == \"tamilarasan\":\n site_api = tamilarasan.Tamilarasan(plugin)\n\n elif site_name == \"tamildhool\":\n site_api = tamildhool.TamilDhool(plugin)\n\n elif site_name == \"tamilan\":\n site_api = tamilan\n\n movies = site_api.get_movies(section_url)\n\n if len(movies) == 0:\n xbmcgui.Dialog().notification(\"Error 404\", \"No movies found\")\n setContent(plugin.handle, \"files\")\n plugin.redirect(\"/sections/{}\".format(site_name))\n\n for movie in movies:\n listitem = ListItem(utils.color_movie_name(movie[\"name\"]))\n listitem.setArt({\"thumb\": movie[\"image\"], \"poster\": movie[\"image\"]})\n listitem.setInfo(\"video\", movie[\"infos\"])\n addDirectoryItem(\n plugin.handle,\n plugin.url_for(\n stream_list_view,\n site_name=site_name,\n movie_name=movie[\"name\"],\n movie_url=utils.encode_url(movie[\"url\"]),\n ),\n listitem,\n True,\n )\n\n # print(\"###### MODE\")\n # print(xbmc.executebuiltin(\"Container.Viewmode\"))\n endOfDirectory(plugin.handle)\n xbmc.executebuiltin(\"Container.SetViewMode(508)\")\n\n\n@plugin.route(\"/playable//\")\ndef playable(name, url):\n listitem = ListItem(name)\n listitem.setProperty(\"IsPlayable\", \"true\")\n addDirectoryItem(\n plugin.handle, utils.decode_url(url), listitem, False,\n )\n endOfDirectory(plugin.handle)\n\n\n@plugin.route(\"/directplay\")\ndef directplay():\n listitem = ListItem(\"test\")\n listitem.setProperty(\"IsPlayable\", \"true\")\n # url = ytv.find_stream_url(\"https://www.yupptv.com/channels/sun-tv-hd/live\")\n\n # addDirectoryItem(\n # plugin.handle,\n # 'https://www1411.hlsmp4.com/token=RYov85oqYWpHg890KDQYiQ/1559514826/0.0.0.0/47/6/bb/cb0147d55dfd8f9e3b9b793083480bb6-720p.mp4',\n # listitem,\n # False\n # )\n\n # url = \"{}|User-Agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\".format(\n # url\n # )\n\n addDirectoryItem(\n plugin.handle, url, listitem, False,\n )\n endOfDirectory(plugin.handle)\n\n # url = \"https://yuppmsluk.akamaized.net/hls/live/2007561/jayatv/jayatv/jayatv_200/chunklist.m3u8?hdntl=exp=1590055482~acl=!*/jayatv/jayatv/*!/payload/yuppTVCom_5_5580371_79a18526b9174175_FR_82.65.54.29/*~data=hdntl~hmac=e73ae3b1d9497c206e2ceefe26eaee2dfd2bb83026d4f97de0d6a6e08e1d35e5\"\n # item = {\n # \"label\": \"Playable\",\n # \"path\": url,\n # \"is_playable\": True,\n # }\n # return plugin.set_resolved_url(item)\n\n\n# STREAM LIST VIEW\n@plugin.route(\"/stream_list///\")\ndef stream_list_view(site_name, movie_name, movie_url):\n \"\"\"\n show all stream for 
the movie with quality\n :param site_name:\n :param movie_name:\n :param movie_url:\n :return:\n \"\"\"\n\n movie_url = utils.decode_url(movie_url)\n\n # If hit Next page\n if movie_name == \"Next Page\":\n plugin.redirect(\"/movies/{}/{}\".format(site_name, utils.encode_url(movie_url)))\n\n else:\n if site_name == \"tamilyogi\":\n site_api = tamilyogi.TamilYogi(plugin)\n\n elif site_name == \"tamilrasigan\":\n site_api = tamilrasigan.TamilRasigan(plugin)\n\n elif site_name == \"tamilgun\":\n site_api = tamilgun.Tamilgun(plugin)\n\n elif site_name == \"thiraimix\":\n site_api = thiraimix.Thiraimix(plugin)\n\n elif site_name == \"tamildbox\":\n site_api = tamildbox.Tamildbox(plugin)\n\n elif site_name == \"tamilarasan\":\n site_api = tamilarasan.Tamilarasan(plugin)\n\n elif site_name == \"tamildhool\":\n site_api = tamildhool.TamilDhool(plugin)\n\n elif site_name == \"yupptv\":\n site_api = ytv\n\n elif site_name == \"tamilan\":\n site_api = tamilan\n\n stream_urls = site_api.get_stream_urls(movie_name, movie_url)\n\n if len(stream_urls) == 0:\n xbmcgui.Dialog().notification(\n heading=\"Error 404\", message=\"Video is no longer available\"\n )\n\n else:\n for stream_url in stream_urls:\n listitem = ListItem(stream_url[\"name\"] + \" | \" + stream_url[\"quality\"])\n listitem.setInfo(type=\"video\", infoLabels={\"Title\": stream_url[\"name\"]})\n listitem.setArt({\"icon\": stream_url[\"quality_icon\"]})\n listitem.setProperty(\"IsPlayable\", \"true\")\n addDirectoryItem(plugin.handle, stream_url[\"url\"], listitem, False)\n\n endOfDirectory(plugin.handle)\n\n\nif __name__ == \"__main__\":\n plugin.run()\n","sub_path":"plugin.video.tamilstreamer/addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":18258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"609645686","text":"'''\nIl tris e' un popolarissimo gioco. Si gioca su una griglia quadrata di 3x3 caselle.\nA turno, i due giocatori scelgono una cella vuota e vi disegnano il proprio simbolo \n(un giocatore ha come simbolo una \"o\" e l'avversario una 'x'). \nVince il giocatore che riesce a disporre tre dei propri simboli in linea retta \norizzontale, verticale o diagonale. Se la griglia viene riempita \nsenza che nessuno dei giocatori sia riuscito a completare una linea \nretta di tre simboli, il gioco finisce in parita'. Nel caso in cui il gioco \nfinisse in parita', la partita e' detta \"patta\". \nPer convenzione a griglia vuota la prima mossa spetta sempre al giocatore 'o'\n\nUna configurazione del gioco e' dunque univocamente determinata dal contenuto della griglia.\n\nNel seguito assumiamo che il contenuto della griglia sia rappresentato tramite lista di liste.\nLa dimensione della lista di liste M e' 3x3 ed M[i][j] contiene '', 'x', o 'o' a seconda \nche la cella della griglia appartenente all'iesima riga e j-ma colonna sia ancora libera, \ncontenga il simbolo 'x' o contenga il simbolo 'o'. \n\nData una configurazione C del gioco, l'albero di gioco per C e' l'albero che \nsi ottiene ricorsivamente partendo dalla configurazione C e assegnando come figli le configurazioni \nche e' possibile ottenere da C con una mossa ulteriore del gioco. Ovviamente risulteranno \nfoglie dell'albero i possibili esiti della partita vale a dire le diverse configurazioni cui e' \npossibile arrivare partendo da C e che rappresentano patte, vittorie per 'o' o vittorie per 'x'.\nSe veda ad esempio l'immagine albero_di_gioco.png che mostra l' albero di gioco che si ottiene a partire \ndalla configurazione rappresentata da [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\n \n\nSi consideri la seguente Classe di oggetti:\n\n\nclass NodoTris:\n def __init__(self, griglia):\n self.nome = griglia\n self.lista_figli = [] \n\n\nBisogna progettare le seguente funzione \n\ngen_tree(griglia)\nche, data la configurazione di gioco griglia, costruisce l'albero di gioco che si ottiene a partire \ndalla configurazione griglia e ne restituisce la radice. I nodi dell'albero devono essere \noggetti della classe NodoTris.\n\nPer testare la correttezza della vostra implementazione di gen_tree() il grade utilizzera' quattro metodi \ndella classe NodoTris che dovete comunque implementare: \n\n1)\ntipo(self)\nche, dato un nodo NodoTris, restituisce:\n 'o' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'o'\n 'x' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'x'\n '-' se la configurazione rappresentata dal nodo e' una configurazione di patta\n '?' se la configurazione rappresentata dal nodo e' una configurazione di gioco non ancora terminato\n\n2)\nesiti(self)\nche, dato un nodo radice di un albero di gioco, restituisce una tripla con i possibili \nesiti della partita che ha come configurazione iniziale quella rappresentata dal nodo. \nPiu' precisamente: il primo elemento della tripla e' il numero di patte possibili, \nil secondo e' il numero di possibili vittorie per il giocatore 'o' mentre il terzo elemento \ne' il numero di possibili vittorie per il giocatore 'x'.\n\n3)\nvittorie_livello(self, giocatore, h)\nche, dato un nodo radice di un albero di gioco, uno dei due giocatori ed un intero h,\nrestituisce il numero di nodi che rappresentano una vittoria per il giocatore e si \ntrovano ad altezza h nell'albero. 
In altri termini restituisce il numero di vittorie possibili \nper giocatore in esattamente h mosse, nella partita che ha come configurazione iniziale \nquella rappresentata dalla radice dell'albero.\n\n4)\nstrategia_vincente(self,giocatore)\nche, dato un nodo radice di un albero di gioco ed uno dei due giocatori, restituisce True o False. \nRestituisce True se giocatore ha una strategia vincente nella partita \nche ha come configurazione iniziale quella rappresentata dal nodo radice, False altrimenti.\n\nNota che un giocatore ha una strategia vincente rispetto ad una certa configurazione se, \nqualunque siano le mosse dell'avversario ha sempre la possibilita' di rispondere in modo \nche la partita termini con la sua vittoria.\n\nPotete ovviamente definire ulteriori funzioni e altri metodi per la Classe NodiTris \nse li ritenete utili al fine della risoluzione del compito.\n\nPotete assumere che le configurazioni di gioco rappresentate da griglia siano sempre configurazioni \nlecite (vale a dire ottenute dopo un certo numero di mosse a parire dalla griglia vuota).\n\n\nAVVERTENZE: non usare caratteri non ASCII, come le lettere accentate; non\nimportare moduli che non sono nella libreria standard.\n\nATTENZIONE: i test vengono eseguiti con un timeout globale di 2*N secondi (se il grader esegue N test).\n'''\nimport time\nclass NodoTris:\n def __init__(self, griglia, turno=''):\n self.nome = griglia\n self.lista_figli = [] #lista dei nodi figli\n self.vincitore = self.tipo()\n \n if turno:\n self.turno = turno\n else:\n self.turno = self.mossa()\n \n def tipo(self):\n vuote = len(self.celle_vuote())\n if vuote <= 4: #Se ci sono piu' di 4 caselle vuote, la partita sicuramente non e' ancora finita\n for y in range(3):\n if self.nome[y][0] != '' and self.nome[y][0] == self.nome[y][1] and self.nome[y][0] == self.nome[y][2]:\n return self.nome[y][0]\n \n for x in range(3):\n if self.nome[0][x] != '' and self.nome[0][x] == self.nome[1][x] and self.nome[0][x] == self.nome[2][x]:\n return self.nome[0][x]\n \n if self.nome[0][0] != '' and self.nome[0][0] == self.nome[1][1] and self.nome[0][0] == self.nome[2][2]:\n return self.nome[0][0]\n \n if self.nome[2][0] != '' and self.nome[2][0] == self.nome[1][1] and self.nome[2][0] == self.nome[0][2]:\n return self.nome[2][0]\n \n if vuote == 0:\n return '-'\n else:\n return '?'\n else:\n return '?'\n \n def esiti(self, patte=0,vinceO=0,vinceX=0):\n '''inserire qui il vostro codice'''\n if self.vincitore == '-':\n return (patte+1,vinceO,vinceX)\n elif self.vincitore == 'o':\n return (patte,vinceO+1,vinceX)\n elif self.vincitore == 'x':\n return (patte,vinceO,vinceX+1)\n \n if self.vincitore == '?':\n for figlio in self.lista_figli:\n patte, vinceO, vinceX = figlio.esiti(patte,vinceO,vinceX)\n \n return (patte,vinceO,vinceX)\n \n def vittorie_livello(self, giocatore, h, corrente=0, conta=0):\n '''inserire qui il vostro codice'''\n if corrente <= h: \n if self.vincitore == giocatore and corrente == h: #Se vince il giocatore dopo h mosse\n return conta+1\n \n for figlio in self.lista_figli:\n conta = figlio.vittorie_livello(giocatore,h,corrente+1, conta)\n \n return conta\n \n\n def strategia_vincente(self, giocatore):\n\n if giocatore == 'o': avversario = 'x'\n else: avversario = 'o'\n\n #caso base\n if self.vincitore == giocatore:\n return True\n elif self.vincitore == avversario or self.vincitore == '-':\n return False\n\n if self.turno == giocatore: #Se tocca al giocatore,\n for figlio in self.lista_figli:\n if figlio.strategia_vincente(giocatore) == True: 
#Basta che uno dei suoi figlio abbia una strategia vincente e puo' vincere\n return True\n return False\n else: #Se tocca all'avversario \n for figlio in self.lista_figli:\n if figlio.strategia_vincente(giocatore) == False: #Se solo un figlio non puo' vincere, non ha una strategia vincente\n return False\n return True\n \n \n \n \n \n \n #------ Metodi aggiuntivi -------\n \n def mossa(self):\n '''Ritorna a chi tocca il turno ('x' o 'o')'''\n count = 0\n for riga in self.nome:\n for cella in riga:\n\n if cella != '':\n count += 1\n \n if count % 2 == 0:\n return 'o'\n else: \n return 'x'\n \n def celle_vuote(self):\n '''Ritorna una lista con gli indici delle celle vuote di una griglia'''\n ris = []\n for y in range(len(self.nome)):\n for x in range(len(self.nome[0])):\n if self.nome[y][x] == '':\n ris.append((x,y))\n return ris\n \ndef copiagriglia(lista):\n x0 = lista[0][:]\n x1 = lista[1][:]\n x2 = lista[2][:]\n return [x0]+[x1]+[x2]\n\ndef gen_tree(griglia, simbolo=''):\n '''inserire qui il vostro codice'''\n radice = NodoTris(griglia, simbolo)\n if radice.turno == 'x':\n simbolo = 'o'\n elif radice.turno == 'o':\n simbolo = 'x'\n\n if radice.vincitore == '?':\n for x,y in radice.celle_vuote():\n copia = copiagriglia(griglia)\n copia[y][x] = radice.turno\n radice.lista_figli.append(gen_tree(copia,simbolo))\n return radice\n \n \n","sub_path":"students/1815023/homework04/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"227400675","text":"# RGB to Hex Conversion\n\n\ndef rgb(r, g, b):\n \"\"\"\n purpose:\n convert a rgb value to its corresponding hex value\n Pre:\n :param r: an integer, if r<0, assume 0, if r>255, assume 255\n :param g: an integer, if g<0, assume 0, if g>255, assume 255\n :param b: an integer, if b<0, assume 0, if b>255, assume 255\n\n post:\n None\n :return:\n The Hex value for the rbg as a string\n \"\"\"\n def rounder(c):\n if c < 0:\n c = 0\n return c\n elif c >= 255:\n c = 255\n return c\n else:\n return c\n\n def rgb_helper(c):\n color = list()\n if c == 0:\n color.append(0)\n color.append(0)\n return color\n elif c < 9:\n color.append(c)\n color.append(0)\n return color\n else:\n while c > 0:\n color.append(c % 16)\n c = c//16\n return color\n\n def converter(color):\n # where color is a list\n for i in range(len(color)):\n if color[i] == 15:\n color[i] = \"F\"\n elif color[i] == 14:\n color[i] = \"E\"\n elif color[i] == 13:\n color[i] = \"D\"\n elif color[i] == 12:\n color[i] = \"C\"\n elif color[i] == 11:\n color[i] = \"B\"\n elif color[i] == 10:\n color[i] = \"A\"\n else:\n color[i] = str(color[i])\n return color\n # reverse of the list to get proper HEX value\n\n r = rounder(r)\n g = rounder(g)\n b = rounder(b)\n\n red = rgb_helper(r)\n green = rgb_helper(g)\n blue = rgb_helper(b)\n\n r = converter(red)[::-1]\n g = converter(green)[::-1]\n b = converter(blue)[::-1]\n\n rgb = r+g+b\n\n return \"\".join(rgb)\n\n\n","sub_path":"RBGtoHEX.py","file_name":"RBGtoHEX.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"62049154","text":"class Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n if len(nums1) > len(nums2):\n nums1, nums2 = nums2, nums1\n if len(nums1) == 0:\n if len(nums2) == 0:\n return 0\n elif len(nums2) % 2 > 0:\n return nums2[len(nums2) // 2]\n else:\n return (nums2[len(nums2) // 2] + nums2[len(nums2) // 2 - 1]) / 2\n start = 1\n end = len(nums1)\n total = len(nums1) + len(nums2) \n mid = (total) // 2\n while True:\n indexA = (start + end) // 2\n indexB = mid - indexA\n if indexA == 0:\n ALeft = -float(\"inf\")\n ARight = nums1[0]\n if mid == len(nums2):\n BLeft = nums2[-1]\n BRight = float(\"inf\")\n else:\n BLeft = nums2[mid-1]\n BRight = nums2[mid]\n elif indexA == len(nums1):\n ARight = float(\"inf\")\n ALeft = nums1[-1]\n if mid == len(nums1):\n BLeft = -float(\"inf\")\n BRight = nums2[0]\n else:\n BLeft = nums2[mid - len(nums1) -1]\n BRight = nums2[mid - len(nums1)]\n else:\n ALeft = nums1[indexA - 1]\n ARight = nums1[indexA]\n BLeft = nums2[indexB - 1]\n BRight = nums2[indexB]\n # print(ALeft, ARight, BLeft, BRight)\n if ALeft > BRight:\n end = indexA-1\n elif BLeft > ARight:\n start = indexA + 1\n end = len(nums1)\n else:\n break\n if total % 2 > 0:\n return min(ARight, BRight)\n else:\n return (max(ALeft, BLeft) + min(ARight, BRight)) / 2\n","sub_path":"accepted_codes/Median_of_Two_Sorted_Arrays/Median of Two Sorted Arrays_278520644.py","file_name":"Median of Two Sorted Arrays_278520644.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"493550048","text":"import INPUTS as c\nimport pandas as pd\n\n\ndf = pd.read_csv(c.OUTPUT_DF_PATH, sep=\" \")\n\n# get columns names\ncol = df.columns\nprint(df.columns)\n\n# switch Y with Z in columns names\ncol2 = ['name', 'label', 'ID_tree', 'sec1', 'Tmin_SEEK', 'Tmean_SEEK',\n 'Tmax_SEEK', 'sec2', 'Tmin_SEEK_corr_avg', 'Tmean_SEEK_corr_avg',\n 'Tmax_SEEK_corr_avg', 'Tmin_SEEK_corr_max', 'Tmean_SEEK_corr_max',\n 'Tmax_SEEK_corr_max', 'sec3', 'X_mm', 'Z_mm', 'Z_cam_mm', 'Y_mm',\n 'X_trunk_mm', 'Z_trunk_mm', 'Z_trunk_cam_mm', 'Y_trunk_mm', 'sec4',\n 'X_relativo', 'Z_relativo', 'Y_relativo', 'X_trunk_rel', 'Z_trunk_rel',\n 'Y_trunk_rel', 'sec5', 'X_rel_range_mm', 'Z_rel_range_mm',\n 'Y_rel_range_mm', 'X_trunk_rel_mm', 'Z_trunk_rel_mm', 'Y_trunk_rel_mm',\n 'sec6', 'Tree_wall', 'Card_dir', 'Estim_Fruit_diam_mm']\n\n# positioning Y coords after X and before Z\ncol3 = ['name', 'label', 'ID_tree', 'sec1', 'Tmin_SEEK', 'Tmean_SEEK',\n 'Tmax_SEEK', 'sec2', 'Tmin_SEEK_corr_avg', 'Tmean_SEEK_corr_avg',\n 'Tmax_SEEK_corr_avg', 'Tmin_SEEK_corr_max', 'Tmean_SEEK_corr_max',\n 'Tmax_SEEK_corr_max', 'sec3', 'X_mm', 'Y_mm', 'Z_mm', 'Z_cam_mm',\n 'X_trunk_mm', 'Y_trunk_mm', 'Z_trunk_mm', 'Z_trunk_cam_mm', 'sec4',\n 'X_relativo', 'Y_relativo', 'Z_relativo', 'X_trunk_rel', 'Y_trunk_rel', 'Z_trunk_rel',\n 'sec5', 'X_rel_range_mm', 'Y_rel_range_mm', 'Z_rel_range_mm',\n 'X_trunk_rel_mm', 'Y_trunk_rel_mm', 'Z_trunk_rel_mm',\n 'sec6', 'Tree_wall', 'Card_dir', 'Estim_Fruit_diam_mm']\n\n# intermediate df: switch Y with Z in columns names\ndf2 = pd.DataFrame(columns= col2)\n\ncount = 0\nfor item in df2.columns:\n df2[item] = df[col[count]]\n count +=1\n\ndf3 = df2[col3]\n\ndf3.to_csv(c.DELIVERABLE_OUTPUT_DF_PATH, sep=\" \")\n","sub_path":"SHEET_RGBDT_system/reordering_output_CSV_for_DELIVERABLE.py","file_name":"reordering_output_CSV_for_DELIVERABLE.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"16698942","text":"import smtplib\nimport os\nimport praw\nimport subprocess\n\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\n\n\ndef email ():\n fromemail = \"\"\n toemail = \"\"\n \n message = MIMEMultipart()\n message['From'] = fromemail\n message['To'] = toemail\n \n date = subprocess.Popen(\"date +'%A %d %B'\", stdout=subprocess.PIPE, shell=True)\n date = date.communicate()[0]\n \n message['Subject'] = \"Top Reddit Posts for %s!\" % date\n \n with open(\"todaysredditnews.txt\", \"r\") as body_file:\n body = body_file.read()\n \n message.attach(MIMEText(body, 'plain'))\n \n mail_server = smtplib.SMTP('smtp.gmail.com', 587)\n mail_server.starttls()\n mail_server.login(fromemail, \"\")\n text = message.as_string()\n \n mail_server.sendmail(fromemail, toemail, text)\n mail_server.quit()\n \n return\n \n \ndef get_posts (reddit_bot, subreddits):\n \n posts = []\n \n for sub in subreddits:\n subreddit = reddit_bot.get_subreddit(sub)\n posts.append([\"Top 5 posts for /r/\" + sub])\n posts.append([submission.title + \" \" + str(submission.score) + \" \" + submission.url for submission in subreddit.get_top(limit = 5)])\n \n with open(\"todaysredditnews.txt\", \"r+\") as trnfile:\n for subreddit in posts:\n for section in subreddit:\n trnfile.write(section + \"\\n\")\n \n return\n\n\ndef main ():\n reddit_bot = praw.Reddit(user_agent = \"todaysredditnews 1.0 by /u/_afk4790\")\n subreddits = [\"all\", \"movies\", \"pcmasterrace\", \"linuxmasterrace\"]\n \n get_posts(reddit_bot, subreddits)\n email()\n \n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"todaysredditnews.py","file_name":"todaysredditnews.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"317036137","text":"__author__ = 'sid'\n\nimport re\n\nimport common\nfrom common import *\nimport datetime\nfrom datetime import datetime\n\ndef getAppCharts(country, type):\n chartUrl = getChartUrl(country, type)\n chartFile = getChartFile(country, type)\n prefix = getApptUrlPrefix(country)\n section = getChartPageSection(country)\n\n date = datetime.now().strftime('%Y-%m-%d')\n f = open(DATA_DIR + \"/\" + chartFile + \"_\" + date, 'wb')\n mainPage = common.getPageAsSoup(chartUrl)\n total = 0\n appGrid = mainPage.find('section', {'class': section})\n i = 0\n for aDiv in appGrid.findAll('a', href=re.compile('^' + prefix)):\n if i == 0:\n i += 1\n elif i == 1:\n i += 1\n continue\n elif i == 2:\n i = 0\n continue\n\n # print(aDiv)\n appUrl = aDiv.get('href')\n img = aDiv.find('img')\n title = img.get('alt')\n iconUrl = img.get('src')\n print(title, '\\n', appUrl, '\\n', iconUrl)\n str_out = title + '\\n' + iconUrl + '\\n' + appUrl + '\\n'\n bytes_out = str_out.encode('utf-8')\n f.write(bytes_out)\n\n f.close()\n\ndef dumpChartsGlobal():\n getAppCharts('us', 'free')\n getAppCharts('us', 'paid')\n\ndef dumpChartsCN():\n getAppCharts('cn', 'free')\n getAppCharts('cn', 'paid')\n\nif __name__ == \"__main__\":\n dumpChartsCN();\n dumpChartsGlobal()\n #putToDb()\n\n\n","sub_path":"crawler_chart.py","file_name":"crawler_chart.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"35360188","text":"\"\"\"\nThis module contains helper functions that provide information about how\nQCoDeS is installed and about what other packages are installed along with\nQCoDeS\n\"\"\"\nimport sys\nfrom typing import Dict, List, Optional\nimport subprocess\nimport json\nimport logging\nimport requirements\n\nif sys.version_info >= (3, 8):\n from importlib.metadata import distribution, version, PackageNotFoundError\nelse:\n # 3.7 and earlier\n from importlib_metadata import distribution, version, PackageNotFoundError\nimport qcodes\n\n\nlog = logging.getLogger(__name__)\n\n\ndef is_qcodes_installed_editably() -> Optional[bool]:\n \"\"\"\n Try to ask pip whether QCoDeS is installed in editable mode and return\n the answer a boolean. Returns None if pip somehow did not respond as\n expected.\n \"\"\"\n\n answer: Optional[bool]\n\n try:\n pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',\n '--format=json'],\n check=True,\n stdout=subprocess.PIPE)\n e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))\n answer = any([d[\"name\"] == 'qcodes' for d in e_pkgs])\n except Exception as e: # we actually do want a catch-all here\n log.warning(f'{type(e)}: {str(e)}')\n answer = None\n\n return answer\n\n\ndef get_qcodes_version() -> str:\n \"\"\"\n Get the version of the currently installed QCoDeS\n \"\"\"\n return qcodes.version.__version__\n\n\ndef get_qcodes_requirements() -> List[str]:\n \"\"\"\n Return a list of the names of the packages that QCoDeS requires\n \"\"\"\n qc_pkg = distribution('qcodes').requires\n if qc_pkg is None:\n return []\n package_names = [list(requirements.parse(req))[0].name for req in qc_pkg]\n\n return package_names\n\n\ndef get_qcodes_requirements_versions() -> Dict[str, str]:\n \"\"\"\n Return a dictionary of the currently installed versions of the packages\n that QCoDeS requires. The dict maps package name to version string.\n If an (optional) dependency is not installed the name maps to \"Not installed\".\n \"\"\"\n\n req_names = get_qcodes_requirements()\n\n req_versions = {}\n\n for req in req_names:\n try:\n req_versions[req] = version(req)\n except PackageNotFoundError:\n req_versions[req] = \"Not installed\"\n\n return req_versions\n","sub_path":"qcodes/utils/installation_info.py","file_name":"installation_info.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"52304237","text":"import ast\nfrom ast import *\nimport json\nimport sys\nimport collections\nfrom PyCS.utils.InfoClass import InfoClass\nfrom PyCS.utils.find_show_code import find_show_code\n\n\ndef par(node):\n # PAR: number of parameters\n return len(node.args.args)\n\n\ndef isLPL(node):\n with open('PyCS/static/json/configure.json') as conf_file:\n conf = json.load(conf_file)\n THRESHOLD_PAR = conf[\"LPL\"][\"PAR\"]\n return par(node) >= THRESHOLD_PAR\n\n\ndef detect(file, content, codes):\n # PAR >= 5\n root = ast.parse(content)\n for n in ast.walk(root):\n if isinstance(n, FunctionDef) and isLPL(n):\n yield result_report(n, file, codes)\n\n\ndef get_function_decl(node):\n args_name = [x.arg for x in node.args.args]\n return node.name + '(' + ','.join(args_name) + ')'\n\n\ndef result_report(node, file, codes):\n return InfoClass(\n 'Long Parameter List',\n node.lineno,\n node.col_offset + 1,\n file,\n 'Method: {:s}'.format(get_function_decl(node)),\n 'A method or a function that has a long parameter list.'\n ' Try to divide it or reconstruct the method or function.',\n find_show_code(codes, node.lineno)\n )\n\nif __name__ == '__main__':\n detect('./temp.py')\n","sub_path":"PyCS/utils/LPLDetect.py","file_name":"LPLDetect.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"423569720","text":"from tkinter import *\nimport calendar\n\nclass searchTrain():\n def __init__(self, rootwin):\n self.searchTrain = rootwin\n self.titleFrame = None\n self.titleLabel = None\n self.search_trainFrame = None\n self.depart_fromLabel = None\n self.departs_fromOptionMenu = None\n self.arrives_atLabel = None\n self.arrives_atOptionMenu = None\n self.departure_date_label = None\n self.departure_date_entry = None\n\n self.calendarWindow = Toplevel()\n self.calendarWindow.title('Calendar')\n self.calendarWindow.withdraw()\n self.selectDepartureWindow = Toplevel()\n self.selectDepartureWindow.title('Select Departure')\n self.selectDepartureWindow.withdraw()\n\n self.make_search_train()\n\n def make_search_train(self):\n self.searchTrain.title(\"Search Train\")\n\n # title label frame\n self.titleFrame = Frame(rootwin)\n self.titleFrame.pack(side=TOP)\n\n # Search Train title label\n self.titleLabel = Label(self.titleFrame, text=\"Search Train\", font=(\"\", 20))\n self.titleLabel.grid(row=0, column=0, sticky=N+E+W, padx=50, pady=10)\n\n # Search Train frame\n self.search_trainFrame = Frame(rootwin)\n self.search_trainFrame.pack(side=TOP)\n\n # Departs From label\n depart_fromLabel = Label(self.search_trainFrame, text=\"Departs From\")\n depart_fromLabel.grid(row=0, column=0, sticky=W, padx=30, pady=10)\n\n # Departs From option menu\n self.departs_fromList = (\"StationA\", \"StationB\", \"StationC\", \"StationD\", \"StationE\", \"StationF\")\n self.departsFrom = StringVar()\n self.departs_fromOptionMenu = OptionMenu(self.search_trainFrame, self.departsFrom, *self.departs_fromList)\n self.departs_fromOptionMenu.config(bg = \"white\", bd = 2, width=30)\n self.departs_fromOptionMenu.grid(row=0, column=1, sticky=E, padx=30, pady=10)\n\n # Arrives At label\n self.arrives_atLabel = Label(self.search_trainFrame, text=\"Arrives At\")\n self.arrives_atLabel.grid(row=1, column=0, sticky=W, padx=30, pady=10)\n\n # Arrives At option menu\n self.arrives_atList = (\"StationA\", \"StationB\", \"StationC\", \"StationD\", \"StationE\", \"StationF\")\n self.arrivesAt = StringVar()\n self.arrives_atOptionMenu = OptionMenu(self.search_trainFrame, self.arrivesAt, *self.arrives_atList)\n self.arrives_atOptionMenu.config(bg = \"white\", bd = 2, width=30)\n self.arrives_atOptionMenu.grid(row=1, column=1, sticky=E, padx=30, pady=10)\n\n # Departure Date frame\n self.departure_date_frame = Frame(rootwin)\n self.departure_date_frame.pack(side=TOP)\n\n # Departure Date label\n self.departure_date_label = Label(self.departure_date_frame, text=\"Departure Date (M/D/Y)\", font=(\"\",14))\n self.departure_date_label.grid(row=0, column=0, padx=0, pady=30)\n\n # Month\n self.monthList = (1, 2, 3, 4, 5)\n self.month = IntVar()\n self.month_OptionMenu = OptionMenu(self.departure_date_frame, self.month, *self.monthList)\n self.month_OptionMenu.config(bg = \"white\", bd = 2, width=5)\n self.month_OptionMenu.grid(row=0, column=1, sticky=E, padx=2, pady=10)\n\n # Day\n self.dayList = (\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\",\n \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"19\", \"20\", \"21\", \"22\", \"23\",\n \"24\", \"25\", \"26\", \"27\", \"28\", \"29\", \"30\", \"31\")\n self.day = StringVar()\n self.day_OptionMenu = OptionMenu(self.departure_date_frame, self.day, *self.dayList)\n self.day_OptionMenu.config(bg = \"white\", bd = 2, width=5)\n self.day_OptionMenu.grid(row=0, column=2, sticky=E, padx=2, pady=10)\n\n # Year\n self.year = 
StringVar()\n self.year_OptionMenu = OptionMenu(self.departure_date_frame, self.year, \"2016\")\n self.year_OptionMenu.config(bg = \"white\", bd = 2, width=10)\n self.year_OptionMenu.grid(row=0, column=3, sticky=E, padx=2, pady=10)\n\n # back button\n self.backButton = Button(self.departure_date_frame, text=\"Back\")\n self.backButton.grid(row=1, column=0, sticky=W, padx=5, pady=5)\n\n # find trains button\n self.find_trainsButton = Button(self.departure_date_frame, text=\"Find Trains\", command=self.show_trains)\n self.find_trainsButton.grid(row=1, column=3, sticky=E, padx=5, pady=5)\n\n def show_trains(self):\n print(self.month.get())\n # self.searchTrain.withdraw()\n # self.showTrainWin = Toplevel()\n #\n # # title\n # self.showTrainTitle = Label(self.showTrainWin, text=\"Select Departure\", font=(\"\", 20))\n # self.showTrainTitle.grid(row=0, column=0, sticky=N + E + W, padx=50, pady=10)\n #\n # # Frame\n # self.showTrainFrame = Frame(self.showTrainWin)\n # self.showTrainFrame.grid(row=0,column=0)\n #\n # # Button frame\n # self.buttons = Frame(self.showTrainWin)\n # self.buttons.grid(row=1, column=0)\n #\n # # back button\n # self.backToSearch = Button(self.buttons, text=\"Back\")\n # self.backToSearch.grid(row=1, column=1, sticky=E, padx=5, pady=5)\n #\n # # next button\n # self.toPassInfo = Button(self.buttons, text=\"Next\")\n # self.toPassInfo.grid(row=1, column=3, sticky=E, padx=5, pady=5)\n\n\n\n\n\nrootwin = Tk()\napp = searchTrain(rootwin)\nrootwin.mainloop()","sub_path":"Done/search_train.py","file_name":"search_train.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"631730446","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport pymysql\n\nclass CrawdoubanPipeline(object):\n def __init__(self):\n self.client = pymysql.connect(\n host='127.0.0.1',\n port=3306,\n user='root',\n passwd='123456',\n db='demo',\n charset='utf8'\n )\n self.count = 0\n self.cur = self.client.cursor()\n print(\"-------------------测试\")\n def open_spider(self,spider):\n print(\"open-------------\")\n def process_item(self, item, spider):\n print(\"测试\"+'111111111111111111111111111')\n n = select_item(self,item['title'])\n if n==():\n insert_item(self,item)\n return item\n\ndef select_item(self,title):\n select_sql = \"select * from movie where title = %s\"\n self.cur.execute(select_sql,title)\n n = self.cur.fetchall()\n return n\n\ndef insert_item(self,item):\n insert_sql = \"insert into movie(title,score,qote) VALUES (%s,%s,%s)\"\n self.cur.execute(insert_sql,(item['title'],item['score'],item['qote']))\n self.client.commit()","sub_path":"crawDouBan/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"403618374","text":"import os\nfrom itertools import chain\n\nimport numpy as np\n\nfrom gensim.models import KeyedVectors\n\nfrom nltk.corpus import reuters\n\nfrom utils import Timer, tick\n\n\n@tick\ndef to_embeddings(w2v, samples, maxlen, dim=300):\n N, L, D = len(samples), maxlen, dim\n print('Embedding: [{0}, {1}, {2}]'.format(N, L, D))\n vecs = np.zeros([N, L, D])\n for i, sent in enumerate(samples):\n sent = sent[:L]\n for j, w in enumerate(sent):\n try:\n v = w2v.word_vec(w)\n except KeyError:\n v = w2v.word_vec('')\n vecs[i, j] = v\n return vecs\n\n\n@tick\ndef prepare_reuters2(w2v, pos, neg):\n n_train = 1650\n n_test = 719\n\n def _embed(ids):\n train = np.array([i for i in ids if 'train' in i], dtype=str)\n test = np.array([i for i in ids if 'test' in i], dtype=str)\n\n train = np.random.choice(train, size=n_train, replace=False)\n test = np.random.choice(test, size=n_test, replace=False)\n\n X_train = [list(chain.from_iterable(reuters.sents(i))) for i in train]\n X_train = to_embeddings(w2v, X_train, maxlen=160)\n X_test = [list(chain.from_iterable(reuters.sents(i))) for i in test]\n X_test = to_embeddings(w2v, X_test, maxlen=160)\n return X_train, X_test\n\n print('Embedding...')\n X_train_pos, X_test_pos = _embed(pos)\n X_train_neg, X_test_neg = _embed(neg)\n y_train_pos = np.zeros(n_train, dtype=np.uint8)\n y_test_pos = np.zeros(n_test, dtype=np.uint8)\n y_train_neg = np.ones(n_train, dtype=np.uint8)\n y_test_neg = np.ones(n_test, dtype=np.uint8)\n\n X_train = np.vstack((X_train_pos, X_train_neg)).astype(np.float16)\n X_test = np.vstack((X_test_pos, X_test_neg)).astype(np.float16)\n y_train = np.concatenate((y_train_pos, y_train_neg))\n y_test = np.concatenate((y_test_pos, y_test_neg))\n\n print('Saving data...')\n reuters2 = os.path.expanduser('~/data/reuters/reuters2.npz')\n np.savez(reuters2, X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test)\n\n\n@tick\ndef prepare_reuters5(w2v, labels):\n n_train = 347\n n_test = 117\n\n def _embed(ids):\n train = np.array([i for i in ids if 'train' in i], dtype=str)\n test = np.array([i for i in ids if 'test' in i], dtype=str)\n\n train = np.random.choice(train, size=n_train, replace=False)\n test = np.random.choice(test, size=n_test, replace=False)\n\n X_train = [list(chain.from_iterable(reuters.sents(i))) for i in train]\n X_train = to_embeddings(w2v, X_train, maxlen=350)\n X_test = [list(chain.from_iterable(reuters.sents(i))) for i in test]\n X_test = to_embeddings(w2v, X_test, maxlen=350)\n return X_train, X_test\n\n print('Embedding...')\n n = len(labels)\n X_trains, X_tests = [None]*n, [None]*n\n y_trains, y_tests = [None]*n, [None]*n\n\n for i, label in enumerate(labels):\n X_trains[i], X_tests[i] = _embed(label)\n y_trains[i] = np.zeros(n_train, dtype=np.uint8) + i\n y_tests[i] = np.zeros(n_test, dtype=np.uint8) + i\n\n X_train = np.vstack(X_trains).astype(np.float16)\n X_test = np.vstack(X_tests).astype(np.float16)\n y_train = np.concatenate(y_trains)\n y_test = np.concatenate(y_tests)\n\n print('Saving data...')\n reuters5 = os.path.expanduser('~/data/reuters/reuters5.npz')\n np.savez(reuters5, X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test)\n\n\nwith Timer():\n print('Loading word2vec')\n ifname = os.path.expanduser('~/data/glove/glove.840B.300d.w2v')\n w2v = KeyedVectors.load(ifname)\n\npos = reuters.fileids('acq')\nneg = reuters.fileids('earn')\nprepare_reuters2(w2v, pos, neg)\n\nlabels = [reuters.fileids(label)\n for label in ['crude', 'grain', 'interest', 'money-fx', 
'trade']]\nprepare_reuters5(w2v, labels)\n","sub_path":"src/prepare_reuters.py","file_name":"prepare_reuters.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"215799772","text":"#!/usr/bin/env python3\n\n\nimport os\nimport sys\nimport time\nimport numpy\nimport datetime\n\nimport rospy\nfrom std_msgs.msg import Float64\n\n\n# --\nname = 'status_monitor'\ndata_dir = '/home/amigos/data/experiments/'\nsave_dir = os.path.join(data_dir, name)\n\nexp_time = datetime.datetime.fromtimestamp(time.time())\nymd = exp_time.strftime(\"%Y%m%d_\")\nhms = exp_time.strftime(\"%H%M%S\")\nfilename = ymd + hms + \".txt\"\nsaveto = os.path.join(save_dir, filename)\n# --\n\n\ninterval = int(sys.argv[1])\n\n\nclass status_monitor(object):\n\n def __init__(self):\n self.timestamp = 0.\n self.l218_temp = [0.] * 8\n self.tpg261_pressure = 0.\n self.ondo = 0.\n self.hum = 0.\n\n def callback_temp(self, req, idx):\n self.l218_temp[idx] = float(req.data)\n return\n\n def callback_pressure(self, req):\n self.tpg261_pressure = float(req.data)\n return\n\n def callback_ondo(self, req):\n self.ondo = float(req.data)\n return\n\n def callback_hum(self, req):\n self.hum = float(req.data)\n return\n\n def log(self):\n while not rospy.is_shutdown():\n f = open(saveto, 'a')\n _ctime = time.time()\n ctime = datetime.datetime.fromtimestamp(_ctime)\n date1 = [ctime.strftime('%Y-%m-%d %H:%M:%S')]\n date2 = [time.time()]\n l218_temp = [temp for temp in self.l218_temp]\n pre = [self.tpg261_pressure]\n ondo = [self.ondo, self.hum]\n msg1 = date1 + l218_temp + pre + ondo\n msg2 = date2 + l218_temp + pre + ondo\n msg1 = '{0} {1:.2f}K {2:.2f}K {3:.2f}K {4:.2f}K {5:.2f}K {6:.2f}K {7:.2f}K {8:.2f}K {9:.1}torr {10:.2f}deg {11:.2f}%'.format(*msg1)\n msg2 = '{0} {1:.2f} {2:.2f} {3:.2f} {4:.2f} {5:.2f} {6:.2f} {7:.2f} {8:.2f} {9:.1} {10:.2f} {11:.2f}\\n'.format(*msg2)\n print(msg1)\n f.write(msg2)\n f.close()\n\n time.sleep(interval)\n continue\n return\n\n\nif __name__ == '__main__':\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n pass\n\n st = status_monitor()\n rospy.init_node(name)\n temp_sub_list = [rospy.Subscriber('/lakeshore_ch{}'.format(ch),\n Float64,\n st.callback_temp,\n callback_args = ch-1) \\\n for ch in range(1, 8 + 1)]\n pressure_sub = rospy.Subscriber('/tpg261_torr', Float64, st.callback_pressure)\n ondo_sub = rospy.Subscriber('/ondotori_temp', Float64, st.callback_ondo)\n hum_sub = rospy.Subscriber('/ondotori_hum', Float64, st.callback_hum)\n st.log()\n\n\n","sub_path":"scripts/status_monitor.py","file_name":"status_monitor.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"529295672","text":"from collections import Counter\nimport pandas as pd\n\ndf = pd.read_csv('situacao_clientes.csv')\nx_df = df[['recencia', 'frequencia', 'semanas_de_inscricao']]\ny_df = df['situacao']\n\nxdummies_df = pd.get_dummies(x_df)\nydummies_df = y_df\nx_arr = xdummies_df.values\ny_arr = ydummies_df.values\n\n#porcentagens de teste treino e validação\npor_treino = 0.8\npor_teste = 0.1\npor_validacao = 0.1\n\n#separa os dados\ntamanho_treino = int(por_treino * len(y_arr))\ntamanho_teste = int(por_teste * len(y_arr))\ntamanho_validacao = int(len(y_arr) - tamanho_treino - tamanho_teste)\n\n#0 ate 799\ntreino_dados = x_arr[0:tamanho_treino]\ntreino_marcacoes = y_arr[0:tamanho_treino]\n\n#800 ate 899\nfim_teste = (tamanho_treino + tamanho_teste)\nteste_dados = x_arr[tamanho_teste:fim_teste]\nteste_marcacoes = y_arr[tamanho_teste:fim_teste]\n\n#900 ate 999\nvaliacao_dados = x_arr[fim_teste:]\nvaliacao_marcacoes = y_arr[fim_teste:]\n\ndef fit_predict(modelo, nome, treino_dados, treino_marcacoes, teste_dados, teste_marcacoes):\n modelo.fit(treino_dados, treino_marcacoes)\n resultado_arr = modelo.predict(teste_dados)\n acertos = (resultado_arr == teste_marcacoes)\n total_acertos = sum(acertos)\n toral_elementos = len(teste_dados)\n taxa_acerto = 100.0 * total_acertos / toral_elementos\n print(\"taxa de acerto do {0}: {1} \".format(nome, taxa_acerto))\n return taxa_acerto\n\n#usar um algoritimo quando tenho mult class\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import LinearSVC\n#modelo = OneVsRestClassifier(LinearSVC(random_state=0))\n#modelo.fit(treino_dados, treino_marcacoes)\n#print(modelo.predict(teste_dados))\n#print(teste_marcacoes)\nmodeloOneVsRest = OneVsRestClassifier(LinearSVC(random_state=0))\nresultadoOneVsRest = fit_predict(modeloOneVsRest, 'OneVsRest', treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)\n\n\n#roda o algorimo\nfrom sklearn.naive_bayes import MultinomialNB\nmodeloMultinomialNB = MultinomialNB()\nresultadoMultinomialNB = fit_predict(modeloMultinomialNB, 'MultinomialNB', treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)\n\n#roda o algoritimo\nfrom sklearn.ensemble import AdaBoostClassifier\nmodeloAdaBoostClassifier = AdaBoostClassifier()\nresultadoAdaBoostClassifier = fit_predict(modeloAdaBoostClassifier, 'AdaBoostClassifier', treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)\n\nif resultadoMultinomialNB > resultadoAdaBoostClassifier:\n vencedor = modeloMultinomialNB\nelse:\n vencedor = modeloAdaBoostClassifier\n\nresultadorvencedor = vencedor.predict(valiacao_dados)\n\n#mosta o vencedor\nacertos = (resultadorvencedor == valiacao_marcacoes)\ntotal_acertos = sum(acertos)\ntoral_elementos = len(valiacao_dados)\ntaxa_acerto = 100.0 * total_acertos / toral_elementos\nprint(\"taxa de acerto do vencedor: {0} \".format(taxa_acerto))\n\n#algoritimo basico\nacerto_base = max(Counter(valiacao_marcacoes).values())\ntaxa_acerto_base = 100.0 * acerto_base/len(valiacao_marcacoes) \nprint(\"taxa de acerto base: %f\" % taxa_acerto_base)\nprint(\"total de validacao: %i\" % len(valiacao_marcacoes))\n\n","sub_path":"Alura/MLClassificacao2/A1V4_Classificacao_cliente.py","file_name":"A1V4_Classificacao_cliente.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"22569034","text":"import cv2\nimport numpy as np\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import watershed\nfrom scipy import ndimage\n\nminRadius=2\nmaxRadius=6\n\n# Read the image.\nimg=cv2.imread(\"./test3/sample_17.jpg\")\n\n# Save image copy.\nimgCopy=img.copy()\n\n# Finding the needed square.\ncv2.rectangle(img,(180,150),(500,475),(0,255,0),2)\ncv2.imshow(\"input\",img)\n\n# Cropped\nimg=imgCopy[150:475,180:500]\ncv2.imshow(\"cropped\",img)\n\n# Convert to grayscale.\ngray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"gray\",gray)\n\n# Filter the image.\nfiltered=cv2.GaussianBlur(gray,(3,3),0)\ncv2.imshow(\"filtered\",filtered)\n\n# Detect edges.\nedges=cv2.adaptiveThreshold(filtered,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,6)\n#edges=cv2.Canny(filtered,127,200)\ncv2.imshow(\"edges\",edges)\n\n# Watershed to find overlapping circles.\nthresh=edges\n\nD = ndimage.distance_transform_edt(thresh)\nlocalMax = peak_local_max(D, indices=False, min_distance=1,\n\tlabels=thresh)\n \n# perform a connected component analysis on the local peaks,\n# using 8-connectivity, then appy the Watershed algorithm\nmarkers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]\nlabels = watershed(-D, markers, mask=thresh)\n\nimage=img.copy()\n\nradiusList=[]\n\nfor label in np.unique(labels):\n\t# If the label is zero, we are examining the 'background'\n\t# so simply ignore it\n if label == 0:\n continue\n\n # Otherwise, allocate memory for the label region and draw\n # it on the mask\n mask = np.zeros(gray.shape, dtype=\"uint8\")\n mask[labels == label] = 255\n\n # Detect contours in the mask and grab the largest one\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n c = max(cnts, key=cv2.contourArea)\n \n # Draw a circle enclosing the object\n ((x, y), r) = cv2.minEnclosingCircle(c)\n\n # Filter that radius it should fall between the minimum and max.\n\n if r>minRadius and r 0:\n\n for grade in grades:\n\n grade_lvl = grade\n tri_nbr = grades[grade]['tri_nbr']\n course_year = grades[grade]['course_year']\n start = grades[grade]['start']\n end = grades[grade]['end']\n\n update_enroll_date(grade_lvl, tri_nbr, course_year, start, end)\n\n return redirect(url_for('mclub_admin.index'))\n\n","sub_path":"app/mclub/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"586927466","text":"#usr/bin/python3.3\n\nimport time\nimport random\n\n\ndef fib(n):\n index = 0\n a = 0\n b = 1\n while index < n:\n sleep_cnt = yield b\n print('let me think {0} secs'.format(sleep_cnt))\n time.sleep(int(sleep_cnt))\n a, b = b, a + b\n index += 1\n\n\ndef copy_fib(n):\n print(\"I am copy from stupid fib\")\n yield from fib(n)\n print(\"copy end\")\n\n\nprint('-'*10 + 'test yield from and send' + '-'*10)\nN = 20\ncfib = copy_fib(N)\nfib_res = next(cfib)\nwhile True:\n print(fib_res)\n try:\n fib_res = cfib.send(random.uniform(0, 0.5))\n except StopIteration:\n break\n\n","sub_path":"example/coroutines/fib_with_yield_from.py","file_name":"fib_with_yield_from.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"402691927","text":"from __future__ import print_function\nimport unittest\nfrom pytraj import io as mdio\nfrom pytraj.utils import eq, aa_eq, a_isinstance\nfrom pytraj.decorators import no_test, test_if_having, test_if_path_exists\nimport pytraj.common_actions as pyca\n\nclass Test(unittest.TestCase):\n @test_if_having(\"numpy\")\n def test_0(self):\n import numpy as np\n from array import array as pyarray\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n alist = traj.calc_molsurf().tolist()\n anp = traj.calc_molsurf().to_ndarray()\n a_pyarray = traj.calc_molsurf().to_pyarray()\n aa_eq(alist, anp)\n aa_eq(alist, a_pyarray)\n\n alist2 = pyca.calc_distance(traj, \"@2 @4\", dtype='list')\n anp2 = pyca.calc_distance(traj, \"@2 @4\", dtype='ndarray')\n a_pyarray2 = pyca.calc_distance(traj, \"@2 @4\", dtype='pyarray')\n a_isinstance(alist2, list)\n a_isinstance(anp2, np.ndarray)\n a_isinstance(a_pyarray2, pyarray)\n\n aa_eq(alist2, anp2)\n aa_eq(alist2, a_pyarray2)\n\n # test hist\n try:\n import matplotlib\n d0 = traj.calc_molsurf()\n print (d0.hist(bins=3, range=[d0.min(), d0.max()]))\n print (d0.to_ndarray())\n except:\n print (\"pass `hist` test since don't have matplotlib\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_to_pyarray.py","file_name":"test_to_pyarray.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"220411765","text":"#20201121\r\n#1\r\n\"\"\"\r\ndef arrayStringsAreEqual( word1, word2):\r\n import functools\r\n return functools.reduce(lambda x,y:x+y, word1)==functools.reduce(lambda x,y:x+y, word2)\r\nprint(arrayStringsAreEqual( word1 = [\"abc\", \"d\", \"defg\"], word2 = [\"abcddefg\"]))\r\n\"\"\"\r\n#2\r\n\"\"\"\r\ndef getSmallestString(n, k):\r\n z=(k-n)//25\r\n res=(k-n)%25\r\n a=n-z-1 if res!=0 else n-z\r\n print(a,res,z)\r\n if res==0:\r\n return \"a\"*a+\"z\"*z\r\n else:\r\n return \"a\"*a+chr(ord(\"a\")+res)+\"z\"*z\r\nprint(getSmallestString(9,36))\r\n\"\"\"\r\n\r\n#3.\r\n\"\"\"\r\ndef waysToMakeFair(nums):\r\n d={}\r\n osum=sum([nums[x] for x in range(len(nums)) if x%2==1])\r\n esum=sum([nums[x] for x in range(len(nums)) if x%2==0])\r\n preo=0\r\n pree=0\r\n print(osum,esum)\r\n for x in range(len(nums)):\r\n if x%2==0:\r\n post_e=esum-pree-nums[x]\r\n post_o=osum-preo\r\n else:\r\n post_e=esum-pree\r\n post_o=osum-preo-nums[x]\r\n d[x]=[preo,pree,post_o,post_e]\r\n if x%2==0:\r\n pree+=nums[x]\r\n else:\r\n preo+=nums[x]\r\n ans=0\r\n for x in range(len(nums)):\r\n if d[x][0]+d[x][3]==d[x][1]+d[x][2]:\r\n ans+=1\r\n return ans\r\nprint(waysToMakeFair([1,2,3]))\r\n\"\"\"\r\n\r\n#4\r\n\"\"\"\r\ndef minimumEffort(tasks):\r\n def merg(t1,t2):\r\n return t1[0]+t2[0],min(max(t1[1]+t2[0],t2[1]),max(t1[0]+t2[1],t1[1]))\r\n base=sorted(tasks,key=lambda x:x[1]-x[0])[::-1]\r\n return functools.reduce(merg,base)[1]\r\n\r\nprint(minimumEffort(tasks = [[1,3],[2,4],[10,11],[10,12],[8,9]]))\r\n\"\"\"\r\n\r\n#biweek\r\n#5557.Maximum Repeating Substring\r\n\"\"\"\r\ndef maxRepeating(sequence, word):\r\n import re\r\n s=len(sequence)\r\n n=len(word)\r\n if sa and len(s)+len(nums)-i-1>=k:\r\n s.pop()\r\n if len(s)=0 and max_w>=boxes[j][1] and max_b>=1:\r\n max_w-=boxes[j][1]\r\n max_b-=1\r\n if not portset or boxes[j][0]!=portset[-1]:\r\n cur_port+=1\r\n portset.append(boxes[j][0])\r\n dp[i]=min(dp[i],dp[j]+cur_port+1)\r\n j-=1\r\n print(dp)\r\n return dp[n]\r\n\r\nprint(boxDelivering( boxes = [[2,4],[2,5],[3,1],[3,2],[3,7],[3,1],[4,4],[1,3],[5,2]], portsCount = 5, maxBoxes = 5, maxWeight = 7))\r\n\"\"\"\r\n\r\n#5625. Count of Matches in Tournament\r\n\"\"\"\r\ndef numberOfMatches(n):\r\n ans=0\r\n while n!=1:\r\n ans+=n//2\r\n if n%2==0:\r\n n/=2\r\n else:\r\n n=n//2+1\r\n \r\n return int(ans)\r\nprint(numberOfMatches(14))\r\n\"\"\"\r\n\r\n#5626. Partitioning Into Minimum Number Of Deci-Binary Numbers\r\n\"\"\"\r\ndef minPartitions(n):\r\n return max(map(int,list(n)))\r\nprint(minPartitions(n = \"27346209830709182346\"))\r\n\"\"\"\r\n\r\n#5627. Stone Game VII\r\n\"\"\"\r\ndef stoneGameVII(stones):\r\n presum=[0]+stones[:]\r\n for i in range(1,len(presum)):\r\n presum[i]+=presum[i-1]\r\n def getsc(i,j):\r\n return presum[j+1]-presum[i]\r\n n=len(stones)\r\n dp=[[0]*n for _ in range(n)]\r\n \r\n #for i in range(n-1,-1,-1):\r\n # for j in range(i+1,n):\r\n # dp[i][j]=max(getsc(i+1,j)-dp[i+1][j],getsc(i,j-1)-dp[i][j-1])\r\n for x in range(1,n):\r\n for y in range(x-1,-1,-1):\r\n dp[y][x]=max(getsc(y+1,x)-dp[y+1][x], getsc(y,x-1)-dp[y][x-1])\r\n \r\n \r\n return dp[0][n-1]\r\n \r\n\r\nprint(stoneGameVII([5,3,1,4,2]))\r\n\"\"\"\r\n\r\n#5245. Maximum Height by Stacking Cuboids\r\n\"\"\"\r\ndef maxHeight(cuboids):\r\n dic={}\r\n cuboids.sort(key=lambda x:x[2])\r\n for x in cuboids:\r\n for key in dic.keys():\r\n if max(x[:2])<=min(key):\r\n dic(tuple(x[:2]))=dic[key]+x[2]\r\n\"\"\" \r\n\r\n#1694. 
Reformat Phone Number\r\n\"\"\"\r\ndef reformatNumber(number):\r\n ans=\"\"\r\n for x in number:\r\n if ord(x) in range(48,58):\r\n ans+=x\r\n res=\"\"\r\n if len(ans)%3==0:\r\n for x in range(len(ans)):\r\n if x%3==0 and x!=0:\r\n res+=\"-\"\r\n res+=ans[x]\r\n elif len(ans)%3==1:\r\n for x in range(len(ans)):\r\n if x%3==0 and x!=0 and x!=len(ans)-1:\r\n res+=\"-\"\r\n if x==len(ans)-2:\r\n res+=\"-\"\r\n res+=ans[x]\r\n else:\r\n for x in range(len(ans)):\r\n if x%3==0 and x!=0:\r\n res+=\"-\"\r\n res+=ans[x]\r\n\r\n return res\r\nprint(reformatNumber(\"12345612\"))\r\n\"\"\"\r\n\r\n#1695. Maximum Erasure Value\r\n\"\"\"\r\ndef maximumUniqueSubarray(nums):\r\n if len(nums)==1:\r\n return nums[0]\r\n ans=0\r\n cur=nums[0]\r\n seen=set()\r\n seen.add(cur)\r\n i=0\r\n j=1\r\n n=len(nums)\r\n while jk:\r\n heapq.heappop(hp)\r\n ans=nums[i]-hp[0][0] if hp else nums[i]\r\n heapq.heappush(hp,(-ans,i))\r\n return ans\r\nprint(maxResult(nums = [1,-1,-2,4,-7,3], k = 2))\r\n\"\"\"\r\n\r\n#5621. Number of Students Unable to Eat Lunch\r\n\r\n#def cnts(students, sandwiches):\r\n# import collections\r\n# c=collections.Counter(students)\r\n# ans=0\r\n# for x in sandwiches:\r\n# if c[x]>0:\r\n# c[x]-=1\r\n# ans+=1\r\n# else:\r\n# break\r\n# return len(students)-ans\r\n#print(cnts(students = [1,1,1,0,0,1], sandwiches = [1,0,0,0,1,1]))\r\n\r\n\r\n#5622. Average Waiting Time\r\n\"\"\"\r\ndef averageWaitingTime(customers):\r\n n=len(customers)\r\n if n==1:\r\n return customers[0][1]\r\n cur=customers[0][0]\r\n ans=0\r\n for s,t in customers:\r\n if s>=cur:\r\n ans+=t\r\n cur=s+t\r\n else:\r\n ans+=cur-s+t\r\n cur+=t\r\n return ans/n\r\nprint(averageWaitingTime(customers = [[5,2],[5,4],[10,3],[20,1]]))\r\n\"\"\"\r\n\r\n#5623. Maximum Binary String After Change\r\n\"\"\"\r\ndef maximumBinaryString(binary):\r\n import itertools\r\n import collections\r\n c=collections.Counter(binary)\r\n if '0' in c.keys():\r\n if c['0']==1:\r\n return binary\r\n else:\r\n numz=c['0']\r\n numo=c['1']\r\n i=binary.index('0')\r\n return '1'*(i+numz-1)+'0'+'1'*(len(binary)-i-numz)\r\n else:\r\n return binary\r\nprint(maximumBinaryString(\"000110\"))\r\n\"\"\"\r\n\r\n#5637. Determine if String Halves Are Alike]\r\n\"\"\"\r\ndef halvesAreAlike(s):\r\n d=set(['a','e','i','o','u','A','E','I','O','U'])\r\n def getvo(s):\r\n ans=0\r\n for x in s:\r\n if x in d:\r\n ans+=1\r\n return ans\r\n n=len(s)\r\n return getvo(s[:n//2])==getvo(s[n//2:])\r\nprint(halvesAreAlike(s = \"AbCdEfGh\"))\r\n\"\"\"\r\n\r\n#5638. Maximum Number of Eaten Apples\r\n\r\n\r\n\r\n#5210. Where Will the Ball Fall\r\n\"\"\"\r\ndef findBall(grid):\r\n ans=[]\r\n n=len(grid)\r\n m=len(grid[0])\r\n\r\n for x in range(m):\r\n cur=x\r\n y=0\r\n while cur>=0 and yb:\r\n ans+=b*v\r\n n-=b\r\n else:\r\n ans+=n*v\r\n return ans\r\n return ans\r\nprint(maximumUnits([[5,10],[2,5],[4,7],[3,9]],\r\n10))\r\n\"\"\"\r\n\r\n#5642. Count Good Meals\r\n\"\"\"\r\ndef countPairs(deliciousness):\r\n import collections\r\n c=collections.Counter(deliciousness)\r\n ans=0\r\n for x in c.keys():\r\n for index in range(22):\r\n if 2**index-x in c and 2**index-x>=x:\r\n if 2**index-x==x:\r\n ans+=(c[x]*(c[x]-1))//2\r\n else:\r\n ans+=c[x]*c[2**index-x]\r\n print(c)\r\n return ans%(10**9+7)\r\nprint(countPairs([149,107,1,63,0,1,6867,1325,5611,2581,39,89,46,18,12,20,22,234]))\r\n\"\"\"\r\n\r\n#5643. 
Ways to Split Array Into Three Subarrays\r\n\"\"\"\r\ndef waysToSplit(nums):\r\n import bisect\r\n import math\r\n l=[0]*(len(nums))\r\n for i in range(1,len(nums)):\r\n l[i]=l[i-1]+nums[i-1]\r\n l=l[1:]\r\n s=sum(nums)\r\n print(l)\r\n ans=0\r\n i=bisect.bisect_right(l,(s*2)//3)\r\n while i>1:\r\n cur=l[i-1]\r\n min_l=max(0,cur-(s-cur))\r\n max_l=cur//2\r\n le=bisect.bisect_left(l,min_l)\r\n ri=bisect.bisect_right(l,max_l)\r\n print(l,i,le,ri)\r\n if ri==0:\r\n continue\r\n else:\r\n ans=ans+ri-le+1 if le!=0 else ans+ri-le \r\n i-=1\r\n return ans\r\nprint(waysToSplit([1,2,2,2,5,0]))\r\n\"\"\"\r\n\r\n#5644. Minimum Operations to Make a Subsequence\r\n\"\"\"\r\ndef minOperations(target, arr):\r\n import collections\r\n import bisect\r\n\r\n d={x:i for i,x in enumerate(target)}\r\n res=[]\r\n for x in arr:\r\n if x in d:\r\n res.append(d[x]) \r\n s = []\r\n for r in res:\r\n pos = bisect.bisect_left(s, r)\r\n if pos == len(s):\r\n s.append(r)\r\n else:\r\n s[pos] = r\r\n return len(target)-len(s)\r\nprint(minOperations(target = [6,4,8,1,3,2], arr = [4,7,6,2,3,8,6,1]))\r\n\"\"\"\r\n\r\n#5633. Calculate Money in Leetcode Bank\r\n\"\"\"\r\ndef totalMoney(n):\r\n w=n//7\r\n d=n%7\r\n ans=0\r\n ans+=(28+28+7*(w-1))*w//2\r\n ans+=(1+d)*d//2+d*w\r\n return ans\r\nprint(totalMoney(10))\r\n\"\"\"\r\n\r\n#5634. Maximum Score From Removing Substrings\r\n\"\"\"\r\ndef maximumGain(s, x, y):\r\n st, ans = [], 0\r\n st2 = []\r\n #ab>=ba\r\n if x >= y:\r\n for ch in s:\r\n if ch ==\"b\":\r\n if not st or st[-1] != \"a\":\r\n st.append(ch)\r\n else:\r\n st.pop()\r\n ans+=x\r\n else:\r\n st.append(ch)\r\n while st:\r\n ch = st.pop()\r\n if ch ==\"b\":\r\n if st2 and st2[-1] == \"a\":\r\n st2.pop()\r\n ans+=y\r\n else:\r\n st2.append(ch)\r\n else:\r\n for ch in s:\r\n if ch ==\"a\":\r\n if not st or st[-1] != \"b\":\r\n st.append(ch)\r\n else:\r\n st.pop()\r\n ans+=x\r\n else:\r\n st.append(ch)\r\n print(ans, st)\r\n while st:\r\n ch = st.pop()\r\n if ch ==\"a\":\r\n if st2 and st2[-1] == \"b\":\r\n st2.pop()\r\n ans+=y\r\n else:\r\n st2.append(ch)\r\n\r\n return ans\r\nprint(maximumGain(s = \"cdbcbbaaabab\", x = 4, y = 5))\r\n\"\"\"\r\n\r\n#5635. Construct the Lexicographically Largest Valid Sequence\r\n#MM\r\n\"\"\"\r\n if n == 1:\r\n return [1]\r\n if n == 2:\r\n return [2,1,2]\r\n ans=[]\r\n if n%2==0:\r\n for num in range(n,1,-2):\r\n ans.append(num)\r\n ans.append(n-1)\r\n for num in range(2,n+1,2):\r\n ans.append(num)\r\n for num in range(n-3,1,-2):\r\n ans.append(num)\r\n ans.append(n-1)\r\n ans.append(1)\r\n for num in range(3,n-1,2):\r\n ans.append(num)\r\n else:\r\n for num in range(n,1,-2):\r\n ans.append(num)\r\n ans.append(1)\r\n ans.append(n-1)\r\n for num in range(3,n+1,2):\r\n ans.append(num)\r\n for num in range(n-3,1,-2):\r\n ans.append(num)\r\n ans.append(n-1)\r\n for num in range(2,n-2,2):\r\n ans.append(num)\r\n return ans\r\n\"\"\"\r\n\r\n\r\n#print(constructDistancedSequence(2))\r\n\r\n#5649. Decode XORed Array\r\n\"\"\"\r\ndef decode(encoded, first):\r\n def getxor(num,res):\r\n n=bin(num)[2:]\r\n r=bin(res)[2:]\r\n if len(n)>=len(r):\r\n r=\"0\"*(len(n)-len(r))+r\r\n else:\r\n n=\"0\"*(len(r)-len(n))+n\r\n ans=\"\"\r\n for x in range(len(r)):\r\n if r[x]==\"0\":\r\n ans+=n[x]\r\n else:\r\n ans+=\"1\" if n[x]==\"0\" else \"0\"\r\n return int(ans,2) \r\n ans=[first]\r\n for x in range(len(encoded)):\r\n ans.append(getxor(ans[-1],encoded[x]))\r\n return ans\r\nprint(decode(encoded = [6,2,7,3], first = 4))\r\n\"\"\"\r\n\r\n#5650. 
Minimize Hamming Distance After Swap Operations\r\n\"\"\"\r\ndef minimumHammingDistance(source, target, allowedSwaps):\r\n import collections\r\n class UF:\r\n def __init__(self,n):\r\n self.p=list(range(n))\r\n def union(self,x,y):\r\n self.p[self.find(x)]=self.find(y)\r\n def find(self,x):\r\n if x!= self.p[x]:\r\n self.p[x]=self.find(self.p[x])\r\n return self.p[x]\r\n uf=UF(len(source))\r\n res=[]\r\n m=collections.defaultdict(list)\r\n for x,y in allowedSwaps:\r\n uf.union(x,y)\r\n for i in range(len(source)):\r\n m[uf.find(i)].append(i)\r\n ans=0\r\n print(res)\r\n print(m)\r\n for x in m.keys():\r\n sset=collections.defaultdict(int)\r\n tset=collections.defaultdict(int)\r\n for idx in m[x]:\r\n sset[source[idx]]+=1\r\n tset[target[idx]]+=1\r\n print(sset,tset)\r\n for k in sset.keys():\r\n ans+=min(sset[k],tset[k])\r\n print(ans)\r\n return len(source)-ans\r\nprint(minimumHammingDistance([50,46,54,35,18,42,26,72,75,47,50,4,54,21,18,18,61,64,100,14],\r\n[83,34,43,73,61,94,10,68,74,31,54,46,28,60,18,18,4,44,79,92],\r\n[[1,8],[14,17],[3,1],[17,10],[18,2],[7,12],[11,3],[1,15],[13,17],[18,19],[0,10],[15,19],[0,15],[6,7],[7,15],[19,4],[7,16],[14,18],[8,10],[17,0],[2,13],[14,10],[12,17],[2,9],[6,15],[16,18],[2,16],[2,6],[4,5],[17,5],[10,13],[7,2],[9,16],[15,5],[0,5],[8,0],[11,12],[9,7],[1,0],[11,17],[4,6],[5,7],[19,12],[3,18],[19,1],[13,18],[19,6],[13,6],[6,1],[4,2]]))\r\n\"\"\"\r\n\r\n#5639. Find Minimum Time to Finish All Jobs\r\n\r\n#5653. Number Of Rectangles That Can Form The Largest Square\r\n\"\"\"\r\ndef countGoodRectangles(rectangles):\r\n import collections\r\n d = collections.defaultdict(int)\r\n for r in rectangles:\r\n d[min(r[0],r[1])] += 1\r\n return d[max(d.keys())]\r\nprint(countGoodRectangles( rectangles = [[5,8],[3,9],[5,12],[16,5]]))\r\n\"\"\"\r\n\r\n#5243. Tuple with Same Product\r\n\"\"\"\r\ndef tupleSameProduct(nums):\r\n import collections\r\n d = collections.defaultdict(int)\r\n n = len(nums)\r\n if n < 4:\r\n return 0\r\n for i in range(n-1):\r\n for j in range(i + 1, n):\r\n d[nums[i] * nums[j]] += 1\r\n ans = 0\r\n for x in d.values():\r\n if x > 1:\r\n ans += ((x - 1) * x //2) * 8 \r\n return ans\r\nprint(tupleSameProduct(nums = [1,2,4,5,10]))\r\n\"\"\"\r\n\r\n#5655. Largest Submatrix With Rearrangements\r\n\"\"\"\r\ndef largestSubmatrix(matrix):\r\n n = len(matrix)\r\n m = len(matrix[0])\r\n if n == 1:\r\n return sum(matrix[0])\r\n for i in range(1, n):\r\n for j in range(m):\r\n matrix[i][j] = matrix[i-1][j] + 1 if matrix[i][j] == 1 else 0\r\n ans = 0\r\n for i in range(n):\r\n tmp = matrix[i][:]\r\n tmp.sort(reverse = True)\r\n for j in range(m):\r\n ans = max(ans , tmp[j]*(j + 1))\r\n\r\n return ans\r\n\r\n\r\n\r\n S = set(((0,0,0),))\r\n def getset(l):\r\n ans = []\r\n idx = 0\r\n while idx < len(l):\r\n if l[idx] == 1:\r\n s=idx\r\n while idx < len(l) and l[idx] == 1:\r\n idx += 1\r\n ans.append(tuple((s, idx - 1, 1)))\r\n idx += 1\r\n return ans\r\n def getu(s1, s2):\r\n if s1[0] > s2[1] or s1[1] < s2[0]:\r\n return tuple((0,0,0))\r\n else:\r\n return tuple((max(s1[0] , s2[0]), min(s1[1], s2[1]), s1[2]+s2[2]))\r\n for idx in range(len(matrix[0])):\r\n tmp = [matrix[i][idx] for i in range(len(matrix))]\r\n \r\n if getset(tmp) != []:\r\n for s in getset(tmp):\r\n S |= set( getu(i , s) for i in S)\r\n S |= set(getset(tmp))\r\n return max((s[1]-s[0]+1)*s[2] for s in S)\"\"\"\r\n\r\n#biweek\r\n#5645. 
Find the Highest Altitude\r\n\"\"\"\r\ndef largestAltitude(gain):\r\n ans = 0\r\n cur = 0\r\n for g in gain:\r\n cur += g\r\n ans = max(ans ,cur)\r\n return ans\r\nprint(largestAltitude(gain = [-4,-3,-2,-1,4,3,2]))\"\"\"\r\n\r\n#5646. Minimum Number of People to Teach\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#5648. Count Ways to Make Array With Product\r\n\"\"\"\r\ndef waysToFillArray(queries):\r\n import collections\r\n import math\r\n def primeFactors(n): \r\n ans = collections.defaultdict(int)\r\n while n % 2 == 0: \r\n ans[2] += 1 \r\n n = n // 2\r\n for i in range(3,int(math.sqrt(n))+1,2): \r\n while n % i== 0: \r\n ans[i] += 1\r\n n = n // i \r\n if n > 2: \r\n ans[n] += 1\r\n return ans\r\n for n, k in queries:\r\n d = primeFactors(k)\r\n if sum(d.values())\r\n return\r\nprint(waysToFillArray(queries = [[2,6],[5,1],[73,660]]))\r\n\"\"\"\r\n#1738. Find Kth Largest XOR Coordinate Value\r\n\"\"\"\r\ndef kthLargestValue(matrix, k):\r\n m, n = len(matrix), len(matrix[0])\r\n if m == 1 and n == 1:\r\n return matrix[0][0]\r\n elif m == 1:\r\n ans = []\r\n ans.append(matrix[0][0])\r\n cur = matrix[0][0]\r\n for i in range(1, n):\r\n cur ^= matrix[0][i]\r\n ans.append(cur)\r\n ans.sort(reverse = True)\r\n return ans[k - 1]\r\n elif n == 1:\r\n ans = []\r\n ans.append(matrix[0][0])\r\n cur = matrix[0][0]\r\n for i in range(1, m):\r\n cur ^= matrix[i][0]\r\n ans.append(cur)\r\n ans.sort(reverse = True)\r\n return ans[k - 1]\r\n else:\r\n res = [[0]*n for _ in range(m)]\r\n res[0][0] = matrix[0][0]\r\n cur = res[0][0]\r\n for i in range(1, n):\r\n cur ^= matrix[0][i]\r\n res[0][i] = cur\r\n cur = res[0][0]\r\n for i in range(1, m):\r\n cur ^= matrix[i][0]\r\n res[i][0] = cur\r\n for i in range(1, m):\r\n for j in range(1, n):\r\n res[i][j] = res[i - 1][j] ^ res[i][j - 1] ^ res[i - 1][j - 1] ^ matrix[i][j]\r\n ans = []\r\n for i in range(m):\r\n for j in range(n):\r\n ans.append(res[i][j])\r\n ans.sort(reverse = True)\r\n print(res)\r\n print(ans)\r\n return ans[k - 1]\r\nprint(kthLargestValue([[10,9,5],[2,0,4],[1,0,9],[3,4,8]]\r\n,10))\r\n\"\"\"\r\n\r\n#1737. Change Minimum Characters to Satisfy One of Three Conditions\r\n\"\"\"\r\ndef minCharacters(a, b):\r\n import collections\r\n la, lb = len(a), len(b)\r\n ca = collections.Counter(a)\r\n cb = collections.Counter(b)\r\n ans = la + lb\r\n #a strictly less than b\r\n if 'a' not in cb.keys():\r\n kb = min(cb.keys())\r\n cur = la\r\n for ka in ca.keys():\r\n if ka < kb:\r\n cur = min(cur, la - ca[ka])\r\n ans = min(cur, ans)\r\n if 'z' not in ca.keys():\r\n ka = max(ca.keys())\r\n cur = la\r\n for kb in cb.keys():\r\n if kb > ka:\r\n cur = min(cur, lb - cb[kb])\r\n ans = min(cur, ans)\r\n #b strictly less than a\r\n if 'a' not in ca.keys():\r\n ka = min(ca.keys())\r\n cur = lb\r\n for kb in cb.keys():\r\n if kb < ka:\r\n cur = min(cur, lb - cb[kb])\r\n ans = min(cur, ans)\r\n if 'z' not in cb.keys():\r\n kb = max(cb.keys())\r\n cur = la\r\n for ka in ca.keys():\r\n if ka > kb:\r\n cur = min(cur, la - ca[ka])\r\n ans = min(cur, ans)\r\n #only one character\r\n d = ca + cb\r\n for k in d.keys():\r\n ans = min(ans, la + lb - d[k])\r\n return ans\r\nprint(minCharacters(a = \"dabadd\", b = \"cda\"))\r\n\"\"\"\r\n\r\n#5654. Maximum Number of Balls in a Box\r\n\"\"\"\r\ndef countBalls(lowLimit, highLimit):\r\n import collections\r\n d = collections.defaultdict(int)\r\n for i in range(lowLimit, highLimit + 1):\r\n d[sum(map(int,list(str(i))))] += 1\r\n return max(d.values())\r\nprint(countBalls(lowLimit = 19, highLimit = 28))\r\n\"\"\"\r\n\r\n\r\n#5665. 
Restore the Array From Adjacent Pairs\r\n\"\"\"\r\ndef restoreArray(adjacentPairs):\r\n import collections\r\n if len(adjacentPairs) == 1:\r\n return adjacentPairs[0]\r\n ds = collections.defaultdict(list)\r\n dl = collections.defaultdict(list)\r\n d = collections.defaultdict(int)\r\n for l, r in adjacentPairs:\r\n ds[min(l, r)].append(max(l, r))\r\n dl[max(l, r)].append(min(l, r))\r\n d[l] += 1\r\n d[r] += 1\r\n\r\n ends = []\r\n for i in d.keys():\r\n if d[i] == 1:\r\n ends.append(i)\r\n pre = ends[0]\r\n\r\n ans = [pre]\r\n for i in range(len(adjacentPairs) - 1):\r\n if i == 0:\r\n cur = ds[pre][0] if pre in ds else dl[pre][0]\r\n else:\r\n tmp = []\r\n tmp.extend(ds[pre])\r\n tmp.extend(dl[pre])\r\n for item in tmp:\r\n if item != ans[-2]:\r\n cur = item\r\n ans.append(cur)\r\n pre = cur\r\n ans.append(ends[-1])\r\n return ans\r\nprint(restoreArray([[4,-2],[1,4],[-3,1]]))\r\n\"\"\"\r\n\r\n#5667. Can You Eat Your Favorite Candy on Your Favorite Day?\r\n\"\"\"\r\ndef canEat(candiesCount, queries):\r\n n = len(candiesCount)\r\n micnt = [0] * n\r\n micnt[0] = 1\r\n maxcnt = [0] * n\r\n maxcnt[0] = candiesCount[0]\r\n print(\"mincnt = \",micnt)\r\n print(\"maxcnt = \",maxcnt)\r\n for idx in range(n - 1):\r\n micnt[idx + 1] = micnt[idx] + candiesCount[idx]\r\n maxcnt[idx + 1] = maxcnt[idx] + candiesCount[idx + 1]\r\n print(\"mincnt = \",micnt)\r\n print(\"maxcnt = \",maxcnt)\r\n ans = []\r\n for t, d, c in queries:\r\n ans.append((d + 1) * c >= micnt[t] and d + 1<= maxcnt[t] - 1) \r\n return ans\r\nprint(canEat(candiesCount = [5,2,6,4,1], queries = [[3,1,2],[4,10,3],[3,10,100],[4,100,30],[1,3,1]]))\r\n\"\"\"\r\n#5657. Sum of Unique Elements\r\n\"\"\"\r\ndef sumOfUnique(nums):\r\n import collections\r\n c = collections.Counter(nums)\r\n ans = 0\r\n for x in c:\r\n if c[x] == 1:\r\n ans += x\r\n return ans\r\nprint(sumOfUnique(nums = [1,2,3,4,5]))\r\n\"\"\"\r\n\r\n#5658. Maximum Absolute Sum of Any Subarray\r\n\"\"\"\r\ndef maxAbsoluteSum(nums):\r\n n = len(nums)\r\n pos, neg = [0] * n, [0] * n\r\n if nums[0] >= 0:\r\n pos[0] = nums[0]\r\n else:\r\n neg[0] = - nums[0]\r\n for idx in range(1, n):\r\n cur = nums[idx]\r\n if cur >= 0:\r\n pos[idx] = pos[idx - 1] + cur\r\n neg[idx] = max(0, neg[idx - 1] - cur)\r\n else:\r\n neg[idx] = neg[idx - 1] - cur\r\n pos[idx] = max(0, pos[idx - 1] + cur)\r\n\r\n print(\"pos = \", pos)\r\n print(\"neg = \", neg)\r\n return max(max(pos), max(neg))\r\nprint(maxAbsoluteSum([-3,-5,-3,-2,-6,3,10,-10,-8,-3,0,10,3,-5,8,7,-9,-9,5,-8]))\r\n\"\"\"\r\n\r\n#5659. Minimum Length of String After Deleting Similar Ends\r\n\"\"\"\r\ndef minimumLength(s):\r\n import collections\r\n import itertools\r\n dq = collections.deque()\r\n tmp = [(k, len(list(g))) for k,g in itertools.groupby(s)]\r\n ans = len(s)\r\n leng = len(tmp)\r\n for t in tmp:\r\n dq.append(t)\r\n while leng > 1 and dq[0][0] == dq[-1][0]:\r\n ans -= dq.popleft()[1]\r\n ans -= dq.pop()[1]\r\n leng -= 2\r\n if leng > 1:\r\n return ans\r\n else:\r\n return ans if ans == 1 else 0\r\nprint(minimumLength(s = \"aabccabba\"))\r\n\"\"\"\r\n\r\n#5672. Check if Array Is Sorted and Rotated\r\n\"\"\"\r\ndef check(nums):\r\n n = len(nums)\r\n if n <= 2:\r\n return True\r\n def isinc(arr):\r\n n = len(arr)\r\n if n == 1:\r\n return True\r\n else:\r\n for i in range(n - 1):\r\n if arr[i] > arr[i + 1]:\r\n return False\r\n return True\r\n for idx in range(n - 1):\r\n if nums[idx] > nums[idx + 1]:\r\n return nums[-1] <= nums[0] and isinc(nums[idx + 1:])\r\n return True\r\nprint(check([2,1,3,4]))\r\n\"\"\"\r\n\r\n#5673. 
Maximum Score From Removing Stones\r\n\"\"\"\r\ndef maximumScore(a, b, c):\r\n n = [a, b, c]\r\n n.sort()\r\n if n[-1] >= n[0] + n[1]:\r\n return n[0] + n[1]\r\n else:\r\n return sum(n) // 2\r\nprint(maximumScore( a = 4, b = 4, c = 6))\r\n\"\"\"\r\n\r\n#5674. Largest Merge Of Two Strings\r\n\"\"\"\r\ndef largestMerge(s, t):\r\n m, n = len(s), len(t)\r\n def dfs(i, j):\r\n if i == m:\r\n return t[j : ]\r\n elif j == n:\r\n return s[i : ]\r\n else:\r\n if s[i] > t[j]:\r\n return s[i] + dfs(i + 1, j)\r\n elif s[i] < t[j]:\r\n return t[j] + dfs(i, j + 1)\r\n else:\r\n return s[i] + max(dfs(i + 1, j), dfs(i, j + 1))\r\n return dfs(0, 0)\r\nprint(largestMerge(s = \"abcabc\", t = \"abdcaba\"))\r\n\r\n\r\n\"\"\"\r\n\r\n#1760. Minimum Limit of Balls in a Bag\r\n\r\n#2020 NA Regionals Practice Contest 14\r\n\r\n#5668. Longest Nice Substring\r\n\"\"\"\r\ndef longestNiceSubstring(s):\r\n import collections\r\n def cnt(st):\r\n d = collections.defaultdict(int)\r\n for s in st:\r\n if ord(s) <= 90:\r\n if d[s] == 2 or d[s] == 0:\r\n d[s] += 1\r\n else:\r\n s = s.upper()\r\n if d[s] == 1 or d[s] == 0:\r\n d[s] += 2\r\n return all(d[x] == 3 for x in d.keys())\r\n n = len(s)\r\n ans = \"\"\r\n mlen = 0\r\n for i in range(n - 1):\r\n for j in range(i + 2, n + 1):\r\n if cnt(s[i : j]):\r\n if j - i > mlen:\r\n ans = s[i : j]\r\n mlen = j - i\r\n return ans\r\nprint(longestNiceSubstring(s = \"YazaAay\"))\r\n\"\"\"\r\n\r\n#5669. Form Array by Concatenating Subarrays of Another Array\r\n\"\"\"\r\ndef canChoose(groups, nums):\r\n n = len(nums)\r\n idx = 0\r\n for g in groups:\r\n curl = len(g)\r\n find = False\r\n while idx < n:\r\n if nums[idx : idx + curl] == g:\r\n find = True\r\n idx += curl\r\n break\r\n else:\r\n idx += 1\r\n if not find:\r\n return False\r\n return True\r\nprint(canChoose(groups = [[1,-1,-1],[3,-2,0]], nums = [1,-1,0,1,-1,-1,3,-2,0]))\r\n\"\"\"\r\n\r\n#5671. Map of Highest Peak\r\n\r\n#5685. Merge Strings Alternately\r\n\"\"\"\r\ndef mergeAlternately(word1, word2):\r\n m, n = len(word1), len(word2)\r\n ans = \"\"\r\n for i in range(min(m, n)):\r\n ans += word1[i]\r\n ans += word2[i]\r\n if m > n:\r\n ans += word1[n:]\r\n if n > m:\r\n ans += word2[m:]\r\n return ans\r\nprint(mergeAlternately( word1 = \"abcd\", word2 = \"pq\"))\r\n\"\"\"\r\n\r\n#5686. Minimum Number of Operations to Move All Balls to Each Box\r\n\"\"\"\r\ndef minOperations(boxes):\r\n n = len(boxes)\r\n l = [0] * n\r\n r = [0] * n\r\n cur = 0\r\n for idx in range(1, n):\r\n cur += boxes[idx - 1] == \"1\"\r\n l[idx] = cur\r\n cur = 0\r\n for idx in range(n - 2, -1, -1):\r\n cur += boxes[idx + 1] == \"1\"\r\n r[idx] = cur\r\n ans = [0] * n\r\n s = 0\r\n for i, x in enumerate(boxes):\r\n if x == \"1\":\r\n s += i\r\n ans[0] = s\r\n for idx in range(1, n):\r\n s += l[idx] - r[idx - 1]\r\n ans[idx] = s\r\n return ans\r\nprint(minOperations(boxes = \"001011\"))\r\n\"\"\"\r\n\r\n#5687. Maximum Score from Performing Multiplication Operations\r\n\"\"\"\r\ndef maximumScore(nums, multipliers):\r\n\"\"\"\r\n\r\n\r\n#5689. 
Count Items Matching a Rule\r\n\"\"\"\r\ndef countMatches(items, ruleKey, ruleValue):\r\n ans = 0\r\n if ruleKey == 'type':\r\n for t, c, n in items:\r\n if t == ruleValue:\r\n ans += 1\r\n return ans\r\n elif ruleKey == 'color':\r\n for t, c, n in items:\r\n if c == ruleValue:\r\n ans += 1\r\n return ans\r\n else:\r\n for t, c, n in items:\r\n if n == ruleValue:\r\n ans += 1\r\n return ans\r\nprint(countMatches( items = [[\"phone\",\"blue\",\"pixel\"],[\"computer\",\"silver\",\"phone\"],[\"phone\",\"gold\",\"iphone\"]], ruleKey = \"type\", ruleValue = \"phone\"))\r\n\"\"\"\r\n\r\n#5690. Closest Dessert Cost\r\n\"\"\"\r\ndef closestCost(baseCosts, toppingCosts, target):\r\n s = set([0])\r\n for t in toppingCosts:\r\n cur = set(x + t for x in s)\r\n cur |= set(x + 2 * t for x in s)\r\n s |= cur\r\n ans = 0\r\n diff = float('inf')\r\n print(s)\r\n for b in baseCosts:\r\n for i in s:\r\n if abs(target - i - b) < diff:\r\n ans = i + b\r\n diff = abs(target - i - b)\r\n return ans\r\n\r\nprint(closestCost(baseCosts = [10], toppingCosts = [1], target = 1))\r\n\"\"\"\r\n\r\n#5691. Equal Sum Arrays With Minimum Number of Operations\r\n\"\"\"\r\ndef minOperations(nums1, nums2):\r\n import collections\r\n m, n = len(nums1), len(nums2)\r\n if m > 6 * n or n > 6 * m:\r\n return -1\r\n \r\n ans = 0\r\n c1 = collections.Counter(nums1)\r\n c2 = collections.Counter(nums2)\r\n s1 = sum(nums1)\r\n s2 = sum(nums2)\r\n if s1 > s2:\r\n dif = s1 - s2\r\n d5 = c1[6] + c2[1]\r\n max5 = dif // 5\r\n dif -= 5 * min(max5, d5)\r\n ans += min(max5, d5)\r\n \r\n d4 = c1[5] + c2[2] + d5 - min(max5, d5)\r\n max4 = dif // 4\r\n dif -= 4 * min(max4, d4)\r\n ans += min(max4, d4)\r\n\r\n d3 = c1[4] + c2[3] + d4 - min(max4, d4)\r\n max3 = dif // 3\r\n dif -= 3 * min(max3, d3)\r\n ans += min(max3, d3)\r\n\r\n d2 = c1[3] + c2[4] + d3 - min(max3, d3)\r\n max2 = dif // 2\r\n dif -= 2 * min(max2, d2)\r\n ans += min(max2, d2)\r\n\r\n d3 = c1[2] + c2[5] + d2 - min(max2, d2)\r\n ans += dif\r\n else:\r\n dif = s2 - s1\r\n d5 = c1[1] + c2[6]\r\n max5 = dif // 5\r\n dif -= 5 * min(max5, d5)\r\n ans += min(max5, d5)\r\n \r\n d4 = c1[2] + c2[5] + d5 - min(max5, d5)\r\n max4 = dif // 4\r\n dif -= 4 * min(max4, d4)\r\n ans += min(max4, d4)\r\n\r\n d3 = c1[3] + c2[4] + d4 - min(max4, d4)\r\n max3 = dif // 3\r\n dif -= 3 * min(max3, d3)\r\n ans += min(max3, d3)\r\n\r\n d2 = c1[4] + c2[3] + d3 - min(max3, d3)\r\n max2 = dif // 2\r\n dif -= 2 * min(max2, d2)\r\n ans += min(max2, d2)\r\n\r\n d3 = c1[5] + c2[2] + d2 - min(max2, d2)\r\n ans += dif\r\n return ans\r\nprint(minOperations(nums1 = [6,6], nums2 = [1]))\r\n\"\"\"\r\n\r\n#5692. Car Fleet II\r\n\r\n\r\n#biweekly 47\r\n#5680. Find Nearest Point That Has the Same X or Y Coordinate\r\n\"\"\"\r\ndef nearestValidPoint(x, y, points):\r\n ans = float('inf')\r\n idx = -1\r\n for i, p in enumerate(points):\r\n if p[0] == x or p[1] == y:\r\n if abs(p[0] - x) + abs(p[1] - y) < ans:\r\n idx = i\r\n ans = min(ans, abs(p[0] - x) + abs(p[1] - y))\r\n return idx if ans < float('inf') else -1\r\nprint(nearestValidPoint(x = 3, y = 4, points = [[1,2],[3,1],[2,4],[2,3],[4,4]]))\"\"\"\r\n\r\n#5681. Check if Number is a Sum of Powers of Three\r\n\"\"\"\r\ndef checkPowersOfThree(n):\r\n while n >= 3:\r\n mod, res = n // 3, n % 3\r\n if res > 1:\r\n return False\r\n n = mod\r\n return n < 2\r\nprint(checkPowersOfThree(91))\r\n\"\"\"\r\n\r\n#5682. 
Sum of Beauty of All Substrings\r\n\"\"\"\r\ndef beautySum(s):\r\n import collections\r\n n = len(s)\r\n if n == 1:\r\n return 0\r\n ans = 0\r\n for i in range(n - 1):\r\n d = collections.defaultdict(int)\r\n d[s[i]] += 1\r\n for j in range(i + 1, n):\r\n d[s[j]] += 1\r\n ans += (max(d.values()) - min(d.values()))\r\n return ans\r\nprint(beautySum(s = \"aabcbaa\"))\r\n\"\"\"\r\n\r\n#5683. Count Pairs Of Nodes\r\n\r\n#Weekly Contest 231\r\n#5697. Check if Binary String Has at Most One Segment of Ones\r\n\"\"\"\r\ndef checkOnesSegment(s):\r\n import itertools\r\n tmp = [[k, len(list(g))] for k, g in itertools.groupby(s)]\r\n return len([x for x in tmp if x[0] == '1']) <= 1\r\nprint(checkOnesSegment('1101'))\r\n\"\"\"\r\n\r\n#5698. Minimum Elements to Add to Form a Given Sum\r\n\"\"\"\r\ndef minElements(nums, limit, goal):\r\n import math\r\n diff = goal - sum(nums)\r\n return math.ceil(abs(diff) / limit)\r\nprint(minElements(nums = [1,-1,1], limit = 3, goal = -4))\r\n\"\"\"\r\n\r\n#5700. Make the XOR of All Segments Equal to Zero\r\n\"\"\"\r\ndef minChanges(nums, k):\r\n import collections\r\n d = collections.defaultdict(dict)\r\n n = len(nums)\r\n if k == 1:\r\n return n - nums.count(0)\r\n if k == n:\r\n res = 0\r\n for i in nums:\r\n res ^= i\r\n return 0 if res == 0 else 1\r\n for i, x in enumerate(nums):\r\n if x in d[i % k]:\r\n d[i % k][x] += 1\r\n else:\r\n d[i % k][x] = 1\r\n \r\n return \r\nprint(minChanges(nums = [1,2,0,3,0], k = 1)) \r\n\"\"\"\r\n\r\n#5693. Second Largest Digit in a String\r\n\"\"\"\r\ndef secondHighest(s):\r\n import collections\r\n c = collections.defaultdict(int)\r\n for ch in s:\r\n if ch.isdigit():\r\n c[int(ch)] += 1\r\n if len(c) < 2:\r\n return -1\r\n else:\r\n return sorted(list(c.keys()))[1]\r\nprint(secondHighest( s = \"dfa12321afd\"))\r\n\"\"\"\r\n#5712. Maximum Number of Consecutive Values You Can Make\r\n\"\"\"\r\nSuppose we have a coin of value c[i], we can use this coin to extend current consecutive values, only if we have already built consecutive values no less than c[i] - 1. \r\n\r\nSo just sort coins by value, for each coin[i], check if c[i] satisfies c[i] <= cur_max + 1. If so, increase the maximum value from cur_max to cur_max + coin[i], otherwise, return c[i] + 1 (No need to try any following coins because coin with larger value can not cover cur_max + 1 neither). \r\n\r\nFor example:\r\nvalues = [0,1,2,3,4,5,6]\r\nnew_c = 7\r\nTherefore we can build [7, 8, ... , 13] with new value = 7, now the consecutive value list is [0, 1, ... , 13]\r\n\r\nHowever, if the new value is 2 more larger than the maximum number, we can not find a way to fill the value = max_values + 1\r\n\r\nFor example:\r\nvalues = [0,1,2,3,4,5,6]\r\nnew_c = 8\r\nIn this case, max_value = 6, we cant fit the value 7 with current coins, meaning the maximum value we can reach is max_value, ans = max_value + 1 (including 0)\r\n\r\n```\r\ndef getMaximumConsecutive(self, coins: List[int]) -> int:\r\n coins.sort()\r\n cur = 0\r\n for idx in range(len(coins)):\r\n if coins[idx] - 1 > cur:\r\n break\r\n cur += coins[idx]\r\n return cur + 1\r\nprint(getMaximumConsecutive([1,23,4,1]))\r\n```\r\n\"\"\"\r\n\r\n\r\n#1800. Maximum Ascending Subarray Sum\r\n\"\"\"\r\ndef maxAscendingSum(nums):\r\n n = len(nums)\r\n ans = nums[0]\r\n cur = nums[0]\r\n for idx in range(1, n):\r\n if nums[idx] <= nums[idx - 1]:\r\n cur = 0\r\n cur += nums[idx]\r\n ans = max(ans, cur)\r\n return ans\r\nprint(maxAscendingSum(nums = [12,17,15,13,10,11,12]))\r\n\"\"\"\r\n\r\n\r\n#1802. 
Maximum Value at a Given Index in a Bounded Array\r\n\"\"\"\r\ndef maxValue(n, index, maxSum):\r\n def getmin(n, idx, L):\r\n if n > idx + 1:\r\n l = (2 * n - idx) * (idx + 1) // 2\r\n else:\r\n l = (n + 1) * n // 2 + (idx + 1 - n)\r\n\r\n if n > L - idx:\r\n r = (2 * n - L + idx + 1) * (L - idx) // 2\r\n else:\r\n r = (n + 1) * n // 2 + (L - idx - n)\r\n return r + l - n\r\n l = 1\r\n r = maxSum \r\n while l < r:\r\n m = r - (r - l) // 2\r\n if getmin(m, index, n) <= maxSum:\r\n l = m\r\n else:\r\n r = m - 1\r\n return l\r\nprint(maxValue(n = 6, index = 1, maxSum = 10))\r\n\"\"\"\r\n\r\n#5705. Determine Color of a Chessboard Square\r\n\r\n#5706. Sentence Similarity III\r\n\"\"\"\r\ndef areSentencesSimilar(s1, s2):\r\n s1 = s1.split()\r\n s2 = s2.split()\r\n if len(s1) == len(s2):\r\n return s1 == s2\r\n\r\n if len(s1) > len(s2):\r\n s1, s2 = s2, s1\r\n m, n = len(s1), len(s2)\r\n if s2[:m] == s1 or s2[-m:] == s1:\r\n return True\r\n i = 0\r\n while s1[i] == s2[i]:\r\n i += 1\r\n j = 1\r\n while s1[-j] == s2[-j]:\r\n j += 1\r\n return i + j - 1 == m\r\nprint(areSentencesSimilar(\"My name is Haley\", \"My Haley\")) \r\n\"\"\"\r\nimport collections\r\n#5708. Count Nice Pairs in an Array\r\n\"\"\"\r\ndef countNicePairs(nums):\r\n d = collections.defaultdict(int)\r\n n = len(nums)\r\n if n == 1:\r\n return 0\r\n for num in nums:\r\n diff = num - int(str(num)[::-1])\r\n d[diff] += 1\r\n print(d)\r\n return sum( x * (x - 1) // 2 for x in d.values())\r\nprint(countNicePairs(nums = [42,11,1,97])) % (10 ** 9 + 7)\r\n\"\"\"\r\n\r\n#5707. Maximum Number of Groups Getting Fresh Donuts\r\ndef maxHappyGroups(batchSize, groups):\r\n res = [group % batchSize for group in groups]\r\n print(res)\r\n\r\n return\r\nprint(maxHappyGroups(batchSize = 4, groups = [1,3,2,5,2,2,1,6]))","sub_path":"Leetcode/contest_leetcode.py","file_name":"contest_leetcode.py","file_ext":"py","file_size_in_byte":40623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
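The record above ends with maxHappyGroups cut off right after computing the remainders. For completeness, a sketch of one standard way to finish it: a memoized search over remainder counts. This is my completion under the usual problem statement (a group is happy iff no donuts are left over when it starts serving), not the record author's intended solution.

```python
from functools import lru_cache

def max_happy_groups(batch_size, groups):
    # Tally how many groups leave each remainder modulo the batch size.
    counts = [0] * batch_size
    for g in groups:
        counts[g % batch_size] += 1

    # Groups that are an exact multiple of the batch are happy anywhere.
    happy = counts[0]
    counts[0] = 0

    @lru_cache(maxsize=None)
    def dfs(state, left):
        # state: remaining count per remainder; left: total served so far
        # mod batch_size. A group is happy iff left == 0 when it starts.
        best = 0
        for r in range(1, batch_size):
            if state[r]:
                nxt = list(state)
                nxt[r] -= 1
                gained = 1 if left == 0 else 0
                best = max(best, gained + dfs(tuple(nxt), (left + r) % batch_size))
        return best

    return happy + dfs(tuple(counts), 0)

print(max_happy_groups(4, [1, 3, 2, 5, 2, 2, 1, 6]))  # 4
```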
+{"seq_id":"472317919","text":"#-------------------------------------------------------------------------------\n# configuration_reader.py\n#\n# Copyright (C) 2013, Shinya Takamaeda-Yamazaki\n# License: Apache 2.0\n#-------------------------------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport sys\nimport re\n\nfrom flipsyrup.configuration_reader.resource_definition import OnchipMemoryDefinition\nfrom flipsyrup.configuration_reader.resource_definition import OffchipMemoryDefinition\n\n#-------------------------------------------------------------------------------\n# Reading Resource Definitions from an input file\n#-------------------------------------------------------------------------------\n\ndef to_int(v):\n m = re.match('(\\d+)(K|k)', v)\n if m: return int(m.group(1)) * 1024\n m = re.match('(\\d+)(M)', v)\n if m: return int(m.group(1)) * 1024 * 1024\n m = re.match('(\\d+)(G)', v)\n if m: return int(m.group(1)) * 1024 * 1024 * 1024\n return int(v)\n\ndef readResourceDefinitions(config):\n if 'BRAM' not in config:\n raise ValueError(\"BRAM parameters are not defined.\")\n if 'DRAM' not in config:\n raise ValueError(\"DRAM parameters are not defined.\")\n\n config['BRAM']['size']\n bram_size = 32 * 1024 if 'size' not in config['BRAM'] else to_int(config['BRAM']['size'])\n bram = OnchipMemoryDefinition('BRAM', bram_size)\n\n config['DRAM']['size']\n dram_size = 128 * 1024 * 1024 if 'size' not in config['DRAM'] else to_int(config['DRAM']['size'])\n dram_width = 128 if 'width' not in config['DRAM'] else int(config['DRAM']['width'])\n dram_addrlen = 32 if 'addrlen' not in config['DRAM'] else int(config['DRAM']['addrlen'])\n dram = OffchipMemoryDefinition('DRAM', dram_size, dram_width, dram_addrlen)\n\n resourcelist = [bram, dram]\n return resourcelist\n","sub_path":"flipsyrup/configuration_reader/configuration_reader.py","file_name":"configuration_reader.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"245069033","text":"#my alphabet\nmyalphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\ndef getIndex(c):\n\tc = c.upper()\n\treturn myalphabet.index(c)\ndef charNum(i):\n\ti = i % len(myalphabet)\n\treturn myalphabet[i]\ndef prepareText(text):\n\tprocesses = \"\"\n\t#removes punctuation from plaintext\n\tfor s in text:\n\t\tif s!= '.' and s!= ',' and s!= ' ' and s!= '!' and s!= '?' and s!= '\\'':\n\t\t\tprocesses = processes + s.upper()\n\t#converts to uppercase\n\t#processes = processes.upper() \n\treturn processes\ndef affine_encode(plaintext, a, b):\n\tprocess = \"\"\n\tcipherFinal = \"\"\n\tmodulusValue = len(myalphabet)\n\t#removes punctuation from plaintext\n\tfor s in plaintext:\n\t\tif s!= '.' and s!= ',' and s!= ' ' and s!= '!' and s!= '?' and s!= '\\'':\n\t\t\tprocess+=s\n\t#converts to uppercase\n\tprocess = process.upper() \n\t# converts each character using\ty=ax+b(mod 26)\n\tfor letter in process:\n\t\tind = myalphabet.index(letter)\n\t\tstep1 = ind * a\n\t\tstep2 = step1 + b\n\t\tstep3 = step2 % modulusValue\n\t\tchar = myalphabet[step3]\n\t\tcipherFinal+= char\n\t# returns the ciphertext string\n\treturn cipherFinal\ndef affine_decode(ciphertext, c, d):\n\tstringproc = \"\"\n\tplainFinal = \"\"\n\tmodulusVal = len(myalphabet)\n\t#return plainFinal\n\t# strip\tpunctuation\tfrom ciphertext###\n\t#convert to\tuppercase###\n\tfor s in ciphertext:\n\t\tif s!= '.' and s!= ',' and s!= ' ' and s!= '!' and s!= '?' and s!= '\\'':\n\t\t\tstringproc+=s\n\tstringproc = stringproc.upper()\n\t# converts each character using\tx=cy+d (mod\t26)\n\tfor letters in stringproc:\n\t\tindex = myalphabet.index(letters)\n\t\tstepone = index * c\n\t\tsteptwo = stepone + d\n\t\tstepthr = steptwo % modulusVal\n\t\tchars = myalphabet[stepthr]\n\t\tplainFinal += chars\n\t# note the (c,d) pair are the inverse coefficients of\n\t#the(a,b) pair used to encode\n\t# returns the plaintext\tstring\n\treturn plainFinal\ndef affine_crack(c1, p1, c2, p2):\n\treturn c2\n\t#o c1,p1,c2,p2\tare\tcharacters\t\n\t# c1\tis\tthe\tencoded\tchar\tof\tp1\n\t# c2\tis\tthe\tencoded\tchar\tof\tp2\n\t# returns\ta\tpair\t(c,d)\tto\tuse\tin\taffine_decode\n\t# solves\ta\tlinear\tsystem\n\t# result:\tp1\t=\tc\t*\tc1\t+\td2\tand\tp2\t=\tc\t*\tc2\t+\td2\t(mod\t26)\n\t# returns\tthe\tpair\t(c,\td)\tor\tNone\tif\tno\tsolution\tcan\tbe\tfound\ndef mod_inverse(a,m):\n\tx = 1\n\tfor i in range(0,m-1):\n\t\tif (a*i) % m == 1:\n\t\t\tx = i\n\t\t\tbreak\n\treturn x \ndef affine_encode_digraph(plaintext, a, b):\n\tmodulusValue = len(myalphabet)\n\tplaint = prepareText(plaintext)\n\tciphertext = \"\"\n\tif len(plaint)%2 == 1:\n\t\tplaint += \"X\"\n\tfor i in range(0, len(plaint)//2):\n\t\tciphertext += intToDigraph(((digraphToInt(plaint[2*i] + plaint[2*i+1])*a)+b)%(26**2))\n\treturn ciphertext #####\ndef affine_decode_digraph(ciphertext, c, d):\n\tciphert = prepareText(ciphertext)\n\tif len(ciphert) % 2 == 1:\n\t\tciphert += \"X\"\n\tplaintext = \"\"\n\tfor i in range (0,len(ciphert)//2):\n\t\tplaintext += intToDigraph(((digraphToInt(ciphert[2*i]+ciphert[2*i+1])*c)+d)%(26**2))\n\treturn plaintext\ndef digraphToInt(s):\n one = myalphabet.index(s[0])*26\n two = myalphabet.index(s[1])\n return one + two ####\ndef intToDigraph(i):\n thingone = charNum((i-i%26)//26)\n thingtwo = charNum(i)\n return thingone + thingtwo ####\n #return a digraph computer from the integer i\n\n#problem set 2\n#last few questions\n#messageToCode1 = \"HELLO\"\nmessageToCode = \"I can use modular equations to encode messages\"\na = 5\nb = 10\nciphered = 
affine_encode_digraph(messageToCode,a,b)\nprint(ciphered)\nprint(mod_inverse(5,676))\nprint(5410 % 676)\n\nc = 541\nd = -2\ndecrypted = affine_decode_digraph(ciphered,c,d)\nprint(decrypted)\n\nprint()\nprint()\nmyMessage = \"baby\"\nmyA = 117\nmyB = -35\nprint(affine_encode_digraph(myMessage,myA,myB))\nprint()\ntheMessage = \"MQQO\"\ntheA = 31 # mod 5...\ntheB = 29 # mod 3...\n\ntheC = 21\ntheD = -11\nprint(affine_decode_digraph(theMessage,theC,theD))\n#encodeMe = \"On November 15th I will be 17\"\n#encodeMe = \"my name is Swetha\"\n#cipher = affine_encode(encodeMe,a,b)\n#print(cipher)\n\n#print(mod_inverse(5,36)) #29\n#print(290 % 36) #2\n\n#c = 29 #inverses\n#d = -2\n#decoded = affine_decode(cipher,c,d)\n#print(decoded)\n\n#the table\n#for i in range(26,40):\n#\tprint(i*2, '|', ((i*2)+1), '|', ((i*2)+1)*(i*2))\n","sub_path":"Cryptography/TESTPREPSTUFF/affineDigraphs.py","file_name":"affineDigraphs.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
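affine_crack in this record is left as a stub that just returns c2, with comments describing the intended two-equation modular solve. A sketch of that solve, assuming the same 26-letter alphabet; the helper names and sample letters are mine, and the inverse search is a variant of the record's mod_inverse that reports failure instead of defaulting to 1.

```python
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def mod_inverse(a, m):
    # Brute-force search for the multiplicative inverse of a modulo m.
    for i in range(1, m):
        if (a * i) % m == 1:
            return i
    return None  # no inverse when gcd(a, m) != 1

def affine_crack(c1, p1, c2, p2):
    # Known-plaintext attack on the decode map p = c*y + d (mod 26):
    # subtracting the two equations eliminates d, then c recovers d.
    y1, x1 = ALPHABET.index(c1), ALPHABET.index(p1)
    y2, x2 = ALPHABET.index(c2), ALPHABET.index(p2)
    inv = mod_inverse((y1 - y2) % 26, 26)
    if inv is None:
        return None
    c = ((x1 - x2) * inv) % 26
    d = (x1 - c * y1) % 26
    return c, d

# Round trip: encode with (a, b) = (5, 10), crack the decode pair from two
# known letters, and confirm the cracked pair inverts the cipher.
a, b = 5, 10
enc = lambda ch: ALPHABET[(a * ALPHABET.index(ch) + b) % 26]
c, d = affine_crack(enc('H'), 'H', enc('E'), 'E')
dec = lambda ch: ALPHABET[(c * ALPHABET.index(ch) + d) % 26]
assert (c, d) == (21, 24) and dec(enc('Q')) == 'Q'
```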
+{"seq_id":"265763255","text":"from collections import deque\r\n\r\ndx = [0,0,1,-1]\r\ndy = [1,-1,0,0]\r\n\r\nn,m = map(int,input().split())\r\n\r\na = [ list(map(int,input().split())) for _ in range(n) ]\r\n\r\npos = []\r\n\r\nfor i in range(n):\r\n for j in range(n):\r\n if a[i][j] == 2:\r\n pos.append((i,j))\r\n\r\nans = -1\r\ndef bfs():\r\n d = [[-1]*n for _ in range(n) ]\r\n q = deque()\r\n for i in range(n):\r\n for j in range(n):\r\n if a[i][j] == 3:\r\n q.append((i,j))\r\n d[i][j] = 0\r\n\r\n while q:\r\n x,y = q.popleft()\r\n for k in range(4):\r\n nx,ny = x+dx[k], y+dy[k]\r\n if 0 <= nx < n and 0 <= ny < n:\r\n if 0 <= nx < n and 0 <= ny < n:\r\n if a[nx][ny] != 1 and d[nx][ny] == -1:\r\n d[nx][ny] = d[x][y] + 1\r\n q.append((nx,ny))\r\n cur = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if a[i][j] == 0:\r\n if d[i][j] == -1:\r\n return\r\n if cur < d[i][j]:\r\n cur = d[i][j]\r\n\r\n global ans\r\n if ans == -1 or ans> cur:\r\n ans = cur\r\n \r\ndef go(index,cnt):\r\n if index == len(pos):\r\n if cnt == m:\r\n bfs()\r\n else:\r\n x,y = pos[index]\r\n a[x][y] = 3\r\n go(index+1,cnt+1)\r\n a[x][y] = 2\r\n go(index+1,cnt)\r\n\r\ngo(0,0)\r\nprint(ans)\r\n","sub_path":"python/연구소3.py","file_name":"연구소3.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"346915540","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.datasets import mnist\nfrom keras.layers import Lambda, Input, Dense\nfrom keras.losses import binary_crossentropy\nfrom keras.models import Model\n\n\n# reparameterization trick\n# instead of sampling from Q(z|X), sample epsilon = N(0,I)\n# z = z_mean + sqrt(var) * epsilon\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling from an isotropic unit Gaussian.\n\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean = 0 and std = 1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\ndef supervised_train(x_train, y_train, latent_dim):\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(latent_dim,)),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n ])\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(x_train, y_train, epochs=10)\n model.save_weights('model/encoder_FFN/model.h5')\n\n\ndef supervised_test(x_test, y_test, latent_dim):\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(latent_dim,)),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n ])\n\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.load_weights('model/encoder_FFN/model.h5')\n scores = model.evaluate(x_test, y_test)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\n\n\n# MNIST dataset\n(x_train, y_train), (_, _) = mnist.load_data()\npart_2 = x_train[42000:54000]\nlabel_2 = y_train[42000:54000]\npart_3 = x_train[54000:60000]\nlabel_3 = y_train[54000:60000]\nx_train = part_2\ny_train = label_2\nx_test = part_3\ny_test = label_3\n\nimage_size = x_train.shape[1]\noriginal_dim = image_size * image_size\nx_train = np.reshape(x_train, [-1, original_dim])\nx_test = np.reshape(x_test, [-1, original_dim])\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# network parameters\ninput_shape = (original_dim,)\nintermediate_dim = 512\nbatch_size = 128\nlatent_dim = 20\nepochs = 50\n\n# VAE model = encoder + decoder\n# build encoder model\ninputs = Input(shape=input_shape, name='encoder_input')\nx = Dense(intermediate_dim, activation='relu')(inputs)\nz_mean = Dense(latent_dim, name='z_mean')(x)\nz_log_var = Dense(latent_dim, name='z_log_var')(x)\n\n# use reparameterization trick to push the sampling out as input\n# note that \"output_shape\" isn't necessary with the TensorFlow backend\nz = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])\n\n# instantiate encoder model\nencoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\n\n# build decoder model\nlatent_inputs = Input(shape=(latent_dim,), name='z_sampling')\nx = Dense(intermediate_dim, activation='relu')(latent_inputs)\noutputs = Dense(original_dim, activation='sigmoid')(x)\n\n# instantiate decoder model\ndecoder = Model(latent_inputs, outputs, 
name='decoder')\n\n# instantiate VAE model\noutputs = decoder(encoder(inputs)[2])\nvae = Model(inputs, outputs, name='vae_mlp')\n\nmodels = (encoder, decoder)\n\n# VAE loss\nreconstruction_loss = binary_crossentropy(inputs,\n outputs)\n\nreconstruction_loss *= original_dim\nkl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\nkl_loss = K.sum(kl_loss, axis=-1)\nkl_loss *= -0.5\nvae_loss = K.mean(reconstruction_loss + kl_loss)\nvae.add_loss(vae_loss)\nvae.compile(optimizer='adam')\nvae.load_weights('model/vae/vae_mlp_mnist.h5')\n\n# z_mean, _, _ = encoder.predict(x_train,\n# batch_size=batch_size)\n# supervised_train(z_mean, y_train, latent_dim)\n\nz_mean, _, _ = encoder.predict(x_test,\n batch_size=batch_size)\nsupervised_test(z_mean, y_test, latent_dim)\n","sub_path":"HW4/encoder_FFN.py","file_name":"encoder_FFN.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
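The sampling() helper in this record is the reparameterization trick: draw eps from N(0, I) and compute z = mu + sigma * eps, so the randomness lives in eps while mu and the log-variance stay differentiable. A dependency-light numpy restatement, with toy zero tensors chosen only to show the shapes:

```python
import numpy as np

rng = np.random.default_rng(0)

def sample_z(z_mean, z_log_var):
    # z = mu + sigma * eps with eps ~ N(0, I); sigma = exp(0.5 * log_var).
    eps = rng.standard_normal(z_mean.shape)
    return z_mean + np.exp(0.5 * z_log_var) * eps

z_mean = np.zeros((4, 20))     # batch of 4, latent_dim = 20 as in the record
z_log_var = np.zeros((4, 20))  # log variance 0 -> unit Gaussian around z_mean
print(sample_z(z_mean, z_log_var).shape)  # (4, 20)
```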
+{"seq_id":"493692613","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport re\nimport csv\nimport argparse\nimport numpy as np\nfrom collections import namedtuple\n\nDEBUG = 1\n\nOneTestPoint = namedtuple('TestPoint', ['qp', 'fps', 'bit_rate',\n 'psnr_y', 'psnr_u', 'psnr_v'])\n\n\ndef BDRate(PSNR1, BR1, PSNR2, BR2):\n lBR1 = np.log(BR1)\n p1 = np.polyfit( PSNR1, lBR1, 3)\n\n lBR2 = np.log(BR2)\n p2 = np.polyfit( PSNR2, lBR2, 3)\n\n min_int = max(min(PSNR1), min(PSNR2))\n max_int = min(max(PSNR1), max(PSNR2))\n\n # find integral\n p_int1 = np.polyint(p1)\n p_int2 = np.polyint(p2)\n\n int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)\n int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)\n\n # find avg diff\n avg_exp_diff = (int2-int1)/(max_int-min_int)\n avg_diff = (np.exp(avg_exp_diff)-1)*100\n\n return avg_diff\n\n\ndef BDPSNR(PSNR1, BR1, PSNR2, BR2):\n lBR1 = np.log10(BR1)\n p1 = np.polyfit( lBR1, PSNR1, 3)\n\n lBR2 = np.log10(BR2)\n p2 = np.polyfit( lBR2, PSNR2, 3)\n\n min_int = max(min(lBR1), min(lBR2))\n max_int = min(max(lBR1), max(lBR2))\n\n # find integral\n p_int1 = np.polyint(p1)\n p_int2 = np.polyint(p2)\n\n int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)\n int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)\n\n # find avg diff\n avg_diff = (int2-int1)/(max_int-min_int)\n\n return avg_diff\n\n\ndef read_results_from_csv(output_name):\n csv_name = output_name+'.csv'\n if DEBUG:\n dir = os.getcwd()\n sys.stdout.write(\"Reading %s\\n\" %(dir + os.sep + csv_name) )\n csv_file = open(csv_name, 'r')\n reader = csv.reader(csv_file, dialect='excel')\n current_dict = {}\n name_idx_dict = {'name': 0, 'qp': 1, 'fps': 2, 'bitrate': 3,\n 'psnry': 4, 'psnru': 5, 'psnrv': 6}\n\n for idx, item in enumerate(reader[0]):\n if item in name_idx_dict:\n name_idx_dict[item] = idx\n\n for row in reader[1:]:\n name = row[name_idx_dict['name']]\n if not current_dict.has_key(name):\n current_dict[name] = {}\n qp = row[name_idx_dict['qp']]\n if not current_dict[name].has_key(qp):\n current_dict[name][qp] = {}\n current_dict[name][qp] = OneTestPoint(qp,\n row[name_idx_dict['fps']],\n row[name_idx_dict['bitrate']],\n row[name_idx_dict['psnry']],\n row[name_idx_dict['psnru']],\n row[name_idx_dict['psnrv']])\n csv_file.close()\n return current_dict\n\n\ndef write_testpoint_to_csv(exe_path, result_path, TestPoint_dict):\n exe_name = ((exe_path.split(os.sep))[-1])\n result_name = '%s' %(result_path) + os.sep +'Result_%s.csv' %exe_name\n result_file = open(result_name, 'a+')\n result_file.write('name,points,qp,fps,bitrate,psnry,psnru,psnrv\\n')\n for yuv_item in TestPoint_dict:\n cur_yuv = sorted(TestPoint_dict[yuv_item])\n for qp in cur_yuv:\n test_point = TestPoint_dict[yuv_item][qp]\n result_file.write('%s,%d,%d,%f,%f,%f,%f,%f\\n'\n %(yuv_item, qp,\n test_point.qp, test_point.fps, test_point.bit_rate,\n test_point.psnr_y, test_point.psnr_u, test_point.psnr_v))\n result_file.close()\n\n\ndef calculate_from_two_dicts(result_file, dict1, dict2):\n #here the calculation refers to https://github.com/serge-m/bjontegaard2/blob/master/bjontegaard2.m\n for name in dict1:\n if dict2.has_key(name):\n # found the the matched yuv\n PSNR1 = []\n PSNR2 = []\n UPSNR1 = []\n UPSNR2 = []\n BR1 = []\n BR2 = []\n\n FPS1 = []\n FPS2 = []\n for qp in dict1[name]:\n if dict2[name].has_key(qp):\n # found the match qp\n PSNR1.append(dict1[name][qp].psnr_y)\n PSNR2.append(dict2[name][qp].psnr_y)\n\n UPSNR1.append(dict1[name][qp].psnr_u)\n 
UPSNR2.append(dict2[name][qp].psnr_u)\n\n BR1.append(dict1[name][qp].bit_rate)\n BR2.append(dict2[name][qp].bit_rate)\n\n FPS1.append(dict1[name][qp].fps)\n FPS2.append(dict2[name][qp].fps)\n\n if len(PSNR1) == 4:\n # have enough points to calculate\n PSNR1 = sorted(PSNR1)\n PSNR2 = sorted(PSNR2)\n UPSNR1 = sorted(UPSNR1)\n UPSNR2 = sorted(UPSNR2)\n\n BR1 = sorted(BR1)\n BR2 = sorted(BR2)\n\n avg_U_bd_diff = BDRate(UPSNR1, BR1, UPSNR2, BR2)\n avg_bd_diff = BDRate(PSNR1, BR1, PSNR2, BR2)\n avg_PSNR_diff = BDPSNR(PSNR1, BR1, PSNR2, BR2)\n avg_fps_diff = 0\n for i in range(len(FPS1)):\n avg_fps_diff+= (FPS2[i]-FPS1[i])*100/FPS1[i]\n avg_fps_diff = avg_fps_diff/len(FPS1)\n result_file.write(str([name, avg_fps_diff, avg_bd_diff, avg_U_bd_diff])+'\\n')\n\n\n\nif __name__ == '__main__':\n argParser = argparse.ArgumentParser()\n argParser.add_argument(\"-log1\", nargs='?', default=None, help=\"log1.csv\")\n argParser.add_argument(\"-log2\", nargs='?', default=None, help=\"log2.csv\")\n args = argParser.parse_args()\n\n # filename: Result_Cisco_Absolute_Power_1280x720_30_1121_EnblFsOpen.csv\n if os.path.isfile(args.log1) and os.path.isfile(args.log2):\n match_re = re.compile(r'Result_(.*).csv')\n name1 = name2 = None\n r = match_re.search(args.log1)\n if r is not None:\n name1 = (r.groups()[0])\n r = match_re.search(args.log2)\n if r is not None:\n name2 = (r.groups()[0])\n\n if name1 is not None and name2 is not None:\n result_file = open(\"Result_%s_%s.csv\" %(name1, name2))\n dict1 = read_results_from_csv(name1)\n dict2 = read_results_from_csv(name2)\n calculate_from_two_dicts(result_file, dict1, dict2)\n result_file.close()\n\n\n\n\n\n\n\n\n\n","sub_path":"BDRate.py","file_name":"BDRate.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
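Both helpers above fit a cubic to one rate-distortion axis and integrate the fits over the overlapping quality range. A self-contained check of the BD-rate math with synthetic numbers (all four RD points are invented): a codec spending 10% fewer bits at identical PSNR should score close to -10%.

```python
import numpy as np

def bd_rate(psnr1, br1, psnr2, br2):
    # Cubic fit of log-rate against PSNR, integrated over the shared
    # PSNR interval, mirroring the record's BDRate().
    p1 = np.polyfit(psnr1, np.log(br1), 3)
    p2 = np.polyfit(psnr2, np.log(br2), 3)
    lo = max(min(psnr1), min(psnr2))
    hi = min(max(psnr1), max(psnr2))
    int1 = np.polyval(np.polyint(p1), hi) - np.polyval(np.polyint(p1), lo)
    int2 = np.polyval(np.polyint(p2), hi) - np.polyval(np.polyint(p2), lo)
    return (np.exp((int2 - int1) / (hi - lo)) - 1) * 100

psnr = [32.0, 34.5, 36.8, 38.9]
rate_a = [1000.0, 1600.0, 2500.0, 4000.0]
rate_b = [r * 0.9 for r in rate_a]  # 10% cheaper at the same quality
print(round(bd_rate(psnr, rate_a, psnr, rate_b), 2))  # -10.0
```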
+{"seq_id":"349494333","text":"class Ex():\r\n def __init__(self, id=None, name=None, time=None, end_time=None, location=None, location_name=None, price=None, description=None, web_sales=None):\r\n self.id = id\r\n self.name = name\r\n self.time = time\r\n self.end_time = end_time\r\n self.location = location\r\n self.location_name = location_name\r\n self.price = price\r\n self.description = description\r\n self.web_sales = web_sales\r\n\r\nclass ExResponse():\r\n def __init__(self, status=None, result=None):\r\n self.status = status\r\n self.result = result\r\n","sub_path":"Cloud Functions/Exhibition Retrieve/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"329924559","text":"# importing the multiprocessing module\nimport multiprocessing\nimport os\nfrom multiprocessing import Pool, TimeoutError\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.metrics.pairwise import manhattan_distances\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom numpy.linalg import *\nfrom numpy import *\nfrom math import sqrt\nimport networkx as nx\nimport math\nimport csv\nimport time\nimport random\nfrom scipy import sparse\nimport numpy as np\nfrom scipy.sparse import csc_matrix\nfrom numpy import linalg as LA\nimport sys\nsys.path.insert(0, '../SweepingMethod')\nimport EigenPair as EP\nimport Timing as TG\n\n\n\n\ndef worker1(counter):\n # printing process id\n for i in range (10000):\n print(\"worker: {}\" +str(counter)+ \": \"+ str(i))\n print(\"\\n\")\n return str(counter)\n\n\ndef worker2(args):\n obj = EP.EigenPair()\n vals, vecs= obj.distinct_eigen_pairs_multi(args[0], args[1], args[2], args[3], False)\n\n #print(str((args[1], args[2])))\n #print (\"\\n\")\n #for item in vals:\n # print(str(item))\n # print (\"\\n\")\n return vals, vecs\n\ndef back(counter):\n # printing process id\n for i in range(100000000000):\n print(\"worker2: {}\" + str(i))\n print(\"\\n\")\n\n\ndef worker(A, maximum, minimum, counter):\n obj = EP.EigenPair()\n return obj.distinct_eigen_pairs_multi(A, maximum, minimum, 0.001 , False)\n\ndef experiment1(datadir, graphNames, k=-1):\n eps = [0.001]\n for graphname in graphNames:\n print (\"****************graphname: \" + graphname)\n for ep in eps:\n print (\"****************ep :\" + str(ep))\n obj = EP.EigenPair()\n G = nx.read_edgelist(datadir + \"dataset/\" + graphname + \".txt\")\n obj.update_parameters(G)\n obj.eigen_pairs(ep,k)\n obj.distinct_eigen_pairs(obj.A, obj.maximum, obj.minimum, ep, False )\n if k ==-1:\n obj.log2(datadir, graphname, ep)\n else:\n obj.log3(datadir, graphname, ep)\n\n\n\n print (\"finish\")\n\n\ndef test():\n cpu = multiprocessing.cpu_count()\n args = [1, 2,3, 4]\n\n #while Pool(cpu) as p:\n\n p = Pool(cpu)\n results = p.map(worker1, args)\n\n print (str(results))\n print (\"\\n\")\n\n\ndef merge(results,ep):\n vals = []\n vecs = []\n for item in results:\n valtemps =item[0]\n vectemps = item[1]\n if len(vals)>0 and len(valtemps)>0:\n if abs(valtemps[0]-vals[-1])\n\n\"\"\"Service descriptor.\"\"\"\n\nfrom ggrc.models.reflection import AttributeInfo\nfrom cached_property import cached_property\n\nfrom docbuilder.descriptors.base import Descriptor\nfrom docbuilder.descriptors.model import Model\n\nMOCK_DATA = {\n \"id\": \"1\",\n \"context\": \"\"\"{{ \"id\": 1, \"type\": \"Context\" }}\"\"\",\n \"default_to_current_user\": 'false',\n \"delete\": 'false',\n \"mandatory\": 'true',\n \"read\": 'true',\n \"my_work\": 'true',\n \"name\": '\"Name\"',\n 'object_type': '\"Object Type\"',\n \"tooltip\": '\"Tooltip information\"',\n \"update\": 'false',\n \"type\": '\"{object_type}\"',\n \"created_at\": '\"2015-08-14T14:24:43\"',\n \"updated_at\": '\"2015-08-14T14:24:43\"',\n \"modified_by\": \"\"\"{{ \"id\": 1, \"type\": \"Person\"}}\"\"\",\n \"non_editable\": \"false\",\n \"access_control_list\": \"\"\"[{{}}]\"\"\",\n \"custom_attribute_values\": \"\"\"[{{}}]\"\"\",\n \"custom_attributes\": \"\"\"[{{}}]\"\"\",\n \"custom_attribute_definitions\": \"\"\"[{{}}]\"\"\",\n \"description\": '\"Object description\"',\n \"notes\": \"'Object Notes'\",\n \"object_people\": \"{{}}\",\n \"related_destinations\": \"[]\",\n 
\"related_sources\": \"[]\",\n \"slug\": '\"OBJECT-1\"',\n \"start_date\": '\"2015-08-14\"',\n \"status\": '\"Active\"',\n \"task_group_objects\": \"[]\",\n \"title\": '\"Object title\"',\n \"preconditions_failed\": 'false',\n 'archived': 'false',\n 'assessment_type': 'Control',\n 'assignees': '[]',\n 'audit': '{{ \"id\": 1, \"type\": \"Audit\"}}',\n \"design\": '\"Operationally\"',\n \"end_date\": '\"2015-08-14\"',\n \"finished_date\": '\"2015-08-14\"',\n 'send_by_default': \"false\",\n 'verified': 'false',\n 'verified_date': 'null',\n 'audit_firm': '{{ \"id\": 1, \"type\": \"OrgGroup\"}}',\n 'program': '{{ \"id\": 1, \"type\": \"Program\"}}',\n 'snapshotted_objects': '[]',\n}\n\n\nclass Service(Descriptor):\n \"\"\"Service descriptor.\"\"\"\n\n @classmethod\n def collect(cls):\n \"\"\"Collects all application services.\"\"\"\n from ggrc.services import all_services\n return all_services()\n\n @cached_property\n def name(self):\n \"\"\"Service name.\"\"\"\n return '%s -> %s' % (self.model.name, self.obj.name)\n\n @cached_property\n def table_singular(self):\n \"\"\"Object's table_singualr forme.\"\"\"\n return self.obj.model_class._inflector.table_singular\n\n @cached_property\n def json_value(self):\n \"\"\"Json value\"\"\"\n def func(name):\n return MOCK_DATA.get(name, '\"\"').format(\n object_type=self.obj.model_class.__name__)\n return func\n\n @cached_property\n def url(self):\n \"\"\"Endpoint URL.\"\"\"\n return '/api/%s' % self.obj.name\n\n @cached_property\n def attributes(self):\n \"\"\"Endpoint attributes\"\"\"\n return AttributeInfo.gather_attr_dicts(self.obj.model_class, '_api_attrs')\n\n @cached_property\n def doc(self):\n \"\"\"Doc-stirng of wrapped model class.\"\"\"\n return self.model.doc\n\n @cached_property\n def model(self):\n \"\"\"Descriptor of wrapped model class.\"\"\"\n return Model(self.obj.model_class)\n\n @cached_property\n def readonly(self):\n \"\"\"Is service read-only?\"\"\"\n return self.obj.service_class.__name__.startswith('ReadOnly')\n","sub_path":"src/docbuilder/descriptors/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"208842906","text":"###### Assignment 1 parity check and framing .py file ###########\n\n####### write your code here ##########\n\ndef parity(x): # function for parity bit\n\tz=x.count('1')\n\tif z%2==1:\n\t\treturn 0\n\telse:\n\t\treturn 1\n\nx= input()\nx=str(x)\nz=parity(x)\nz=str(z)\nframe=(x+z)\nprint(frame) #frame after parity bit addition\n\nm=frame.replace('010','0100') #bit stuffing\nu=m[-2:] #for accessing last 2 character\nd='01'\nd=str(d)\nif u == d:\n\tm=m+'0'\n\nfinal= m+'0101' #modified string received at other end \nprint(final)\n\n\t\n\n\n\n\n","sub_path":"ps1.py","file_name":"ps1.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"282768463","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Challenges of 10 Days of Statistics from HackerRank.\n\nThis is a basic tutorial challenge specified by `HackerRank`_.\n\n.. _HackerRank:\n https://www.hackerrank.com/challenges/s10-weighted-mean\n\"\"\"\n\ndef main():\n \"\"\"The main routine.\"\"\"\n n = int(input())\n x = list(map(int, input().strip().split()))\n w = list(map(int, input().strip().split()))\n\n sum(x[i]*w[i] for i in range(n))\n print(\"%.1f\" % (sum(x[i] * w[i] for i in range(n)) / sum(w)))\n\nif __name__ == '__main__':\n main()\n","sub_path":"hackerrank/practice/10_days_of_statistics/day00_weighted_mean.py","file_name":"day00_weighted_mean.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"172854566","text":"from __future__ import unicode_literals\nfrom .models import Course\nfrom django.contrib.messages import error\nfrom django.shortcuts import render, HttpResponse, redirect\n\ndef index(request):\n context = {\n 'courses': Course.objects.all()\n }\n return render(request, 'courses/index.html', context)\n\ndef create(request):\n errors = Course.objects.validate(request.POST)\n if len(errors):\n for field, message in errors.iteritems():\n error(request, message, extra_tags=field)\n \n else:\n Course.objects.create(\n name=request.POST['name'],\n description=request.POST['description']\n )\n return redirect('/')\n\ndef edit(request, course_id):\n context = {\n 'course': Course.objects.get(id=course_id)\n }\n return render(request, 'courses/confirm.html', context)\n\ndef destroy(request, course_id):\n Course.objects.get(id=course_id).delete()\n return redirect('/')\n","sub_path":"apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"148281266","text":"import re\nimport warnings\n\nfrom django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef receiptnumber(receipt):\n warnings.warn(\n 'Use receipt.formatted_number instead.',\n DeprecationWarning,\n stacklevel=2,\n )\n return receipt.formatted_number\n\n\n@register.filter\ndef format_cuit(cuit):\n numbers = re.sub('[^\\\\d]', '', str(cuit))\n if len(numbers) != 11:\n return cuit\n return '{}-{}-{}'.format(\n numbers[0:2],\n numbers[2:10],\n numbers[10:11]\n )\n","sub_path":"django_afip/templatetags/django_afip.py","file_name":"django_afip.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"388798880","text":"#!/usr/bin/env python\n\nimport os.path\nfrom pstats import Stats\nimport requests\nimport logging\n\ntry:\n from urllib.parse import unquote_plus\nexcept ImportError:\n from urllib import unquote_plus\n\nimport tornado.ioloop\nimport tornado.web\n\nfrom snakeviz.stats import table_rows, json_stats\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('snakeviz')\n\nsettings = {\n 'static_path': os.path.join(os.path.dirname(__file__), 'static'),\n 'template_path': os.path.join(os.path.dirname(__file__), 'templates'),\n 'debug': True,\n 'gzip': True\n}\n\n\nclass VizStats(Stats):\n\n def load_stats(self, arg):\n import marshal\n if arg is None:\n self.stats = {}\n return\n elif isinstance(arg, bytes):\n self.stats = marshal.loads(arg)\n elif hasattr(arg, 'create_stats'):\n arg.create_stats()\n self.stats = arg.stats\n arg.stats = {}\n if not self.stats:\n raise TypeError(\"Cannot create or construct a %r object from %r\"\n % (self.__class__, arg))\n return\n\n\nclass VizHandler(tornado.web.RequestHandler):\n\n def get(self, profile_name):\n profile_name = unquote_plus(profile_name)\n\n try:\n proxy_headers = self.request.headers._dict\n proxy_headers.pop('Host', None)\n proxy_params = self.request.query_arguments\n proxy_params['profiling'] = '1'\n proxy_params['dump'] = '1'\n logger.info('headers:' + str(proxy_headers))\n logger.info('querys:' + str(proxy_params))\n r = requests.get(\n 'http://local-api2.cchan.tv/%s' % profile_name,\n headers=proxy_headers, params=proxy_params)\n try:\n json_resp = r.json()\n self.write(json_resp)\n except:\n s = VizStats(r.content)\n self.render(\n 'viz.html', profile_name=profile_name,\n table_rows=table_rows(s), callees=json_stats(s))\n except:\n raise RuntimeError('Could not read %s.' % profile_name)\n\n\nhandlers = [(r'/snakeviz/(.*)', VizHandler)]\n\napp = tornado.web.Application(handlers, **settings)\n\nif __name__ == '__main__':\n app.listen(8080)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"snakeviz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"391023873","text":"import os\nimport json\n\nfrom mongodb_backend.flaskr import models \n\nclass Case:\n '''Holds all data from the api_test_cases.csv file.'''\n def __init__(self, row):\n self.test_case = row[0]\n self.test_id = row[1]\n self.url = row[2]\n self.method = row[3]\n self.request = json.loads(row[4]) if row[4] else {}\n self.username = row[5]\n self.status_code = row[6]\n self.full_response = json.loads(row[7]) if row[7] else {} \n self.json_expression = row[8]\n self.parsed_response = json.loads(row[9]) if row[9] else {} \n\nclass Cases:\n '''Read in all the test cases in the api_test_cases.csv file and parse them into\n a dictionary keyed by test case name.'''\n def __init__(self):\n self.test_case_path = f\"{os.environ['CUSTOM_FF_PATH']}/common/api_test_cases.csv\"\n with open(self.test_case_path) as test_case_file:\n self.test_case_data = test_case_file.read().splitlines()\n if not self.test_case_data:\n raise ValueError(\"The test case data csv file is empty.\")\n self.test_case_data = self.test_case_data[1:]\n self.test_cases = {}\n def get(self):\n for row in self.test_case_data:\n row = row.split('\\t')\n this_test_case = Case(row)\n case_name = this_test_case.test_case\n if case_name not in self.test_cases:\n self.test_cases[case_name] = []\n self.test_cases[case_name].append(this_test_case)\n return self.test_cases","sub_path":"mongodb_backend/tests/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"347709831","text":"__author__ = 'ctp2888'\n'''\nNOTICE:\n\nIt is approaching time to make an \"intelligent\" database and incorporate revision controls.\n\nTake a day to centralize lists and dictionaries within the database file. Ammend code to pull data from there.\nThis should SIGNIFICANTLY reduce memory use and eliminate redundant code.\n'''\n\n# initialize interface and load database files\nfrom database import *\nfrom revision import *\nfrom ovenPrediction import *\nfrom inventory import *\nfrom search import *\n\nRange(\"Dashboard\", \"B5\").value = \"Today's Date: \"+str(todayDT)\n\n\ndef loadHelp(searchString):\n\n return null\n\ndef loadDatalogs():\n\n return null\n\n# Clear Dashboard\ndef resetGUI(newSearch):\n Range(\"Dashboard\", \"B24\").value = \"Ready To Search\"\n Range(\"Dashboard\", \"B29:B34\").value = \"\"\n\n # Clear search parameters ONLY if the \"new search\" button was clicked\n if newSearch:\n Range(\"Dashboard\", \"B13\").value = \"\"\n Range(\"Dashboard\", \"B16\").value = \"\"\n\n# Refresh the inventory panels\ndef resetInventory():\n # Import Lists of Tuples\n rodArray, tubeArray = getInventory()\n\n # Calculate ranges, then post to Excel for VBA calls\n rodRange = \"F4:H\"+str(len(rodArray)+3)\n tubeRange = \"I4:K\"+str(len(tubeArray)+3)\n Range(\"dashResources\", \"G2\").value = \"dashResources!\"+str(rodRange)\n Range(\"dashResources\", \"J2\").value = \"dashResources!\"+str(tubeRange)\n\n # Push results to Dashboard\n Range(\"dashResources\", \"F4\").value = rodArray\n Range(\"dashResources\", \"I4\").value = tubeArray\n\n# Call getDate and return results to dashboard\ndef search():\n resetGUI(False)\n\n # Initiate the 0th iteration, and show no productions scheduled to be completed before today\n Range(\"Dashboard\",\"B24\").value = getDate(getRod(),getLength(),getSize(),0, todayDT)\n '''this range call can and should be eliminated by having get date do it instead'''\n\n size = getSize()\n isRod = getRod()\n stock, price = findInInventory(size, isRod, \"both\")\n if getRod():\n Range(\"dashResources\", \"F4\").value = str(size)\n Range(\"dashResources\", \"G4\").value = int(stock)\n Range(\"dashResources\", \"H4\").value = float(price)\n else:\n Range(\"dashResources\", \"I4\").value = str(size)\n Range(\"dashResources\", \"J4\").value = int(stock)\n Range(\"dashResources\", \"K4\").value = float(price)\n\nresetGUI(False)\n\n","sub_path":"sandbox/ctp2888/Hawk-Resources/Revisions/v4.2.4 (Recent Excel Build)/dash_main.py","file_name":"dash_main.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"107877832","text":"\r\nfrom antlr4 import *\r\nfrom _ast import Expression\r\nimport os\r\nimport re\r\n\r\nfactBase = []\r\n\r\ndef clearFacts():\r\n global factBase #global variable\r\n factBase = []\r\n\r\ndef saveFact(fact):\r\n fact = fact + \".\"\r\n factBase.append(fact)\r\n\r\ndef popFact():\r\n fact = factBase.pop()\r\n return(fact) \r\n #这段代码注释掉了暂时没有什么影响 \r\n'''def printFacts(): \r\n s = len(factBase)\r\n print(\"%method\")\r\n print(\"%print all\") \r\n k = 0\r\n while (k1: #deal with previous expression \r\n pexp = self.expStack.pop() #get the previous expression ʽ\r\n nexp = self.expStack.pop() #get the numbers of labels in the expressioֵ\r\n bexp = self.expStack.pop() #get the current control branch\r\n if nexp == 0:# deal with the expression with no labels, such as assignmentֵ\r\n plabel = self.labelStack.pop()\r\n instr = \"instr(\" + plabel + \", \" + pexp + \", \" + label + \")\"\r\n saveFact(instr)\r\n else: # deal with the expression with labels, such as if, while �����б�ǩ�ı��ʽ ���� if while \r\n nexp = nexp - 1\r\n if nexp > 0:\r\n pexp = pexp + \", \" + label\r\n else:\r\n pexp = pexp + \", \" + label + \")\" \r\n self.expStack.append(bexp) \r\n self.expStack.append(nexp) \r\n self.expStack.append(pexp)\r\n \r\n\r\n ### to check expStack�����ʽջ�� whether there are some expression which without label \r\n ### and within higher level control structure \r\n def checkExpStack(self):\r\n if len(self.labelStack)>1: #deal with previous expression\r\n pexp = self.expStack[-1] #.pop() #get the expression\r\n nexp = self.expStack[-2] #.pop() #get the numbers of labels in the expression\r\n bexp = self.expStack[-3] #.pop() #get the control branch of the expression\r\n #self.expStack.append(bexp)\r\n #self.expStack.append(nexp)\r\n #self.expStack.append(pexp) \r\n if (nexp == 0) and (re.match(self.cbranch, bexp)!=None):\r\n return(True)\r\n return(False) \r\n \r\n #### clear exprssions which without label\r\n def clearPreviousExp(self, label):\r\n count = 0;\r\n while self.checkExpStack() == True: #check the previous expression\r\n pexp = self.expStack.pop() #get the expression\r\n nexp = self.expStack.pop() #get the numbers of labels in the expression\r\n bexp = self.expStack.pop()\r\n plabel = self.labelStack.pop()\r\n instr = \"instr(\" + plabel + \", \" + pexp + \", \" + label + \")\"\r\n print(\"clear:\"+instr)\r\n saveFact(instr)\r\n count = count + 1\r\n return(count) \r\n \r\n ### exchange label stack\r\n def exchangeLabelStack(self):\r\n thenlabel = self.labelStack.pop()\r\n conditionlabel = self.labelStack.pop()\r\n self.labelStack.append(thenlabel)\r\n self.labelStack.append(conditionlabel)\r\n \r\n ### exchange expression stcak\r\n def exchangeExpStack(self): \r\n thenexp = self.expStack.pop()\r\n nthenexp = self.expStack.pop()\r\n bthenexp = self.expStack.pop()\r\n conditionexp = self.expStack.pop()\r\n nconditionexp = self.expStack.pop()\r\n bconditionexp = self.expStack.pop()\r\n self.expStack.append(bthenexp)\r\n self.expStack.append(nthenexp)\r\n self.expStack.append(thenexp)\r\n self.expStack.append(bconditionexp)\r\n self.expStack.append(nconditionexp)\r\n self.expStack.append(conditionexp)\r\n \r\n # Visit a parse tree���������� produced by CParser#����translationUnit.\r\n def visitTranslationUnit(self, ctx):\r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#����externalDeclaration.!!\r\n def visitExternalDeclaration(self, ctx):\r\n return self.visitChildren(ctx)\r\n \r\n \r\n # Visit a parse tree produced by 
CParser#functionDefinition.\r\n def visitFunctionDefinition(self, ctx):\r\n #construct method facts\r\n self.entry_point = self.entry_point + 1\r\n self.firstStatement = True\r\n self.cmethod = \"m\" + ctx.declarator().directDeclarator().directDeclarator().getText()\r\n self.visit(ctx.declarator().directDeclarator().parameterTypeList()) #get the parameter list which is save in self.cmethodp\r\n rettype = self.visit(ctx.declarationSpecifiers())\r\n cc = len(self.cmethodp)\r\n methodd = \"method(\" + self.filen + \", \" + self.cclass \r\n methodd = methodd + \", \" + \"t\" + rettype \r\n methodd = methodd + \"(\" + self.cmethod + \"), \" + \"[\"\r\n i = 0\r\n while ii:\r\n methodd = methodd + \" ,\"\r\n methodd = methodd + \"]\" + \" , \" + self.filen + \"ep_\" + str(self.entry_point) + \")\"\r\n saveFact(methodd)\r\n \r\n #construct entry_point facts and instr facts\r\n self.visit(ctx.compoundStatement())\r\n \r\n self.cmethodp = [] \r\n \r\n \r\n \r\n # Visit a parse tree produced by CParser#declarator.˵����\r\n def visitDeclarator(self, ctx):\r\n if ctx.getChildCount()==2:\r\n modif = self.visit(ctx.getChild(0)) #varaible modification notation such as * �����ķ�� ����*\r\n name = self.visit(ctx.getChild(1)) #variable name\r\n return modif+\"(i\"+name+\")\"\r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#parameterDeclaration.\r\n def visitParameterDeclaration(self, ctx):\r\n type = self.visit(ctx.declarationSpecifiers()) #variable type\r\n name = self.visit(ctx.declarator()) #variable name \r\n self.cmethodp.append(\"t\"+type+\"(i\"+name+\")\") #method parameter\r\n self.declarations[name] = \"t\"+type+\"(i\"+name+\")\" #method parameter declaration\r\n \r\n \r\n # Visit a parse tree produced by CParser#typeSpecifier.����˵����\r\n def visitTypeSpecifier(self, ctx):\r\n return ctx.getText()\r\n \r\n # Visit a parse tree produced by CParser#pointer.ָ��\r\n def visitPointer(self, ctx):\r\n return \"pointer\"\r\n \r\n # Visit a parse tree produced by CParser#directDeclarator.ֱ��˵����\r\n def visitDirectDeclarator(self, ctx):\r\n if ctx.getChildCount()<2 :\r\n return ctx.getText()\r\n if (ctx.getChild(1).getText()==\"[\") and (ctx.getChild(2).getText()==\"]\"):\r\n name = ctx.getChild(0).getText()\r\n return \"array(i\"+name+\")\"\r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#additiveExpression.\r\n def visitAdditiveExpression(self, ctx):\r\n if ctx.getChildCount()==3:\r\n op1 = self.visit(ctx.getChild(0))\r\n op2 = self.visit(ctx.getChild(2))\r\n op = ctx.getChild(1).getText()\r\n exp = \"\"\r\n if op == \"+\":\r\n exp = exp + \"plus(\" + op1 + \", \" + op2 + \")\"\r\n elif op == \"-\":\r\n exp = exp + \"minus(\" + op1 + \", \" + op2 + \")\" \r\n \r\n return(exp)\r\n else: \r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#multiplicativeExpression.���˷���ţ�\r\n def visitMultiplicativeExpression(self, ctx):\r\n if ctx.getChildCount()==3:\r\n op1 = self.visit(ctx.getChild(0))\r\n op2 = self.visit(ctx.getChild(2))\r\n op = ctx.getChild(1).getText()\r\n exp = \"\"\r\n if op == \"*\":\r\n exp = exp + \"multiply(\" + op1 + \", \" + op2 + \")\" \r\n elif op == \"/\":\r\n exp = exp + \"divide(\" + op1 + \", \" + op2 + \")\" \r\n elif op == \"%\":\r\n exp = exp + \"mod(\" + op1 + \", \" + op2 + \")\" \r\n \r\n return(exp)\r\n else: \r\n return self.visitChildren(ctx)\r\n \r\n \r\n # Visit a parse tree produced by CParser#relationalExpression.��ϵ���\r\n def visitRelationalExpression(self, 
ctx):\r\n if ctx.getChildCount()==3:\r\n op = ctx.getChild(1).getText()\r\n if op == \">\":\r\n relation = \"great(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation)\r\n elif op == \"<\":\r\n relation = \"less(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation)\r\n elif op == \">=\":\r\n relation = \"greateq(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation)\r\n elif op == \"<=\":\r\n relation = \"lesseq(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation)\r\n \r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#equalityExpression.��ȷ��\r\n def visitEqualityExpression(self, ctx):\r\n if ctx.getChildCount()==3:\r\n op = ctx.getChild(1).getText()\r\n if op == \"==\":\r\n relation = \"equal(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation)\r\n elif op == \"!=\":\r\n relation = \"notequal(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation)\r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#unaryExpression. һԪ���\r\n def visitUnaryExpression(self, ctx):\r\n if ctx.getChildCount()==2:\r\n op = ctx.getChild(0).getText()\r\n if op == \"!\":\r\n relation = \"not(\"\r\n relation = relation + self.visit(ctx.getChild(1)) + \")\"\r\n return(relation)\r\n return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#logicalAndExpression.�������\r\n def visitLogicalAndExpression(self, ctx):\r\n if ctx.getChildCount() == 3:\r\n op = ctx.getChild(1).getText()\r\n if op == \"&&\":\r\n relation = \"and(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation) \r\n \r\n return self.visitChildren(ctx)\r\n\r\n # Visit a parse tree produced by CParser#logicalOrExpression.����\r\n def visitLogicalOrExpression(self, ctx):\r\n if ctx.getChildCount() == 3:\r\n op = ctx.getChild(1).getText()\r\n if op == \"||\":\r\n relation = \"or(\"\r\n relation = relation + self.visit(ctx.getChild(0)) + \", \"\r\n relation = relation + self.visit(ctx.getChild(2)) + \")\"\r\n return(relation) \r\n return self.visitChildren(ctx)\r\n\r\n\r\n # Visit a parse tree produced by CParser#compoundStatement. �������\r\n def visitCompoundStatement(self, ctx):\r\n return self.visit(ctx.getChild(1))\r\n \r\n \r\n ################ construct variable dictionary #############################################\r\n \r\n # Visit a parse tree produced by CParser#declaration.\r\n def visitDeclaration(self, ctx):\r\n type = self.visit(ctx.declarationSpecifiers())\r\n self.visit(ctx.initDeclaratorList())\r\n ks = self.declarations.keys()\r\n for key in ks:\r\n if self.declarations[key] == \"\":\r\n self.declarations[key] = \"t\" + type + \"(i\" + key + \")\"\r\n #print(self.declarations[key])\r\n #return self.visitChildren(ctx)\r\n \r\n # Visit a parse tree produced by CParser#declarationSpecifiers. 
����˵����\r\n def visitDeclarationSpecifiers(self, ctx):\r\n return ctx.getText()\r\n \r\n # Visit a parse tree produced by CParser#initDeclarator.\r\n def visitInitDeclarator(self, ctx):\r\n self.declarations[ctx.getText()] = \"\"\r\n \r\n ############################### deal with assignment����ֵ�� expressions ##################################\r\n \r\n # Visit a parse tree produced by CParser#assignmentExpression.\r\n def visitAssignmentExpression(self, ctx):\r\n if ctx.getChildCount()==3:\r\n varn = self.visit(ctx.unaryExpression())\r\n exp = self.visit(ctx.assignmentExpression())\r\n assignExp = \"assign(\" + varn + \", \" + exp + \")\"\r\n symbol = ctx.getChild(0).getChild(0).getChild(0).getChild(0).getSymbol()\r\n label = self.getLabel(symbol)\r\n if (self.firstStatement==True):\r\n ep = self.getEntryPoint(label)\r\n self.firstStatement = False\r\n saveFact(ep)\r\n #print(label)\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n self.expStack.append(self.cbranch)\r\n self.expStack.append(0) \r\n self.expStack.append(assignExp) \r\n self.labelStack.append(label) \r\n else: \r\n return self.visitChildren(ctx)\r\n\r\n \r\n\r\n # Visit a parse tree produced by CParser#primaryExpression.\r\n def visitPrimaryExpression(self, ctx):\r\n if ctx.getChildCount() == 1: #deal with variable and constant\r\n operand = ctx.getText()\r\n ks = self.declarations.keys()\r\n if operand in ks:#deal with variable\r\n return(self.declarations[operand])\r\n else: #deal with constant\r\n return(\"const(\" + operand + \")\")\r\n elif ctx.getChildCount() == 3: #deal with (expression), such as return(expression)\r\n if ctx.getChild(0).getText() == \"(\" and ctx.getChild(2).getText() == \")\":\r\n result = self.visit(ctx.getChild(1))\r\n return(result) \r\n else: \r\n return(self.visitChildren(ctx)) \r\n \r\n \r\n ############################### deal with function call ##################################\r\n \r\n # Visit a parse tree produced by CParser#statement.\r\n def visitStatement(self, ctx):\r\n return self.visitChildren(ctx) \r\n \r\n # Visit a parse tree produced by CParser#postfixExpression.\r\n def visitPostfixExpression(self, ctx):\r\n if ctx.getChildCount()==4:#deal with function call\r\n if ctx.getChild(1).getText()==\"(\":\r\n self.functionCallLevel = self.functionCallLevel + 1\r\n arguments = ctx.getChild(2).getChildCount() \r\n funExp = \"m\"+ctx.getChild(0).getText()\r\n funExp = funExp + \"(\" \r\n carg = 0\r\n while carg < arguments: \r\n funExp = funExp + self.visit(ctx.getChild(2).getChild(carg))\r\n carg = carg + 2\r\n if carg < arguments:\r\n funExp = funExp + \", \" \r\n funExp = funExp + \")\"\r\n self.functionCallLevel = self.functionCallLevel - 1\r\n symbol = ctx.getChild(1).getSymbol() \r\n label = self.getLabel(symbol)\r\n if (self.firstStatement==True):\r\n ep = self.getEntryPoint(label)\r\n self.firstStatement = False\r\n saveFact(ep)\r\n if self.functionCallLevel == 0:\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n self.expStack.append(self.cbranch)\r\n self.expStack.append(0) \r\n self.expStack.append(funExp) \r\n self.labelStack.append(label)\r\n else: \r\n return(funExp) \r\n else:\r\n return self.visitChildren(ctx)\r\n\r\n \r\n ############################### deal with return ##################################\r\n \r\n # Visit a parse tree produced by CParser#jumpStatement.\r\n def visitJumpStatement(self, ctx):\r\n if ctx.getChild(0).getText() == \"return\":\r\n symbol = ctx.getChild(0).getSymbol() \r\n label = 
self.getLabel(symbol)\r\n if (self.firstStatement==True):\r\n ep = self.getEntryPoint(label)\r\n self.firstStatement = False\r\n saveFact(ep)\r\n \r\n # process all expression in expStack\r\n while True:\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n if len(self.expStack)==0:\r\n break;\r\n \r\n funExp = \"m\"+ctx.getChild(0).getText()\r\n funExp = funExp + \"(\" \r\n funExp = funExp + self.visit(ctx.getChild(1))\r\n funExp = funExp + \")\"\r\n instr = \"instr(\" + label + \", \" + funExp + \", ret)\"\r\n saveFact(instr)\r\n else: \r\n return self.visitChildren(ctx)\r\n\r\n \r\n ############################### deal with if ##################################\r\n \r\n # Visit a parse tree produced by CParser#selectionStatement.\r\n def visitSelectionStatement(self, ctx):\r\n # label for if\r\n symbol = ctx.getChild(0).getSymbol()\r\n print(symbol)\r\n label = self.getLabel(symbol)\r\n print(label)\r\n print(self.firstStatement)\r\n if (self.firstStatement==True):\r\n ep = self.getEntryPoint(label)\r\n print(\"visitSelectionStatement: \"+ ep)\r\n self.firstStatement = False\r\n saveFact(ep)\r\n if ctx.getChild(0).getText()==\"if\":\r\n print(ctx.getChild(0).getText()) #deal with if\r\n if ctx.getChildCount() == 7:\r\n print(ctx.getChildCount()) #if then else\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n condition = self.visit(ctx.expression())\r\n print(\"condition: \"+ condition)\r\n pexp = \"ite(\"+ condition \r\n self.expStack.append(self.cbranch)\r\n self.expStack.append(2)\r\n self.expStack.append(pexp)\r\n self.labelStack.append(label)\r\n print(\"pexp: \"+ pexp)\r\n print(\"end\")\r\n print(\"cbranch: \" + self.cbranch)\r\n self.cbranch = self.cbranch + \"-0\"\r\n cblist = list(self.cbranch) \r\n #print(self.cbranch) \r\n self.visit(ctx.getChild(4))\r\n print(\"cbranch+ -0: \" + self.cbranch)\r\n #prepare for the else processing: exchange label and exp in stack\r\n #exchange labelStack \r\n self.exchangeLabelStack()\r\n #exchange expStack\r\n self.exchangeExpStack()\r\n \r\n cblist[-1]=\"1\"\r\n cblist[-2]=\"-\"\r\n self.cbranch = \"\".join(cblist) \r\n #print(self.cbranch) \r\n self.visit(ctx.getChild(6))\r\n cblist[-1] = \"\"\r\n cblist[-2] = \"\"\r\n self.cbranch = \"\".join(cblist) \r\n print(\"cbranch: \" + self.cbranch)\r\n #print(self.cbranch) \r\n elif ctx.getChildCount() == 5: #if then \r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n condition = self.visit(ctx.expression())\r\n pexp = \"ite(\"+ condition \r\n print(\"condition: \"+ condition)\r\n self.expStack.append(self.cbranch)\r\n self.expStack.append(2)\r\n self.expStack.append(pexp)\r\n self.labelStack.append(label)\r\n \r\n self.cbranch = self.cbranch + \"-0\"\r\n cblist = list(self.cbranch)\r\n #print(self.cbranch)\r\n self.visit(ctx.getChild(4)) \r\n cblist[-1] = \"\"\r\n cblist[-2] = \"\"\r\n self.cbranch = \"\".join(cblist) \r\n self.exchangeLabelStack()\r\n self.exchangeExpStack()\r\n #print(self.cbranch) \r\n else: \r\n return self.visitChildren(ctx)\r\n\r\n ###### deal with while #######################\r\n # Visit a parse tree produced by CParser#iterationStatement.\r\n def visitIterationStatement(self, ctx):\r\n # label for while\r\n symbol = ctx.getChild(0).getSymbol()\r\n label = self.getLabel(symbol)\r\n \r\n if ctx.getChild(0).getText()==\"while\": #deal with while\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n condition = self.visit(ctx.expression())\r\n pexp = \"while(\"+ condition \r\n 
self.expStack.append(self.cbranch)\r\n self.expStack.append(1)\r\n self.expStack.append(pexp)\r\n self.labelStack.append(label)\r\n \r\n self.cbranch = self.cbranch + \"-0\"\r\n cblist = list(self.cbranch)\r\n #print(self.cbranch)\r\n self.visit(ctx.getChild(4)) \r\n \r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n cblist[-1] = \"\"\r\n cblist[-2] = \"\"\r\n self.cbranch = \"\".join(cblist)\r\n elif ctx.getChild(0).getText()==\"do\": #deal with do while\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n condition = self.visit(ctx.expression())\r\n pexp = \"dowhile(\"+ condition \r\n self.expStack.append(self.cbranch)\r\n self.expStack.append(1)\r\n self.expStack.append(pexp)\r\n self.labelStack.append(label)\r\n \r\n self.cbranch = self.cbranch + \"-0\"\r\n cblist = list(self.cbranch)\r\n #print(self.cbranch)\r\n self.visit(ctx.getChild(1)) \r\n \r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n cblist[-1] = \"\"\r\n cblist[-2] = \"\"\r\n self.cbranch = \"\".join(cblist)\r\n elif ctx.getChild(0).getText()==\"for\": #deal with for\r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n forcondition = self.visit(ctx.forCondition())\r\n pexp = \"for(\"+ forcondition \r\n self.expStack.append(self.cbranch)\r\n self.expStack.append(1)\r\n self.expStack.append(pexp)\r\n self.labelStack.append(label)\r\n \r\n self.cbranch = self.cbranch + \"-0\"\r\n cblist = list(self.cbranch)\r\n #print(self.cbranch)\r\n self.visit(ctx.getChild(4)) \r\n \r\n self.processPreviousExp(label)\r\n self.clearPreviousExp(label)\r\n cblist[-1] = \"\"\r\n cblist[-2] = \"\"\r\n self.cbranch = \"\".join(cblist)\r\n else:\r\n return self.visitChildren(ctx)\r\n","sub_path":"myantlr4c/C2CLP/C2CLPVisitor.py","file_name":"C2CLPVisitor.py","file_ext":"py","file_size_in_byte":26840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
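# The visitor in the record above tracks which branch of a nested statement it
# is inside by appending "-0" (then / loop body) or "-1" (else) to self.cbranch,
# then stripping the suffix again on the way out via cblist[-1] = "" / cblist[-2] = "".
# A minimal standalone sketch of that bookkeeping (the emit callback and the
# starting label "b" are illustrative assumptions, not part of the original):
def visit_if_else(cbranch, emit):
    emit(cbranch + "-0")  # facts recorded while inside the "then" subtree
    emit(cbranch + "-1")  # facts recorded while inside the "else" subtree
    return cbranch        # suffix removed after the statement completes

visit_if_else("b", print)  # prints "b-0" then "b-1"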
+{"seq_id":"207047625","text":"from django.conf.urls import patterns, url\n\n\nurlpatterns = patterns(\n 'api.views',\n url(r'^$', 'articles_list', name='articles_list'),\n url(r'^(?P<pk>[0-9]+)$', 'article_detail', name='article_detail'),\n url(r'^comments/(?P<pk>[0-9]+)$', 'comments_list', name='comments_list'),\n\n)\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
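# The <pk> group name in the record above is a reconstruction: the named groups'
# angle brackets were stripped during extraction, leaving invalid regexes. With
# a conventional name restored, Django passes the captured id to the view as a
# keyword argument:
import re
match = re.match(r'^(?P<pk>[0-9]+)$', '42')
assert match is not None and match.groupdict() == {'pk': '42'}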
+{"seq_id":"590735206","text":"inputMatrix = [[1, 2, 2, 1], [2, 1, 2, 1], [2, 2, 1, 2], [1, 1, 1, 0]]\r\n# inputMatrix is set as a row array of 4 columns with pre-filled data\r\n\r\nSUB = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\n# For edges to look pretty\r\n\r\nsubOne = \"\\N{SUBSCRIPT ONE}\"\r\nsubTwo = \"\\N{SUBSCRIPT TWO}\"\r\nsubThree = \"\\N{SUBSCRIPT THREE}\"\r\nsubFour = \"\\N{SUBSCRIPT FOUR}\"\r\n# For the vertices to look pretty\r\n\r\n\"\"\"\r\nCreate a path using the given directions from the matrix for a walk, trail, path, closed walk, circuit, \r\nand simple circuit.\r\n\"\"\"\r\n\r\n\r\ndef tuple_maker(opt):\r\n path_list = []\r\n if opt == 1:\r\n for x in range(len(inputMatrix)):\r\n if x == 0:\r\n start = f\"v{subOne}\"\r\n elif x == 1:\r\n start = f\"v{subTwo}\"\r\n elif x == 2:\r\n start = f\"v{subThree}\"\r\n else:\r\n start = f\"v{subFour}\"\r\n\r\n for y in range(len(inputMatrix[x])):\r\n if y == 0:\r\n end = f\"v{subOne}\"\r\n elif y == 1:\r\n end = f\"v{subTwo}\"\r\n elif y == 2:\r\n end = f\"v{subThree}\"\r\n else:\r\n end = f\"v{subFour}\"\r\n\r\n for z in range(inputMatrix[x][y]):\r\n path_list.append([start, end])\r\n return path_list\r\n if opt == 2:\r\n for rx in range(len(inputMatrix)):\r\n if rx == 0:\r\n start = f\"v{subOne}\"\r\n elif rx == 1:\r\n start = f\"v{subTwo}\"\r\n elif rx == 2:\r\n start = f\"v{subThree}\"\r\n else:\r\n start = f\"v{subFour}\"\r\n\r\n for y in range(len(inputMatrix[rx])):\r\n if y == 0:\r\n end = f\"v{subOne}\"\r\n elif y == 1:\r\n end = f\"v{subTwo}\"\r\n elif y == 2:\r\n end = f\"v{subThree}\"\r\n else:\r\n end = f\"v{subFour}\"\r\n\r\n for z in range(inputMatrix[rx][y]):\r\n path_list.append([end, start])\r\n return path_list\r\n\r\n\r\nif __name__ == '__main__':\r\n pathSet = tuple_maker(1)\r\n edge_count = 1\r\n rev_edge_count = 1\r\n for path in range(len(pathSet)):\r\n edgeNotation = f\"e{edge_count}\".translate(SUB)\r\n print(str(edgeNotation) + f\": {pathSet[path]}\")\r\n edge_count += 1\r\n\r\n print(\"\\nNow printing reverse...\\n\")\r\n\r\n revPathSet = tuple_maker(2)\r\n for revPath in range(len(revPathSet)):\r\n revEdgeNotation = f\"re{rev_edge_count}\".translate(SUB)\r\n print(str(revEdgeNotation + f\": {revPathSet[revPath]}\"))\r\n rev_edge_count += 1\r\n\r\n print(\"Now constructing walks...\\n\"\r\n \"Given walks desired: Basic Walk, Trail, Path, Closed Walk, Circuit, Simple Circuit\\n\"\r\n \"Constructing 3 parts long minimum\\n\")\r\n \"\"\"\r\n Basic Walk, repeat edges and repeat vertex OK, can start/end same vertex, doesn't need just one edge\r\n Trail, no repeat edges, repeat vertex OK, can start/end same vertex, doesn't need just one edge\r\n Path, no repeat edges or repeat vertices, can't start/end same vertex, doesn't need just one edge\r\n Closed walk, repeat edges and repeat vertex OK, MUST start/end same vertex, doesn't need just one edge\r\n Circuit, no repeat edges, repeat vertex OK, MUST start/end same vertex, DOES need at least one edge\r\n Simple Circuit, no repeat edges, first/last vertex can repeat, MUST start/end same vertex, DOES need at least one edge\r\n \"\"\"\r\n print(f\"Basic Walk: \"\r\n f\"{pathSet[1][0]} -{f'e2'.translate(SUB)}-> {pathSet[1][1]} -{f'e7'.translate(SUB)}-> {pathSet[6][1]}\")\r\n print(f\"Trail: \"\r\n f\"{pathSet[1][0]} -{f'e2'.translate(SUB)}-> {pathSet[1][1]} -{f'e9'.translate(SUB)}-> {pathSet[1][1]}\")\r\n print(f\"Path: \"\r\n f\"{pathSet[1][0]} -{f'e2'.translate(SUB)}-> {pathSet[1][1]} -{f'e10'.translate(SUB)}-> {pathSet[9][1]}\")\r\n print(f\"Closed Walk: \"\r\n 
f\"{pathSet[1][0]} -{f'e2'.translate(SUB)}-> {pathSet[1][1]} -{f'e10'.translate(SUB)}-> {pathSet[9][1]} \"\r\n f\"-{f'e10'.translate(SUB)}-> {pathSet[9][0]} -{f'e2'.translate(SUB)}-> {pathSet[1][0]}\")\r\n print(f\"Circuit: \"\r\n f\"{pathSet[1][0]} -{f'e2'.translate(SUB)}-> {pathSet[1][1]} -{f'e10'.translate(SUB)}-> {pathSet[9][1]} \"\r\n f\"-{f'e15'.translate(SUB)}-> {pathSet[14][1]} -{f'e7'.translate(SUB)}-> {pathSet[6][1]}\")\r\n print(f\"Simple Circuit: \"\r\n f\"{pathSet[0][0]} -{f'e1'.translate(SUB)}-> {pathSet[0][1]}\")\r\n","sub_path":"MatrixWalking_InClass.py","file_name":"MatrixWalking_InClass.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"71870235","text":"from isolation import Board\nfrom sample_players import RandomPlayer, HumanPlayer, GreedyPlayer, open_move_score, center_score, improved_score, null_score\nfrom game_agent import MinimaxPlayer, AlphaBetaPlayer\n\nplayer1 = MinimaxPlayer(search_depth=2, score_fn=improved_score)\nplayer2 = AlphaBetaPlayer(search_depth=3, score_fn=improved_score)\n\ngame = Board(player1, player2)\n\nwinner, history, outcome = game.play(time_limit=9999999)\n\nprint(\"\\nWinner: {}\\nOutcome: {}\".format(\"Player 1\" if winner == player1 else \"Player 2\", outcome))\nprint(game.to_string())\nprint(\"Move history:\\n{!s}\".format(history))\n","sub_path":"duel.py","file_name":"duel.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"604500399","text":"import torch\n\ndef get_model(model_name, num_class=2, model_file=None, **params):\n if 'drop_prob' not in params:\n drop_prob = 0\n else:\n drop_prob = params['drop_prob']\n\n if model_name == 'cls_3d':\n from libs.neural_networks.model.cls_3d import Cls_3d\n model = Cls_3d(n_class=num_class, dropout_prob=drop_prob)\n\n # region medical net\n if model_name == 'medical_net_resnet34':\n from libs.neural_networks.model.MedicalNet.resnet import resnet34, Resnet3d_cls\n base_model = resnet34(output_type='classification')\n model = Resnet3d_cls(base_model=base_model, n_class=num_class, block_type='BasicBlock',\n add_dense1=True, dropout_prob=drop_prob)\n if model_name == 'medical_net_resnet50':\n from libs.neural_networks.model.MedicalNet.resnet import resnet50, Resnet3d_cls\n base_model = resnet50(output_type='classification')\n model = Resnet3d_cls(base_model=base_model, n_class=num_class, block_type='Bottleneck',\n add_dense1=True, dropout_prob=drop_prob)\n if model_name == 'medical_net_resnet101':\n from libs.neural_networks.model.MedicalNet.resnet import resnet101, Resnet3d_cls\n base_model = resnet101(output_type='classification')\n model = Resnet3d_cls(base_model=base_model, n_class=num_class, block_type='Bottleneck',\n add_dense1=True, dropout_prob=drop_prob)\n # endregion\n\n if model_name == 'ModelsGenesis':\n from libs.neural_networks.model.ModelsGenesis.unet3d import UNet3D, TargetNet\n base_model = UNet3D()\n model = TargetNet(base_model, n_class=num_class)\n\n # region 3D ResNet [10, 18, 34, 50, 101, 152, 200]\n from libs.neural_networks.model.model_3d.resnet import generate_model\n \n if model_name == 'resnet18':\n model = generate_model(model_depth=18, n_classes=num_class, n_input_channels=1)\n if model_name == 'resnet34':\n model = generate_model(model_depth=34, n_classes=num_class, n_input_channels=1)\n if model_name == 'resnet50':\n model = generate_model(model_depth=50, n_classes=num_class, n_input_channels=1)\n if model_name == 'resnet101':\n model = generate_model(model_depth=101, n_classes=num_class, n_input_channels=1)\n # endregion\n\n\n if model_file is not None:\n state_dict = torch.load(model_file, map_location='cpu')\n model.load_state_dict(state_dict, strict=False)\n\n return model","sub_path":"libs/neural_networks/model/my_get_model.py","file_name":"my_get_model.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
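# Hypothetical usage of the factory in the record above (the import path is
# taken from the record's sub_path; the checkpoint path is illustrative). Note
# that 'resnet34' previously mapped to model_depth=32, which is fixed above.
from libs.neural_networks.model.my_get_model import get_model

model = get_model('resnet34', num_class=2, drop_prob=0.2)
# model = get_model('medical_net_resnet50', num_class=2, model_file='weights/resnet50.pth')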
+{"seq_id":"337642624","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport math\nimport time\n\nimport numpy as np\nimport torch as th\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom ogb.nodeproppred import DglNodePropPredDataset, Evaluator\n\nfrom scipy import io\nfrom sklearn import metrics\nimport itertools\nfrom matplotlib import pyplot as plt\n\nfrom models import AttenNet\n\nglobal device, in_feats, n_classes, epsilon\n\nfoldername = 'GeniePath'\n\ndevice = None\nin_feats, n_classes = None, None\nepsilon = 1 - math.log(2)\n\n\ndef gen_model(args):\n norm = \"both\" if args.use_norm else \"none\"\n\n if args.use_labels:\n model = AttenNet(\n 'GeniePath',\n in_feats + n_classes,\n n_classes,\n n_hidden=args.n_hidden,\n n_layers=args.n_layers,\n n_heads=args.n_heads,\n activation=F.relu,\n dropout=args.dropout,\n attn_drop=args.attn_drop,\n norm=norm,\n )\n else:\n model = AttenNet(\n 'GeniePath',\n in_feats,\n n_classes,\n n_hidden=args.n_hidden,\n n_layers=args.n_layers,\n n_heads=args.n_heads,\n activation=F.relu,\n dropout=args.dropout,\n attn_drop=args.attn_drop,\n norm=norm,\n )\n\n return model\n\n\ndef cross_entropy(x, labels):\n y = F.cross_entropy(x, labels[:, 0], reduction=\"none\")\n y = th.log(epsilon + y) - math.log(epsilon)\n return th.mean(y)\n\n\ndef compute_acc(pred, labels, evaluator):\n return evaluator.eval({\"y_pred\": pred.argmax(dim=-1, keepdim=True), \"y_true\": labels})[\"acc\"]\n\n\ndef add_labels(feat, labels, idx):\n onehot = th.zeros([feat.shape[0], n_classes]).to(device)\n onehot[idx, labels[idx, 0]] = 1\n return th.cat([feat, onehot], dim=-1)\n\n\ndef adjust_learning_rate(optimizer, lr, epoch):\n if epoch <= 50:\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr * epoch / 50\n\n\ndef train(model, graph, labels, train_idx, optimizer, use_labels):\n model.train()\n\n feat = graph.ndata[\"feat\"]\n\n if use_labels:\n mask_rate = 0.5\n mask = th.rand(train_idx.shape) < mask_rate\n\n train_labels_idx = train_idx[mask]\n train_pred_idx = train_idx[~mask]\n\n feat = add_labels(feat, labels, train_labels_idx)\n else:\n mask_rate = 0.5\n mask = th.rand(train_idx.shape) < mask_rate\n\n train_pred_idx = train_idx[mask]\n\n optimizer.zero_grad()\n pred = model(graph, feat)\n loss = cross_entropy(pred[train_pred_idx], labels[train_pred_idx])\n loss.backward()\n optimizer.step()\n\n return loss, pred\n\n\n@th.no_grad()\ndef evaluate(model, graph, labels, train_idx, val_idx, test_idx, use_labels, evaluator):\n model.eval()\n\n feat = graph.ndata[\"feat\"]\n\n if use_labels:\n feat = add_labels(feat, labels, train_idx)\n\n pred = model(graph, feat)\n train_loss = cross_entropy(pred[train_idx], labels[train_idx])\n val_loss = cross_entropy(pred[val_idx], labels[val_idx])\n test_loss = cross_entropy(pred[test_idx], labels[test_idx])\n\n return (\n compute_acc(pred[train_idx], labels[train_idx], evaluator),\n compute_acc(pred[val_idx], labels[val_idx], evaluator),\n compute_acc(pred[test_idx], labels[test_idx], evaluator),\n train_loss,\n val_loss,\n test_loss,\n )\n\ndef count_parameters(args):\n model = gen_model(args)\n print([np.prod(p.size()) for p in model.parameters() if p.requires_grad])\n return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])\n\n# %% Define class with the model arguments\nclass args:\n cpu = True #Run cpu only if true. 
This overrides the gpu value\n gpu = 0 #Change number if different GPU device ID\n n_runs = 1 #Number of model runs\n n_epochs = 100 #2000 #Number of epochs\n use_labels = False #Use labels in the training set as input features\n use_norm = False #Use symmetrically normalized adjacency matrix\n lr = 0.002 #Learning rate\n n_layers = 1 #3 #Number of layers\n n_heads = 1 #3\n n_hidden = 256 #256 \n dropout = 0.75\n attn_drop = 0.05\n wd = 0\n log_every = 10 #print result every log_every-th epoch\n #plot_curves = True\n\n# Define folder to save plots and model in\nsubfolder = '/layers' + str(args.n_layers) + '-heads' + str(args.n_heads) + '-epochs' + str(args.n_epochs)\n\n# set cpu or gpu\nif args.cpu:\n device = th.device(\"cpu\")\nelse:\n device = th.device(\"cuda:%d\" % args.gpu)\n\n# load data\ndata = DglNodePropPredDataset(name=\"ogbn-arxiv\")\nevaluator = Evaluator(name=\"ogbn-arxiv\")\n\nsplitted_idx = data.get_idx_split()\ntrain_idx, val_idx, test_idx = splitted_idx[\"train\"], splitted_idx[\"valid\"], splitted_idx[\"test\"]\ngraph, labels = data[0]\n\n# add reverse edges\nsrcs, dsts = graph.all_edges()\ngraph.add_edges(dsts, srcs)\n\n# add self-loop\nprint(f\"Total edges before adding self-loop {graph.number_of_edges()}\")\ngraph = graph.remove_self_loop().add_self_loop()\nprint(f\"Total edges after adding self-loop {graph.number_of_edges()}\")\n\nin_feats = graph.ndata[\"feat\"].shape[1]\nn_classes = (labels.max() + 1).item()\n# graph.create_format_()\n\ntrain_idx = train_idx.to(device)\nval_idx = val_idx.to(device)\ntest_idx = test_idx.to(device)\nlabels = labels.to(device)\ngraph = graph.to(device)\n\n# %% Run the model\nval_accs = []\ntest_accs = []\n\n# define model and optimizer\nmodel = gen_model(args)\nmodel = model.to(device)\n\noptimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n# training loop\ntotal_time = 0\nbest_val_acc, best_test_acc, best_val_loss = 0, 0, float(\"inf\")\n\n#save accuracy and loss values\naccs, train_accs, val_accs, test_accs = [], [], [], []\nlosses, train_losses, val_losses, test_losses = [], [], [], []\n\nfor epoch in range(1, args.n_epochs + 1):\n #print(\"Starting Epoch \", epoch)\n \n tic = time.time()\n\n adjust_learning_rate(optimizer, args.lr, epoch)\n \n loss, pred = train(model, graph, labels, train_idx, optimizer, args.use_labels)\n acc = compute_acc(pred[train_idx], labels[train_idx], evaluator)\n\n train_acc, val_acc, test_acc, train_loss, val_loss, test_loss = evaluate(\n model, graph, labels, train_idx, val_idx, test_idx, args.use_labels, evaluator\n )\n \n toc = time.time()\n total_time += toc - tic\n \n #print(\"Epoch run-time \", toc-tic)\n\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_val_acc = val_acc\n best_test_acc = test_acc\n\n if epoch % args.log_every == 0:\n print(f\"\\nEpoch: {epoch}/{args.n_epochs}\")\n print(\n f\"Loss: {loss.item():.4f}, Acc: {acc:.4f}\\n\"\n f\"Train/Val/Test loss: {train_loss:.4f}/{val_loss:.4f}/{test_loss:.4f}\\n\"\n f\"Train/Val/Test/Best val/Best test acc: {train_acc:.4f}/{val_acc:.4f}/{test_acc:.4f}/{best_val_acc:.4f}/{best_test_acc:.4f}\\n\"\n )\n\n for l, e in zip(\n [accs, train_accs, val_accs, test_accs, losses, train_losses, val_losses, test_losses],\n [acc, train_acc, val_acc, test_acc, loss.item(), train_loss, val_loss, test_loss],\n ):\n l.append(e)\n\n# %% Printouts\n\nprint(\"*\" * 50)\nprint(f\"Average epoch time: {total_time / args.n_epochs}\")\nprint(f\"Total Time: {total_time}\")\nprint(f\"Test acc: 
{best_test_acc}\")\nprint()\nprint(\"Val Accs:\", best_val_acc)\nprint(\"Test Accs:\", best_test_acc)\nprint(f\"Number of params: {count_parameters(args)}\")\n\n\n# %% Generate plots of accuracy and loss vs epochs\n\nfig = plt.figure(figsize=(15, 12))\nax = fig.gca()\nax.tick_params(labelright=True)\nfor y, label in zip([train_accs, val_accs, test_accs], [\"train acc\", \"val acc\", \"test acc\"]):\n plt.plot(range(args.n_epochs), y, label=label)\nax.legend(prop={'size': 20})\nax.tick_params(axis='both', labelsize = 20)\nplt.title(\"Accuracy vs Epochs\", fontsize=30)\nplt.ylabel('Accuracy', fontsize=20)\nplt.xlabel('Epochs', fontsize=20)\nplt.grid(which=\"major\", color=\"silver\", linestyle=\"dotted\")\nplt.grid(which=\"minor\", color=\"silver\", linestyle=\"dotted\")\n#plt.tight_layout()\nplt.savefig(foldername + subfolder + \"/accuracy.png\", bbox_inches='tight')\nplt.show()\n\nfig = plt.figure(figsize=(15, 12))\nax = fig.gca()\nax.tick_params(labelright=True)\nfor y, label in zip([train_losses, val_losses, test_losses], \n [\"train loss\", \"val loss\", \"test loss\"]):\n plt.plot(range(args.n_epochs), y, label=label)\nax.legend(prop={'size': 20})\nax.tick_params(axis='both', labelsize = 20)\nplt.title(\"Loss vs Epochs\", fontsize=30)\nplt.ylabel('Loss', fontsize=20)\nplt.xlabel('Epochs', fontsize=20)\nplt.grid(which=\"major\", color=\"silver\", linestyle=\"dotted\")\nplt.grid(which=\"minor\", color=\"silver\", linestyle=\"dotted\")\n#plt.tight_layout()\nplt.savefig(foldername + subfolder + \"/loss.png\", bbox_inches='tight')\nplt.show()\n\n# %% Generate histogram of predicted labels\n\ncategory_names = [\"cs.AI\", \"cs.AR\", \"cs.CC\", \"cs.CE\", \"cs.CG\", \"cs.CL\", \"cs.CR\", \"cs.CV\", \"cs.CY\",\n \"cs.DB\", \"cs.DC\", \"cs.DL\", \"cs.DM\", \"cs.DS\", \"cs.ET\", \"cs.FL\", \"cs.GL\", \"cs.GR\",\n \"cs.GT\", \"cs.HC\", \"cs.IR\", \"cs.IT\", \"cs.LG\", \"cs.LO\", \"cs.MA\", \"cs.MM\", \"cs.MS\",\n \"cs.NA\", \"cs.NE\", \"cs.NI\", \"cs.OH\", \"cs.OS\", \"cs.PF\", \"cs.PL\", \"cs.RO\", \"cs.SC\",\n \"cs.SD\", \"cs.SE\", \"cs.SI\", \"cs.SY\"]\n\n# Get predicted categories\nfeat = graph.ndata[\"feat\"]\npred = model(graph, feat)\npred = pred.argmax(dim=-1, keepdim=True)\n\n# Split predicted categories by train, validate and test sets\ntrain_pred = th.flatten(pred[train_idx]).numpy()\nval_pred = th.flatten(pred[val_idx]).numpy()\ntest_pred = th.flatten(pred[test_idx]).numpy()\n\n# Get the ground truth labels for train set for sorting order later\ntrain_labels = th.flatten(labels[train_idx]).numpy()\n\ntrue_train_freq, train_freq, val_freq, test_freq = [], [], [], []\n\nfor i in range(n_classes):\n true_train_freq.append(np.count_nonzero(train_labels==i))\n train_freq.append(np.count_nonzero(train_pred==i))\n val_freq.append(np.count_nonzero(val_pred==i))\n test_freq.append(np.count_nonzero(test_pred==i))\n\ntrain_freq, val_freq, test_freq = np.array(train_freq), np.array(val_freq), np.array(test_freq)\n\n# Plot histogram in alphabetical order of paper categories\nfig, ax = plt.subplots(figsize=(15, 8))\nax.bar(category_names, train_freq, color = 'tab:blue')\nax.bar(category_names, val_freq, bottom = train_freq, color = 'tab:purple')\nax.bar(category_names, test_freq, bottom = (val_freq + train_freq), color = 'tab:red')\nax.legend(labels=['Train', 'Validate', 'Test'], prop={'size': 15})\nplt.setp(ax.get_xticklabels(), rotation = 90, horizontalalignment = 'center')\nax.tick_params(axis='both', labelsize = 13)\nplt.title(\"Distribution of Predicted Paper Categories\", 
fontdict={'fontsize':25})\nplt.ylabel('Frequency', fontdict={'fontsize':15})\nplt.savefig(foldername + subfolder + \"/pred_class_histogram.png\",bbox_inches='tight')\nplt.show()\n\n# Plot histogram in frequency order of ground truth paper categories for training set\n\nordering = np.argsort(np.array(true_train_freq))\nsorted_train_freq = train_freq[ordering] \nsorted_val_freq = val_freq[ordering]\nsorted_test_freq = test_freq[ordering]\nsorted_names = []\nfor i in ordering:\n sorted_names.append(category_names[i])\n\nfig, ax = plt.subplots(figsize=(15, 8))\nax.bar(sorted_names, sorted_train_freq, color = 'tab:blue')\nax.bar(sorted_names, sorted_val_freq, bottom = sorted_train_freq, color = 'tab:purple')\nax.bar(sorted_names, sorted_test_freq, bottom = (sorted_val_freq + sorted_train_freq), color = 'tab:red')\nax.legend(labels=['Train', 'Validate', 'Test'], prop={'size': 15})\nplt.setp(ax.get_xticklabels(), rotation = 90, horizontalalignment = 'center')\nax.tick_params(axis='both', labelsize = 13)\nplt.title(\"Distribution of Predicted Paper Categories\", fontdict={'fontsize':25})\nplt.ylabel('Frequency', fontdict={'fontsize':15})\nplt.savefig(foldername + subfolder + \"/pred_class_histogram_sorted.png\",bbox_inches='tight')\nplt.show()\n\n# %% Save the data\n\ndict = {'predicted':pred, 'args':args, 'num_params': count_parameters(args),\n 'accs':accs, 'train_accs':train_accs, 'val_accs':val_accs,'test_accs':test_accs, \n 'losses':losses, 'train_losses':train_losses, 'val_losses':val_losses, 'test_losses':test_losses}\nio.savemat(foldername + subfolder + \"/model_results.mat\", dict)\n\n#Info on saving/loading models: https://pytorch.org/tutorials/beginner/saving_loading_models.html\n\n#Save model state only to make predictions\nth.save(model.state_dict(), foldername + subfolder + \"/model_stateonly.pth\")\n\n#Save entire model and optimizer state so we can load and keep training\nth.save({\n 'epoch': epoch,\n 'args':args,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'accs':accs, 'train_accs':train_accs, 'val_accs':val_accs,'test_accs':test_accs, \n 'losses':losses, 'train_losses':train_losses, 'val_losses':val_losses, 'test_losses':test_losses\n }, foldername + subfolder + \"/checkpoint.pth\")\n\n# %% To load the model we would do:\n\n# #Get args and also unpack everything else\n# checkpoint = torch.load(foldername + \"/checkpoint.pth\")\n# args = checkpoint['args']\n# starting_epoch = checkpoint['epoch']\n# accs = checkpoint['accs']\n# train_accs, val_accs, test_accs = checkpoint['train_accs'], checkpoint['val_accs'], checkpoint['test_accs']\n# losses = checkpoint['losses']\n# train_losses, val_losses, test_losses = checkpoint['train_losses'], checkpoint['val_losses'], checkpoint['test_losses']\n\n# #Re-initialize the model and the optimizer\n# model = gen_model(args)\n# model = model.to(device)\n# optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n# #Load the states saved in the checkpoint\n# model.load_state_dict(checkpoint['model_state_dict'])\n# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n# %% Plot the contigency matrices\n\n# Function for plotting the confusion matrix. 
Borrowed from ECE 219 project 2\ndef plot_mat(mat, xticklabels = None, yticklabels = None, pic_fname = None, size=(-1,-1), if_show_values = True, \n num_decimals = 0, colorbar = True, grid = 'k', xlabel = None, ylabel = None, title = None, \n vmin=None, vmax=None, fontsize = {'title':15, 'axislabel': 15, 'small': 10}):\n if size == (-1, -1):\n size = (mat.shape[1] / 3, mat.shape[0] / 3)\n\n fig = plt.figure(figsize=size)\n ax = fig.add_subplot(1,1,1)\n\n # im = ax.imshow(mat, cmap=plt.cm.Blues)\n im = ax.pcolor(mat, cmap=plt.cm.Blues, linestyle='-', linewidth=0.5, edgecolor=grid, vmin=vmin, vmax=vmax)\n \n if colorbar:\n cbar = plt.colorbar(im, aspect = 30) #fraction=0.046, pad=0.07)\n cbar.ax.tick_params(labelsize=fontsize['axislabel']) \n # tick_marks = np.arange(len(classes))\n # Ticks\n lda_num_topics = mat.shape[0]\n nmf_num_topics = mat.shape[1]\n yticks = np.arange(lda_num_topics)\n xticks = np.arange(nmf_num_topics)\n ax.set_xticks(xticks + 0.5)\n ax.set_yticks(yticks + 0.5)\n if xticklabels:\n ax.tick_params(axis='x', labelrotation = 90)\n if xticklabels is None:\n xticklabels = [str(i) for i in xticks]\n if yticklabels is None:\n yticklabels = [str(i) for i in yticks]\n ax.set_xticklabels(xticklabels, fontsize = fontsize['small'])\n ax.set_yticklabels(yticklabels, fontsize = fontsize['small'])\n\n # Minor ticks\n # ax.set_xticks(xticks, minor=True);\n # ax.set_yticks(yticks, minor=True);\n # ax.set_xticklabels([], minor=True)\n # ax.set_yticklabels([], minor=True)\n\n # ax.grid(which='minor', color='k', linestyle='-', linewidth=0.5)\n\n # tick labels on left, right and bottom\n ax.tick_params(labelright = True, labeltop = False)\n\n if ylabel:\n plt.ylabel(ylabel, fontsize=fontsize['axislabel'])\n if xlabel:\n plt.xlabel(xlabel, fontsize=fontsize['axislabel'])\n if title:\n plt.title(title, fontsize=fontsize['title'])\n\n # im = ax.imshow(mat, interpolation='nearest', cmap=plt.cm.Blues)\n ax.invert_yaxis()\n\n # thresh = mat.max() / 2\n\n def show_values(pc, fmt=\"%.\" + str(num_decimals) + \"f\", **kw):\n pc.update_scalarmappable()\n ax = pc.axes\n for p, color, value in itertools.zip_longest(pc.get_paths(), pc.get_facecolors(), pc.get_array()):\n x, y = p.vertices[:-2, :].mean(0)\n if np.all(color[:3] > 0.5):\n color = (0.0, 0.0, 0.0)\n else:\n color = (1.0, 1.0, 1.0)\n ax.text(x, y, fmt % value, ha=\"center\", va=\"center\", color=color, **kw, fontsize=fontsize['small'])\n if if_show_values:\n show_values(im)\n # for i, j in itertools.product(range(mat.shape[0]), range(mat.shape[1])):\n # ax.text(j, i, \"{:.2f}\".format(mat[i, j]), fontsize = 4,\n # horizontalalignment=\"center\",\n # color=\"white\" if mat[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n if pic_fname:\n plt.savefig(pic_fname, dpi=200, facecolor='w', bbox_inches='tight')\n plt.show()\n\n#sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html\n#We normalize against the true labels, so each matrix entry is divided by its row sum\n\n# Get the ground truth labels\n# From the histogram plot, the predicted labels are already in train_pred, val_pred, and test_pred\ntrain_labels = th.flatten(labels[train_idx]).numpy()\nval_labels = th.flatten(labels[val_idx]).numpy()\ntest_labels = th.flatten(labels[test_idx]).numpy()\n\n# Generate the contingency matrix for training set\ntrain_matrix = metrics.confusion_matrix(train_labels, train_pred, normalize='true')\npic_fname = foldername + subfolder + \"/train_matrix.png\"\nplot_mat(train_matrix,xlabel='Cluster 
Class', ylabel='Actual Class', title='Normalized Confusion Matrix', num_decimals = 2,\n xticklabels = category_names, yticklabels = category_names,\n size=(35,30), fontsize = {'title':35, 'axislabel':25, 'small':15}, pic_fname = pic_fname)\n\n# Generate the contingency matrix for validation set\nval_matrix = metrics.confusion_matrix(val_labels, val_pred, normalize='true')\npic_fname = foldername + subfolder + \"/val_matrix.png\"\nplot_mat(val_matrix, xlabel='Cluster Class', ylabel='Actual Class', title='Normalized Confusion Matrix', num_decimals = 2,\n xticklabels = category_names, yticklabels = category_names,\n size=(35,30), fontsize = {'title':35, 'axislabel':25, 'small':15}, pic_fname = pic_fname)\n\n# Generate the contingency matrix for test set\ntest_matrix = metrics.confusion_matrix(test_labels, test_pred, normalize='true')\npic_fname = foldername + subfolder + \"/test_matrix.png\"\nplot_mat(test_matrix, xlabel='Cluster Class', ylabel='Actual Class', title='Normalized Confusion Matrix', num_decimals = 2,\n xticklabels = category_names, yticklabels = category_names,\n size=(35,30), fontsize = {'title':35, 'axislabel':25, 'small':15}, pic_fname = pic_fname)","sub_path":"Attentional Graph Models/genie_path.py","file_name":"genie_path.py","file_ext":"py","file_size_in_byte":18920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
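# adjust_learning_rate in the record above is a linear warmup: for the first
# 50 epochs the effective rate is lr * epoch / 50, after which it stays at lr.
# A quick self-contained check of that schedule:
lr = 0.002
schedule = [lr * e / 50 if e <= 50 else lr for e in range(1, 101)]
assert schedule[0] == lr / 50 and schedule[49] == lr and schedule[99] == lr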
+{"seq_id":"245703667","text":"#!/usr/local/bin/python\n\n\"\"\" assert_first_csv_row.py: \n\t\tAssert the first entry of a specified column in a csv file against a provided value. \n\"\"\"\n\nimport pandas as pd\nimport argparse\n\ndef assert_first_csv_row(filename, field, value):\n\tdf = pd.read_csv(filename, dtype=str, keep_default_na=False)\n\n\tif field not in df:\n\t\traise Exception('CSV does not contain field: %s (%s)'%(field, filename))\n\n\tif not len(df):\n\t\traise Exception('No rows found in %s'%filename)\n\n\tif not df[field][0] == value:\n\t\traise Exception(\"First entry in %s field mismatch \\nGot: %s \\nExpected: %s\" % (field, df[field][0], value))\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description=\"\"\"\n\t\tAssert the first entry of a specified column in a csv file against a provided value.\n\t\texitcode 1 on failure and 0 otherwise\n\t\"\"\")\n\tparser.add_argument('filename', help='path to csv')\n\tparser.add_argument('field', help='name of column to check')\n\tparser.add_argument('value', help='what the column should read')\n\n\targs = parser.parse_args()\n\n\tassert_first_csv_row(**vars(args))\n","sub_path":"tests/utils/assert_first_csv_row.py","file_name":"assert_first_csv_row.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
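# Example of using the checker in the record above as a pipeline guard (the
# file and column names are illustrative); a mismatch raises and exits
# non-zero, which makes it usable as a shell test step:
import subprocess
subprocess.check_call(['python', 'assert_first_csv_row.py', 'results.csv', 'status', 'SUCCEEDED'])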
+{"seq_id":"599957845","text":"import os\nimport subprocess\nfrom os import chdir\nfrom os.path import exists\n\n# Example run:\n# python txt_to_hwp.py --> enter the source text file (txt) --> enter the file to save the result to (txt)\n\n# This Python script takes plain-text content and produces an HWP (Hangul) file in the meeting-minutes format that stenographers write.\n# Because an existing Java converter to HWP already exists, that code is run from inside Python as a shell command (i.e. the Java jar file is executed through a command).\n# The os and subprocess libraries are used to run the command.\n# Comments are written below the code they describe.\n# Steps\n# 1: set up the initial directory\n# 2: define the function that runs the jar file (execute_jar)\n# 2.1: read the source and result file names, with exception handling\n# 2.2: run the jar file through a command (important)\n# 3: run\n\n\n# 1)\nmodified_osgetcwd = os.getcwd().replace(\"\\\\\", \"/\")\n# Store the current directory in a variable for later use\n\njava_src_dir = \"test_hwp\"\njar_src_dir = \"test_hwp/out/artifacts/test_jar/test.jar\"\n# Directories for running the jar file; running java from the command line requires the right directory\n\ndef move_dir():\n if exists(java_src_dir):\n chdir(java_src_dir)\n # Function that changes into the java directory\n\n# 2)\ndef execute_jar(java_file):\n # 2.1)\n print(\"Enter the source file name\")\n source_name = input()\n\n try:\n f = open(source_name, 'rt', encoding='utf-8')\n # Check that the file named in the input actually exists and can be opened\n except FileNotFoundError:\n print(\"Cannot find the file\")\n return\n\n print(\"Enter the name of the result file\")\n result_name = input()\n\n # 2.2)\n subprocess.check_call(['java', \"-jar\", \"-Dfile.encoding=UTF-8\", java_file, source_name, result_name])\n # Run the jar file via a command\n # The command has the form \"java -jar jar_file_to_run\"\n # Here \"-Dfile.encoding=UTF-8\" is a runtime option that keeps Korean text from being garbled\n # source_name and result_name are appended so that they are passed as arguments to the Java main function\n\n# 3)\nmove_dir()\nexecute_jar(modified_osgetcwd + \"/\" + jar_src_dir)","sub_path":"txt_to_hwp.py","file_name":"txt_to_hwp.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"545908430","text":"import sys\nimport pandas as pd\nimport module\nfrom sklearn import preprocessing\nimport numpy as np\n\ndef findMax(series):\n max = series[0]\n for i in range(len(series)-1):\n if(series[i+1] > max):\n max = series[i+1]\n\n #print(max)\n return max\n\ndef findMin(series):\n min = series[0]\n for i in range(len(series)-1):\n if(series[i+1] < min):\n min = series[i+1]\n\n #print(min)\n return min\n \ndef normalization(d):\n \n #print(type(d.X))\n \n cols = d.X.shape[1]\n #print(cols)\n \n for c in range(cols):\n series = d.X.iloc[:,c]\n max = findMax(series)\n min = findMin(series)\n for i in range(len(series)):\n d.X.iloc[i,c] = ((d.X.iloc[i,c]) - min)/ (max - min)\n #print(d.X.iloc[i,c])\n\ndef encode(file):\n \n categorical_cols=[]\n for column_name in file.columns:\n if file[column_name].dtype == object:\n categorical_cols.append(column_name) \n \n file=pd.get_dummies(file,columns=categorical_cols)\n \n return file\n\ndef labelEncode(file):\n \n for column_name in file.columns:\n if file[column_name].dtype == object:\n labelEncoder = preprocessing.LabelEncoder()\n labelEncoder.fit_transform(file[column_name])\n \n #print('inside label encode')\n return file\n \ndef preprocess(m,d):\n \n if m.dataset in d.filenames: # built-in files\n data=d.filenames[m.dataset]()\n file = d.convertToDataFrame(data)\n \n else: # user-given files\n file=pd.read_csv(m.dataset+\".csv\")\n \n \n #print(file)\n n=len(file.columns)\n d.setX(labelEncode(file))\n \n m.setObservations(file.shape[0])\n m.setFeatures(file.shape[1])\n \n \n \ndef setValue(m,arg,j,d):\n \n if arg== \"-d\":\n m.setDataset(sys.argv[j+1])\n preprocess(m,d) \n elif arg==\"-k\":\n m.setK(int(sys.argv[j+1]))\n \n elif arg== \"-en\":\n m.setEpochNumber(sys.argv[j+1])\n \n elif arg==\"-normalize\":\n m.setNormalize(sys.argv[j+1])\n \n else:\n print(arg)\n print(\"Wrong argument given. 
The accepted flags are: -d, -k, -en, -normalize.\");\n\ndef findMinArg(series):\n min = series[0]\n minArg = 0\n for i in range(len(series)-1):\n if(series[i+1] < min):\n min = series[i+1]\n minArg = i+1\n return minArg\n\ndef squaredSum (a, b, m):\n sum = 0\n for i in range(m.features):\n sum += (a[i] - b[i])**2\n return sum\n\ndef euclidean(a, b, m):\n temp = squaredSum(a, b, m)\n return temp**0.5\n\ndef clusterAssignment(m, d):\n m.clusterAssign.clear()\n for j in range(m.observations):\n series = []\n for i in range(m.K): \n series.append(m.distance[i][j])\n selectCluster = findMinArg(series)\n m.clusterAssign.append(selectCluster)\n\ndef SSECalculation(m,d):\n sse = 0\n for i in range(m.observations):\n sse += squaredSum(d.X.iloc[i], m.centroid[m.clusterAssign[i]], m)\n #print(sse)\n return sse\n\ndef findMean(series, m):\n point = []\n for j in range(m.features):\n sum = 0\n for i in range(len(series)):\n #print( i, j)\n sum += series[i][j]\n \n if len(series) != 0:\n point.append(sum/len(series))\n \n return point\n \ndef centroidUpdate(m, d):\n m.centroid.clear()\n for i in range(m.K):\n series = []\n for j in range(m.observations):\n if m.clusterAssign[j] == i:\n series.append(d.X.iloc[j].values.tolist())\n \n print(series)\n m.centroid.append(findMean(series, m))\n\ndef equalCentroid(previous, current):\n flag = True\n for i in range(m.K):\n for j in range(m.features):\n if current[i][j] <= previous[i][j] + m.tol and current[i][j] >= previous[i][j] - m.tol:\n continue\n else:\n flag = False\n break\n \n return flag\n \ndef clusterCalculation(m, d):\n \n en = 1\n dist = []\n while(True):\n if(en > m.epochNumber):\n break\n \n print(\"Epoch no.: \", en)\n m.distance.clear()\n for i in range(len(m.centroid)):\n dist = []\n for j in range(m.observations): \n dist.append(euclidean(m.centroid[i],d.X.iloc[j].values.tolist(), m))\n #print(dist)\n m.distance.append(dist)\n \n print(\"Distance Matrix ... \")\n print(m.distance)\n \n \n clusterAssignment(m, d)\n print(\"Cluster Assignment ... \")\n print(m.clusterAssign)\n \n \n print(\"Sum of Squared Distance\", SSECalculation(m,d))\n \n # keep previous centroid for checking\n previousCentroid = m.centroid.copy()\n centroidUpdate(m,d)\n \n print(\"Updated Centroids ... 
\")\n print(m.centroid)\n \n #print(previousCentroid)\n #print(m.centroid)\n flag = equalCentroid(previousCentroid, m.centroid)\n \n if flag == True:\n break\n \n en = en+1 #increasing epoch\n \ndef printCluster(m, d):\n for i in range(m.K):\n print(\"Cluster # \", i+1)\n for j in range(m.observations):\n if m.clusterAssign[j] == i:\n print(\"Point # \", j , \": \" ,d.X.iloc[j].values.tolist())\n \n \nif __name__==\"__main__\":\n \n print(\"Program name: \",sys.argv[0].replace('py',''))\n print(\"Program name(with type): \",sys.argv[0])\n print(\"Element number of program: \",len(sys.argv))\n print(\"Argument list:\", sys.argv)\n NumofParam= len(sys.argv)\n #print(\"Num of Params= \",NumofParam)\n list=sys.argv\n \n \n m=module.KMeans()\n d=module.Dataset()\n preprocess(m,d)\n \n for i in range(1,NumofParam,2):\n argument=list[i].replace(' ','')\n setValue(m,argument,i,d)\n \n print(\"------------------------\")\n print(\"Parameters Description\")\n print(\"------------------------\")\n print(\"Dataset: \",m.dataset)\n print(\"K: \", m.K)\n print(\"Number of Observations: \", m.observations)\n print(\"Number of Features: \", m.features)\n print(\"Epoch Number: \",m.epochNumber)\n print(\"Normalization: \", m.normalize)\n \n print(\"------------------------\")\n \n #print(m.normalize)\n if m.normalize == \"true\":\n normalization(d)\n \n print(\"After encoding and Normalization - X\")\n print(d.X)\n \n m.initialize(d)\n print(\"Initial Centroids: \", m.centroid)\n \n \n \n \n clusterCalculation(m,d)\n \n print(\"Final Cluster Formation ... \")\n printCluster(m, d)\n \n \n \n","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":6724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
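# normalization() in the k-means record above rescales every column to [0, 1]
# with an explicit double loop over cells; the same min-max transform can be
# written vectorised in pandas, which is equivalent and much faster on large
# frames (the sample frame here is illustrative):
import pandas as pd

X = pd.DataFrame({'a': [2.0, 4.0, 6.0], 'b': [1.0, 1.5, 2.0]})
X_scaled = (X - X.min()) / (X.max() - X.min())
assert list(X_scaled['a']) == [0.0, 0.5, 1.0]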
+{"seq_id":"85079901","text":"import numpy as np\nimport os\n\nfrom PIL import Image\nfrom skimage.transform import resize\nfrom skimage import io\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\n\nfrom config import VOC_CLASSES, bbox_util, model\nfrom utils import get_color\n\n\ndef detection_cast(detections):\n \"\"\"Helper to cast any array to detections numpy array.\n Even empty.\n \"\"\"\n return np.array(detections, dtype=np.int32).reshape((-1, 5))\n\n\ndef rectangle(shape, ll, rr, line_width=5):\n \"\"\"Draw rectangle on numpy array.\n\n rr, cc = rectangle(frame.shape, (ymin, xmin), (ymax, xmax))\n frame[rr, cc] = [0, 255, 0] # Draw green bbox\n \"\"\"\n ll = np.minimum(np.array(shape[:2], dtype=np.int32) - 1, np.maximum(ll, 0))\n rr = np.minimum(np.array(shape[:2], dtype=np.int32) - 1, np.maximum(rr, 0))\n result = []\n\n for c in range(line_width):\n for i in range(ll[0] + c, rr[0] - c + 1):\n result.append((i, ll[1] + c))\n result.append((i, rr[1] - c))\n for j in range(ll[1] + c + 1, rr[1] - c):\n result.append((ll[0] + c, j))\n result.append((rr[0] - c, j))\n\n return tuple(zip(*result))\n\n\ndef extract_detections(frame, min_confidence=0.6, labels=None):\n \"\"\"Extract detections from frame.\n\n frame: numpy array WxHx3\n returns: numpy int array Cx5 [[label_id, xmin, ymin, xmax, ymax]]\n \"\"\"\n # First, save the input shape, convert frame to float and resize to 300x300\n \n shape = frame.shape\n \n frame = preprocess_input(resize(frame, (300, 300)) * 255)\n \n # Then use preprocess_input, model.predict and bbox_util.detection_out\n # Use help(...) function to help\n \n prediction = model.predict(frame[None, ...])\n \n results = bbox_util.detection_out(prediction, confidence_threshold=min_confidence)[0]\n\n # Select detections with confidence > min_confidence\n\n # If label set is known, use it\n if labels is not None:\n result_labels = results[:, 0].astype(np.int32)\n indeces = [i for i, l in enumerate(result_labels) if VOC_CLASSES[l - 1] in labels]\n results = results[indeces]\n\n # Remove confidence column from result\n \n results = np.asarray(results).reshape((-1, 6))[:, [0, 2, 3, 4, 5]]\n \n # Resize detection coords back to the input image shape saved above\n \n results[:, 1] *= shape[1]\n results[:, 2] *= shape[0]\n results[:, 3] *= shape[1]\n results[:, 4] *= shape[0]\n\n # Return result\n return detection_cast(results)\n\n\ndef draw_detections(frame, detections):\n \"\"\"Draw detections on frame.\n\n Hint: help(rectangle) would help you.\n Use get_color(label) to select color for detection.\n \"\"\"\n frame = frame.copy()\n\n # Each detection row is [label_id, xmin, ymin, xmax, ymax]; outline it\n # with the per-label color, following the hint above\n for label, xmin, ymin, xmax, ymax in detections:\n rr, cc = rectangle(frame.shape, (ymin, xmin), (ymax, xmax))\n frame[rr, cc] = get_color(label)\n\n return frame\n\n\ndef main():\n dirname = os.path.dirname(__file__)\n frame = Image.open(os.path.join(dirname, 'data', 'test.png'))\n frame = np.array(frame)\n\n detections = extract_detections(frame)\n frame = draw_detections(frame, detections)\n\n io.imshow(frame)\n io.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tracking/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"645273816","text":"from django.urls import path\nfrom .views import ShopListCreateView, ShopDetailView, StreetView, CityView\n\n\nurlpatterns = [\n path('shop', ShopListCreateView.as_view()),\n path('shop/detail/<int:pk>', ShopDetailView.as_view()),\n path('<int:pk>/street', StreetView.as_view()),\n path('city', CityView.as_view()),\n]\n","sub_path":"rest_api/shops/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"448222920","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------ 79->\n# Author: ${name=Kelcey Damage}\n# Python: 3.5+\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Doc\n# ------------------------------------------------------------------------ 79->\n# Dependencies:\n#\n# Imports\n# ------------------------------------------------------------------------ 79->\n\n# Globals\n# ------------------------------------------------------------------------ 79->\n# Logging\nLOG_LEVEL = 9\nPROFILE = False\n\n# Workers\nSTARTING_PORT = 10000\nTASK_WORKERS = 3 # Worker processes per node (per physical\n # server)\nCACHE_WORKERS = 1\nRESPONSE_TIME = 0.005 # Controls the rate at which tasks are sent\n # to the workers,\n # and in doing so, the size of the queue.\n # Example:\n # 1000 req @0.01 = ~100 tasks per\n # queue\n # 1000 req @0.001 = ~10 tasks per\n # queue\n # A higher response time increases\n # throughput at the cost of the system's\n # responsiveness.\n\n# Router\nRELAY_LISTEN = '0.0.0.0'\nRELAY_ADDR = '127.0.0.1'\nRELAY_RECV = 19000\nRELAY_SEND = 19001\nRELAY_PUBLISHER = 19300\nCHUNKING = True # Chunking determines if and how much the\n # router breaks up queues in order to\n # better balance worker loads.\n # Example:\n # chunking = 10 will break up all\n # queues into ~10 tasks per worker.\n # This will negatively affect response\n # time since it adds delay at the\n # router, and extra network activity.\n # RESPONSE_TIME and CHUNKING should be\n # balanced to get an optimal throughput\n # and worker load balance.\nCHUNKING_SIZE = 5000\n\n# Cache\nCACHE_LISTEN = '0.0.0.0'\nCACHE_ADDR = '127.0.0.1'\nCACHE_RECV = 19002\nCACHE_PATH = '/tmp/transport'\nCACHE_MAP_SIZE = 512*1024**2 # 512MB for embedded systems\n\nCACHED = False\n\n\n# Classes\n# ------------------------------------------------------------------------ 79->\n\n# Functions\n# ------------------------------------------------------------------------ 79->\n\n# Main\n# ------------------------------------------------------------------------ 79->\n","sub_path":"transport/conf/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
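# The RESPONSE_TIME comment in the record above works out as: expected queue
# depth is roughly request_rate * RESPONSE_TIME. Checking its two quoted figures:
for requests, response_time, queue_depth in [(1000, 0.01, 100), (1000, 0.001, 10)]:
    assert round(requests * response_time) == queue_depth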
+{"seq_id":"25262349","text":"from typing import Tuple, Union\nfrom ursina import *\n\nCo = Union[Tuple[int, int, int], Tuple[int, int]]\n\n\nclass CellControllException(Exception):\n pass\n\n\nclass CellController(Entity):\n \"\"\" Note: the value that callers refer to as a cell is called number here. \"\"\"\n\n def __init__(self, finite_space_size=101):\n super().__init__()\n self.space_size = finite_space_size\n self.cell_scale = 0.5\n self.cell_cubic_thickness = 1.15\n self.scale = finite_space_size * self.cell_scale\n self.cubic(self, color.white10, thickness=1.5, segments=0)\n self.field = {}\n\n def __call__(self, space_dict):\n \"\"\"\n Takes a space_dict of (coordinate, number) pairs and lays the cells out.\n 0=delete\n 1=blue_cell\n 2=red_cell\n \"\"\"\n for co, number in space_dict.items():\n co = self.co_convert(co)\n if not number:\n destroy(self.field[co])\n del self.field[co]\n elif number == 1:\n self.blue_cell(co)\n elif number == 2:\n self.red_cell(co)\n else:\n raise CellControllException(\"Invalid cell value\")\n\n def red_cell(self, co):\n cell = Entity(\n model=\"sphere\",\n texture=load_texture(\"source/red.jpg\"),\n scale=self.cell_scale,\n position=co,\n )\n self.fixed_cubic(cell, color.hex(\"aefff1\"), self.cell_cubic_thickness)\n cell.rotation_z = -90\n cell.update = self.cell_moving(cell, creating=True)\n\n def default_moving():\n cell.update = self.cell_moving(cell)\n\n invoke(default_moving, delay=3)\n self.field[co] = cell\n\n def blue_cell(self, co):\n cell = Entity(\n model=\"sphere\",\n texture=load_texture(\"source/blue.jpg\"),\n scale=self.cell_scale,\n position=co,\n )\n self.fixed_cubic(cell, color.hex(\"aef4ff\"), self.cell_cubic_thickness)\n cell.rotation_z = -90\n cell.update = self.cell_moving(cell, creating=True)\n\n def default_moving():\n cell.update = self.cell_moving(cell)\n\n invoke(default_moving, delay=3)\n self.field[co] = cell\n\n def cell_moving(self, cell, creating=False):\n if creating:\n\n def func():\n cell.rotation_y += time.dt * 3000\n\n else:\n\n def func():\n cell.rotation_y += time.dt * 220\n\n return func\n\n def fixed_cubic(self, parent, color, thickness, segments=5):\n outline = lambda co, deg: Entity(\n parent=scene,\n model=Quad(segments=segments, mode=\"line\", thickness=thickness),\n color=color,\n position=co,\n rotation=deg,\n scale=parent.scale,\n )\n {\n \"front\": outline((parent.x, parent.y, parent.z + -parent.scale.z * 0.5), (0, 0, 0)),\n \"back\": outline((parent.x, parent.y, parent.z + parent.scale.z * 0.5), (0, 0, 0)),\n \"right\": outline((parent.x + parent.scale.x * 0.5, parent.y, parent.z), (0, 90, 0)),\n \"left\": outline((parent.x + -parent.scale.x * 0.5, parent.y, parent.z), (0, 90, 0)),\n \"top\": outline((parent.x, parent.y + parent.scale.y * 0.5, parent.z), (90, 0, 0)),\n \"bottom\": outline((parent.x, parent.y + -parent.scale.y * 0.5, parent.z), (90, 0, 0)),\n }\n\n def cubic(self, parent, color, thickness, segments=5):\n outline = lambda co, deg: Entity(\n parent=parent,\n model=Quad(segments=segments, mode=\"line\", thickness=thickness),\n color=color,\n position=co,\n rotation=deg,\n )\n {\n \"front\": outline((0, 0, -0.5), (0, 0, 0)),\n \"back\": outline((0, 0, 0.5), (0, 0, 0)),\n \"right\": outline((0.5, 0, 0), (0, 90, 0)),\n \"left\": outline((-0.5, 0, 0), (0, 90, 0)),\n \"top\": outline((0, 0.5, 0), (90, 0, 0)),\n \"bottom\": outline((0, -0.5, 0), (90, 0, 0)),\n }\n\n def co_convert(self, co: Co):\n \"\"\"\n Every coordinate must go through this function before it is used.\n\n ---\n grid_scale (the length of one side) must be odd\n ---\n because the grid has to contain 0,0.\n\n Returns: a coordinate inside the grid\n \"\"\"\n grid_scale = self.space_size\n convert_yz = lambda x, y, z: tuple(map(lambda x: x * self.cell_scale, (x, z, y)))\n if len(co) == 3:\n co = convert_yz(*co)\n else:\n co = convert_yz(*co, 0)\n threshold, zero = divmod(grid_scale, 2)\n if not zero:\n raise CellControllException(\"grid_scale must not be even!\")\n\n def calculation(value):\n \"\"\" Coordinate arithmetic at the one-dimensional level. \"\"\"\n if value < -threshold:\n return threshold - (threshold - value) % grid_scale\n if value > threshold:\n return -threshold + (value + threshold) % grid_scale\n return value\n\n return tuple(calculation(co) for co in co)\n\n\nif __name__ == \"__main__\":\n app = Ursina()\n controller = CellController()\n controller(\n {\n (1, 1): 1,\n (1, 2): 2,\n (2, 1): 1,\n (0, 0): 2,\n }\n )\n EditorCamera()\n app.run()","sub_path":"client/core/artifacts/cell_controll.py","file_name":"cell_controll.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
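# co_convert in the record above wraps each axis into [-threshold, threshold]
# (an odd-sized grid, so 0 sits exactly in the middle). A standalone copy of
# the 1-D wrap with a few hand-checkable cases:
def wrap(value, grid_scale=101):
    threshold = grid_scale // 2  # 50 for the default 101-cell axis
    if value < -threshold:
        return threshold - (threshold - value) % grid_scale
    if value > threshold:
        return -threshold + (value + threshold) % grid_scale
    return value

# one step past either edge re-enters from the opposite side
assert wrap(51) == -50 and wrap(-51) == 50 and wrap(0) == 0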
+{"seq_id":"351019285","text":"\"\"\"\nMigration script to add 'info' column to the task table.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\n\nfrom sqlalchemy import Column, MetaData, Table\n\nfrom galaxy.model.custom_types import TrimmedString\n\nlog = logging.getLogger(__name__)\nmetadata = MetaData()\n\n\ndef upgrade(migrate_engine):\n metadata.bind = migrate_engine\n print(__doc__)\n metadata.reflect()\n try:\n task_table = Table(\"task\", metadata, autoload=True)\n c = Column(\"info\", TrimmedString(255), nullable=True)\n c.create(task_table)\n assert c is task_table.c.info\n except Exception:\n log.exception(\"Adding info column to task table failed.\")\n\n\ndef downgrade(migrate_engine):\n metadata.bind = migrate_engine\n metadata.reflect()\n try:\n task_table = Table(\"task\", metadata, autoload=True)\n task_table.c.info.drop()\n except Exception:\n log.exception(\"Dropping info column from task table failed.\")\n","sub_path":"lib/galaxy/model/migrate/versions/0085_add_task_info.py","file_name":"0085_add_task_info.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"203629391","text":"# -*- coding: utf-8 -*-\n\"\"\"\n problem:\n https://projecteuler.net/problem=81\n\n preprocessing:\n Read 'p081_matrix.txt' and create the matrix a(m,n).\n\n main:\n Let sol(m,n) be the \"minimal path sum\" in the small matrix\n\n a(0,0), a(0,1), ..., a(0,n)\n a(1,0), a(1,1), ..., a(1,n)\n ...\n a(m,0), a(m,1), ..., a(m,n)\n\n where m, n = 0, 1, 2, ... N.\n\n recursive expression:\n\n sol(m,n) = min{ sol(m-1,n), sol(m,n-1)} + a(m,n)\n m, n = 0, 1, 2, ..., N.\n\n with initial conditions:\n\n sol(0,0) = a(0,0),\n sol(m,-1) = sol(-1,n) = 0.\n\"\"\"\n\nfrom __future__ import print_function\n\n\ndef solve(_input='p081_matrix.txt'):\n \"\"\"\n main function.\n\n Arguments:\n _input: input file.\n \"\"\"\n\n # input validation\n try:\n _input_file = open(_input, 'r')\n except IOError:\n print('[ERROR] failed to open file %s' % (_input))\n return -1\n\n # preprocessing\n # read file & prepare matrix\n _a = []\n for line in _input_file:\n line = line.rstrip('\\n').split(',')\n _line = []\n for _el in line:\n _line.append(int(_el))\n _a.append(_line)\n\n _max = len(_a) - 1\n\n # initialize sol matrix\n sol = []\n for _m in xrange(0, _max + 1, 1):\n _li = []\n for _n in xrange(0, _max + 1, 1):\n _li.append(0)\n sol.append(_li)\n\n # main\n # initial condition\n sol[0][0] = _a[0][0]\n\n # calculate sol[_m][_n] for fixed _m + _n = _mpn\n for _mpn in xrange(1, _max + 1, 1):\n\n # _m = 0\n sol[0][_mpn] = sol[0][_mpn - 1] + _a[0][_mpn]\n\n # 0 < _m < _mpn\n _m = 1\n while _m < _mpn:\n _n = _mpn - _m\n sol[_m][_n] = min(sol[_m - 1][_n], sol[_m][_n - 1]) + _a[_m][_n]\n _m += 1\n\n # _m = _mpn\n sol[_mpn][0] = sol[_mpn - 1][0] + _a[_mpn][0]\n\n for _mpn in xrange(_max + 1, 2 * _max + 1, 1):\n # _mpn - _max <= _m <= _max\n _m = _mpn - _max\n while _m <= _max:\n _n = _mpn - _m\n sol[_m][_n] = min(sol[_m - 1][_n], sol[_m][_n - 1]) + _a[_m][_n]\n _m += 1\n\n return sol[_max][_max]\n\n\n# call\nprint(solve())\n","sub_path":"project-euler/project-euler_0081_path-sum-two-ways/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
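# A tiny hand-checkable instance of the recurrence in the docstring above:
# for a 2x2 matrix the candidate paths are 1+2+4 = 7 and 1+3+4 = 8, and the
# DP keeps the smaller sum at every cell.
a = [[1, 2], [3, 4]]
sol = [[0, 0], [0, 0]]
sol[0][0] = a[0][0]
sol[0][1] = sol[0][0] + a[0][1]  # top edge: reachable only from the left
sol[1][0] = sol[0][0] + a[1][0]  # left edge: reachable only from above
sol[1][1] = min(sol[0][1], sol[1][0]) + a[1][1]
assert sol[1][1] == 7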
+{"seq_id":"277439716","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import FormView\n\nfrom enrolment.models import PreVerifiedEnrolment\nfrom enrolment import forms\n\n\nclass GeneratePreVerifiedCompaniesFormView(FormView):\n form_class = forms.GeneratePreVerifiedCompanies\n template_name = 'admin/enrolment/company_csv_upload_form.html'\n success_url = reverse_lazy(\n 'admin:enrolment_preverifiedenrolment_changelist'\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n\n@admin.register(PreVerifiedEnrolment)\nclass PreVerifiedEnrolmentAdmin(admin.ModelAdmin):\n search_fields = (\n 'company_number',\n 'email_address',\n 'generated_for',\n 'generated_by',\n )\n list_display = ('company_number', 'email_address', 'generated_for')\n list_filter = ('is_active', 'generated_for')\n\n def get_urls(self):\n urls = super(PreVerifiedEnrolmentAdmin, self).get_urls()\n additional_urls = [\n url(\n r'^pre-verify-companies/$',\n self.admin_site.admin_view(\n GeneratePreVerifiedCompaniesFormView.as_view()\n ),\n name=\"pre-verify-companies\"\n ),\n ]\n return additional_urls + urls\n","sub_path":"enrolment/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"127099994","text":"import os\nimport time\n\ndef login():\n os.system(\"clear\")\n print(\"Welcome\")\n nome = str(input(\"Enter your login: \"))\n senha = str(input(\"Enter your password: \"))\n validacao(nome, senha)\n\ndef validacao(x, y):\n if x == \"Gustavo\" and y == \"123456\":\n os.system(\"clear\")\n print(\"Correct username and password\\nWelcome!!\")\n time.sleep(2.5)\n os.system(\"python3 menu.py\")\n else:\n print(\"Incorrect username or password\\nPlease try again!\")\n login()\n\nlogin()","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"238511768","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom shop.models import Category, Company, Dish, Location, UserProfile, Kit, Cart, CartContent\n\n\nclass TestDish(TestCase):\n def setUp(self):\n self.cot = Category.objects.create()\n self.comp = Company.objects.first()\n self.payload = {\n 'title': 'title',\n 'dish_type': 'dish_type',\n 'image': 'img.jpg',\n 'description': 'descriptions',\n 'company': self.comp,\n 'price': 200\n }\n\n def test_dish_create(self):\n dish = Dish.objects.create(**self.payload)\n dish.categories.add(self.cot)\n dish.save()\n self.assertEqual(dish.title, self.payload['title'])\n\n def test_update_dish(self):\n new_title = 'new_title'\n dish = Dish.objects.create(**self.payload)\n dish.categories.add(self.cot)\n dish.title = new_title\n dish.save()\n dish.refresh_from_db()\n self.assertEqual(dish.title, new_title)\n\n def test_dish_delete(self):\n dish = Dish.objects.create(**self.payload)\n dish.categories.add(self.cot)\n pk = dish.pk\n dish.delete()\n with self.assertRaises(Dish.DoesNotExist):\n dish = Dish.objects.get(pk=pk)\n\n\nclass TestCategory(TestCase):\n def test_create_category_success(self):\n payload = {\n 'title': 'test_title',\n 'description': 'test_description'\n }\n category = Category.objects.create(**payload)\n self.assertEqual(category.title, payload['title'])\n\n def test_category_update(self):\n new_title = 'new_title'\n payload = {\n 'title': 'test_title',\n 'description': 'test_description'\n }\n category = Category.objects.create(**payload)\n category.title = new_title\n category.save()\n self.assertEqual(category.title, new_title)\n\n def test_category_delete(self):\n payload = {\n 'title': 'test_title',\n 'description': 'test_description'\n }\n category = Category.objects.create(**payload)\n pk = category.pk\n category.delete()\n with self.assertRaises(Category.DoesNotExist):\n category = Category.objects.get(pk=pk)\n\n\nclass TestCompany(TestCase):\n def test_create(self):\n payload = {\n 'title': 'test_title'\n }\n company = Company.objects.create(**payload)\n self.assertEqual(company.title, payload['title'])\n\n def test_update_company(self):\n new_title = 'new_test_title'\n payload = {\n 'title': \"test_title\"\n }\n\n company = Company.objects.create(**payload)\n\n company.title = new_title\n company.save()\n self.assertEqual(company.title, new_title)\n\n def test_company_delete(self):\n payload = {\n 'title': 'test_title',\n }\n company = Company.objects.create(**payload)\n pk = company.pk\n company.delete()\n with self.assertRaises(Company.DoesNotExist):\n company = Company.objects.get(pk=pk)\n\n\nclass TestLocation(TestCase):\n def setUp(self):\n self.payload = {\n 'country': 'test_country'\n }\n\n def test_loc_create(self):\n loc = Location.objects.create(**self.payload)\n loc.save()\n self.assertEqual(loc.country, self.payload['country'])\n\n def test_loc_update(self):\n new_country = 'new_country'\n loc = Location.objects.create(**self.payload)\n loc.country = new_country\n loc.save()\n self.assertEqual(loc.country, new_country)\n\n def test_loc_delete(self):\n loc = Location.objects.create(**self.payload)\n pk = loc.pk\n loc.delete()\n with self.assertRaises(Location.DoesNotExist):\n loc = Location.objects.get(pk=pk)\n\n\nclass TestCart(TestCase):\n def setUp(self):\n self.user = User.objects.create_user(username='zefit', password='oladyk123456')\n\n def test_cart_create(self):\n payload = {\n 'user': self.user,\n 'total_cost': 222\n }\n cart = 
Cart.objects.create(**payload)\n        cart.save()\n        self.assertEqual(cart.total_cost, 222)\n\n    def test_cart_update(self):\n        new_cost = 2222\n        payload = {\n            'user': self.user,\n            'total_cost': 22\n        }\n        cart = Cart.objects.create(**payload)\n        cart.total_cost = new_cost\n        cart.save()\n        self.assertEqual(cart.total_cost, new_cost)\n\n    def test_cart_delete(self):\n        payload = {\n            'user': self.user,\n            'total_cost': 222\n        }\n        cart = Cart.objects.create(**payload)\n        pk = cart.pk\n        cart.delete()\n        with self.assertRaises(Cart.DoesNotExist):\n            cart = Cart.objects.get(pk=pk)\n\n\nclass TestCartContent(TestCase):\n    def setUp(self):\n        self.category = Category.objects.create()\n        self.company = Company.objects.create(title='test_title')  # the test database starts empty, so .first() would return None\n        self.user = User.objects.create_user(username='zefit', password='oladyk123456')\n        self.data = {\n            'title': 'test_title',\n            'dish_type': 'test_dish_type',\n            'description': 'test_description',\n            'company': self.company,\n            'price': 222\n        }\n        self.dish = Dish.objects.create(**self.data)\n        self.dish.categories.add(self.category)\n        self.dish.save()\n\n        self.payload = {\n            'user': self.user,\n            'total_cost': 222\n        }\n        self.cart = Cart.objects.create(**self.payload)\n\n    def test_create(self):\n        data = {\n            'cart': self.cart,\n            'product': self.dish,\n            'qty': 222\n        }\n        cartContent = CartContent.objects.create(**data)\n        self.assertEqual(cartContent.qty, 222)\n\n    def test_update(self):\n        new_qty = 111\n        data = {\n            'cart': self.cart,\n            'product': self.dish,\n            'qty': 222\n        }\n        cartContent = CartContent.objects.create(**data)\n        cartContent.qty = new_qty\n        cartContent.save()  # persist and re-read, otherwise the assertion is trivially true\n        cartContent.refresh_from_db()\n        self.assertEqual(cartContent.qty, new_qty)\n\n    def test_delete(self):\n        data = {\n            'cart': self.cart,\n            'product': self.dish,\n            'qty': 222\n        }\n        cartContent = CartContent.objects.create(**data)\n        pk = cartContent.pk\n        cartContent.delete()\n        with self.assertRaises(CartContent.DoesNotExist):\n            cartContent = CartContent.objects.get(pk=pk)\n\n\n","sub_path":"shop/test/test_mod.py","file_name":"test_mod.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"27278360","text":"import h3\nimport Neo4jConnection as Neo4jConnection\nimport PipelineImporter as PipelineImporter\nfrom pyspark.sql.functions import lit\n\n\n# Sets uniqueness constraint for population values\ndef add_constraints():\n Neo4jConnection.connect_to_graph()\n \n Neo4jConnection.query_graph(\n 'CREATE CONSTRAINT population IF NOT EXISTS ON (p:Population) ASSERT p.value IS UNIQUE')\n\n Neo4jConnection.close_connection()\n\n\ndef add_cells(df):\n query = '''\n UNWIND $rows AS row \n MERGE (h:H3Index { h3Index: row.h3Index })\n ON CREATE SET h.resolution = row.resolution\n MERGE (pt:Population { value: row.total_population })\n MERGE (pw:Population { value: row.women })\n MERGE (pm:Population { value: row.men })\n MERGE (pc:Population { value: row.children_under_five })\n MERGE (py:Population { value: row.youth_15_24} )\n MERGE (pe:Population { value: row.elderly_60_plus })\n MERGE (pwr:Population { value: row.women_of_reproductive_age_15_49 })\n MERGE (pt)-[:POPULATION_AT { type: \"total_population\" }]->(h)\n MERGE (pw)-[:POPULATION_AT { type: \"women\" }]->(h)\n MERGE (pm)-[:POPULATION_AT { type: \"men\" }]->(h)\n MERGE (pc)-[:POPULATION_AT { type: \"children_under_five\" }]->(h)\n MERGE (py)-[:POPULATION_AT { type: \"youth_15_24\" }]->(h)\n MERGE (pe)-[:POPULATION_AT { type: \"elderly_60_plus\" }]->(h)\n MERGE (pwr)-[:POPULATION_AT { type: \"women_of_reproductive_age_15_49\" }]->(h)\n '''\n\n df.foreachPartition(lambda partition: Neo4jConnection.batch_insert_data(partition, query))\n\n\ndef import_population_density(limit=None):\n spark = PipelineImporter.connect_to_mongo('population', 'cells')\n df = spark.read.format('com.mongodb.spark.sql.DefaultSource').load().withColumnRenamed('_id', 'h3Index')\n\n if len(df.columns) < 1:\n print('No population data available. You first need to run the population-density processing pipeline before '\n 'loading it into the graph')\n\n return\n \n # noinspection PyUnresolvedReferences\n resolution = h3.h3_get_resolution(df.first()['h3Index'])\n cells = df.select('h3Index', 'population.*').withColumn('resolution', lit(resolution))\n\n if limit is not None:\n cells = cells.limit(limit)\n\n add_constraints()\n add_cells(cells)\n\n","sub_path":"kuwala/core/neo4j/importer/src/PopulationDensityImporter.py","file_name":"PopulationDensityImporter.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"244262329","text":"from swampy.TurtleWorld import *\nimport math\n\nworld = TurtleWorld()\nn = 60\n\ndef square(t, length):\n for i in range(4):\n fd(t, length)\n lt(t)\n\ndef polygon(t, length, n):\n for i in range(n):\n fd(t, length)\n lt(t, 360/n)\n\ndef circle(t, r):\n n = 60\n circumference = math.pi * 2 * r\n polygon(t, circumference/n, n)\n\ndef arc(t, r, angle):\n circumference = math.pi * 2 * r\n steps = int(1.0*angle/360 * n)\n for i in range(steps):\n fd(t, circumference/n)\n lt(t, 360/n)\n\n\nbob = Turtle()\nbob.delay = 0.01\n# square(bob, 200)\n# polygon(bob, 10, 60)\n# circle(bob, 100)\n# arc(bob, 100, 240)\n\nfor i in range(11):\n arc(bob, 200, 40)\n lt(bob,145)\n arc(bob, 200, 40)\n lt(bob,45)\n # c = raw_input()\n\nwait_for_user()\n","sub_path":"thinkpython/exer4.2.py","file_name":"exer4.2.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"401711929","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#初期化\ndt = 0.05\ndx = 0.1\n\njmax = 21\nnmax = 6\n\n#x,qの初期化\ndef init(q1, q2, dx, jmax):\n x = np.linspace(0, dx * (jmax - 1), jmax)\n q = np.array([(float(q1) if i < 0.5 * jmax else float(q2)) for i in range(jmax)])\n return (x, q)\n\n#それぞれの方法で流束を返す関数を作る\n#一次精度風上法\ndef UPWIND1(q, c, dt, dx, j):\n ur = q[j + 1]\n ul = q[j]\n fr = c * ur\n fl = c * ul\n return 0.5 * (fr + fl - abs(c) * (ur - ul))\n\n#二次精度風上法\ndef UPWIND2(q, c, dt, dx, j):\n ur = 1.5 * q[j + 1] - 0.5 * q[j+2]\n ul = 1.5 * q[j] - 0.5 * q[j-1]\n fr = c * ur\n fl = c * ul\n return 0.5 * (fr + fl - abs(c) * (ur - ul))\n\n#グラフを描く\ndef do_computing(q1, q2, c, dt, dx, nmax, f):\n fig = plt.figure(figsize=(9, 6), dpi=100)\n plt.rcParams[\"font.size\"] = 10\n for k in range(2):\n #qの初期化\n x, q = init(q1, q2, dx, jmax)\n\n #初期分布\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n ax = fig.add_subplot(2, 2, k + 1)\n ax.plot(x, q, marker='o', markersize=1, lw=2, label='n=0')\n\n #各場合の計算\n ff = f[k]\n for n in range(1, nmax + 1):\n qold = q.copy()\n for j in range(k + 1, jmax - (k + 1)):\n ff1 = ff(qold, c, dt, dx, j)\n ff2 = ff(qold, c, dt, dx, j-1)\n q[j] = qold[j] - dt / dx * (ff1 - ff2)\n\n #各ステップのプロット\n if n % 2 == 0:\n ax.plot(x, q, marker='o', markersize=1, lw=1, label=f'n={n}')\n \n #グラフの描画\n ax.grid(color='black', linestyle='dashed', linewidth=0.5)\n plt.xlim([0, jmax * dx])\n plt.xlabel('x')\n plt.ylabel('q')\n ax.legend()\n plt.ylim([-1, 1.1])\n plt.yticks(np.arange(-1, 1.1, 0.2))\n\ndt = 0.05\ndx = 0.1\n\njmax = 21\nnmax = 6\n\nq1 = 1\nq2 = 0\ndo_computing(q1, q2, 1, dt, dx, nmax, [UPWIND1, UPWIND2])\n\nq1 = 0\nq2 = 1\ndo_computing(q1, q2, -1, dt, dx, nmax, [UPWIND1, UPWIND2])\n\nplt.show()","sub_path":"0428/zemi3-2.py","file_name":"zemi3-2.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"296329973","text":"# https://atcoder.jp/contests/abc040/tasks/abc040_c\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\n# input=sys.stdin.readline\ndef resolve():\n n=int(input())\n A=list(map(int,input().split()))\n dp=[0]*n\n dp[1]=abs(A[1]-A[0])\n for i in range(2,n):\n dp[i]=min(dp[i-1]+abs(A[i]-A[i-1]),dp[i-2]+abs(A[i]-A[i-2]))\n print(dp[n-1])\nresolve()\n","sub_path":"ABC040/c_柱柱柱柱柱.py","file_name":"c_柱柱柱柱柱.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"491874175","text":"import os\nimport click\n\nfrom aim.engine.aim_profile import AimProfile\n\n\n@click.command()\n@click.option('-a', '--address', required=True, type=str)\ndef auth(address):\n profile = AimProfile()\n\n keys = profile.auth(address)\n public_key = keys['public_key']\n\n click.echo(click.style('Your public key for {}'.format(address),\n fg='yellow'), err=True)\n click.echo(public_key)\n click.echo()\n","sub_path":"aim/cli/auth/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"641222767","text":"import tensorflow as tf\nfrom framework import tfNDT\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom framework.ndtFunc import statModel\nfrom framework import plotsFunc\n\n# from sklearn.datasets import fetch_rcv1\n# rcv1 = fetch_rcv1()\ndataName = 'Reuters'\nmax_word = 2000\n(X_train, Y_train), (X_test, Y_test) = tf.keras.datasets.reuters.load_data(num_words=max_word)\nword_idx = tf.keras.datasets.reuters.get_word_index()\nidx_to_word = dict([(value, key) for (key, value) in word_idx.items()])\nprint(' '.join([idx_to_word.get(x - 3, '?') for x in X_train[0]]))\n\n# ---------------------------------------------------------------------------------\n# plt.figure(2)\n# plt.hist(Y_train, bins=45)\n# label = 4 # binary classification of selected label vs others\n# X_train_selected, Y_train_selected = X_train, tfNDT.ndtFunc.each_label(Y_train, label)\n# X_test_selected, Y_test_selected = X_test, tfNDT.ndtFunc.each_label(Y_test, label)\n# ---------------------------------------------------------------------------------\nX_train_selected, Y_train_selected = X_train, Y_train # multi-classification\nX_test_selected, Y_test_selected = X_test, Y_test\n# ---------------------------------------------------------------------------------\nd_train = tfNDT.DataProcess(X_train_selected, Y_train_selected)\nd_test = tfNDT.DataProcess(X_test_selected, Y_test_selected)\nd_train.sequenceToMatrix(dimension=max_word)\nd_train.oneHotLabel()\nd_test.sequenceToMatrix(dimension=max_word)\nd_test.oneHotLabel()\nruns = 10\n\n\n# ---------------------------------------------------------------------------------\n\n\ndef mainPlot(i):\n plt.figure(i)\n plt.plot(ndt_record.epoch_range, E_tree_test * np.ones((ndt.epochs + 1, 1)), color='k', label='DT')\n plt.plot(ndt_record.epoch_range, ndt_record.tClf_error, marker='o', color='b', label='NDT')\n plt.plot(ndtP_record.epoch_range, ndtP_record.tClf_error, marker='x', color='b', linestyle='--', label='NDT-P')\n plt.plot(nn_record.epoch_range, nn_record.tClf_error, marker='o', color='g', label='NN')\n plt.plot(nnD_record.epoch_range, nnD_record.tClf_error, marker='o', color='r', label='NN-D')\n plt.plot(nnH_record.epoch_range, nnH_record.tClf_error, marker='o', color='c', label='NN-H')\n plt.plot(nn1_record.epoch_range, nn1_record.tClf_error, marker='o', color='m', label='NN-1')\n plt.plot(nn3_record.epoch_range, nn3_record.tClf_error, marker='o', color='y', label='NN-3')\n\n # plt.ylim(0, 10)\n plt.xlabel('Epochs ')\n plt.ylabel('Classification Error in %')\n plt.title(dataName+' Testing')\n plt.legend()\n plt.show()\n plt.savefig('Improved-NDT/main/figure/Reuters_testing.png', format='png', dpi=100)\n\n\ndef avgPlot(i):\n plt.figure(i)\n plt.plot(ndt_avgR.epoch_range, E_tree_test * np.ones((ndt.epochs + 1, 1)), color='k', label='DT')\n plt.plot(ndt_avgR.epoch_range, ndt_avgR.tClf_error, marker='o', color='b', label='NDT')\n plt.plot(nn_avgR.epoch_range, nn_avgR.tClf_error, marker='o', color='g', label='NN')\n plt.plot(nnD_avgR.epoch_range, nnD_avgR.tClf_error, marker='o', color='r', label='NN-D')\n plt.plot(nnH_avgR.epoch_range, nnH_avgR.tClf_error, marker='o', color='c', label='NN-H')\n plt.plot(nn1_avgR.epoch_range, nn1_avgR.tClf_error, marker='o', color='m', label='NN-1')\n plt.plot(nn3_avgR.epoch_range, nn3_avgR.tClf_error, marker='o', color='y', label='NN-3')\n\n # plt.ylim(0, 10)\n plt.xticks(np.arange(ndt.epochs + 1, step=5))\n plt.xlabel('Epochs ')\n plt.ylabel('Classification Error in %')\n plt.title('Reuters Testing')\n 
plt.legend(loc=1)\n    plt.savefig('Improved-NDT/main/figure/'+dataName+'_testing.png', format='png', dpi=100)  # save before show()\n    plt.show()\n\n\ndef trainPlot(i):\n    plt.figure(i)\n    # plt.plot(ndt_record.epoch_range, E_tree_train * np.ones((ndt.epochs + 1, 1)), color='k', label='DT')\n    # plt.plot(ndt_record.epoch_range, E_treeP_train * np.ones((ndt.epochs + 1, 1)), color='k', linestyle='--',\n    #          label='DT-P')\n    plt.plot(ndt_record.epoch_range, ndt_record.Cost, marker='o', color='b', label='NDT')\n    plt.plot(ndtP_record.epoch_range, ndtP_record.Cost, marker='x', color='b', linestyle='--', label='NDT-P')\n    plt.plot(nn_record.epoch_range, nn_record.Cost, marker='o', color='g', label='NN')\n    plt.plot(nnD_record.epoch_range, nnD_record.Cost, marker='o', color='r', label='NN-D')\n    plt.plot(nnH_record.epoch_range, nnH_record.Cost, marker='o', color='c', label='NN-H')\n    plt.plot(nn1_record.epoch_range, nn1_record.Cost, marker='o', color='m', label='NN-1')\n    plt.plot(nn3_record.epoch_range, nn3_record.Cost, marker='o', color='y', label='NN-3')\n\n    # plt.ylim(0, 10)\n    plt.xlabel('Epochs')\n    plt.ylabel('L2 cost')\n    plt.title('Reuters Training')\n    plt.legend()\n    plt.show()\n\n\ndef ndtComp(i, datasetName):\n    plt.figure(i)\n    plt.plot(ndt_record.epoch_range, E_tree_test * np.ones((ndt.epochs + 1, 1)), color='tab:blue', label='DT')\n    plt.plot(ndt_record.epoch_range, E_treeH_test * np.ones((ndt.epochs + 1, 1)), color='tab:orange',\n             label='DT-P')\n    plt.plot(ndt_record.epoch_range, E_tree8_test * np.ones((ndt.epochs + 1, 1)), color='tab:green',\n             label='DT-8')\n    plt.plot(ndt_record.epoch_range, ndt_record.tClf_error, marker='o', color='tab:blue', label='NDT')\n    plt.plot(ndtP_record.epoch_range, ndtP_record.tClf_error, marker='o', color='tab:orange', label='NDT-P')\n    plt.plot(ndt8_record.epoch_range, ndt8_record.tClf_error, marker='o', color='tab:green', label='NDT-8')\n\n    # plt.ylim(0, 10)\n    plt.xlabel('Epochs ')\n    plt.ylabel('Classification Error in %')\n    plt.title(datasetName+' Testing')\n    plt.legend()\n    plt.savefig('Improved-NDT/main/figure/'+datasetName+'_testing_ndtComp.png', format='png', dpi=100)  # save before show()\n    plt.show()\n\n\n# ---------------------------------------------------------------------------------\n\nndt = tfNDT.NeuralDecisionTreeClassification()\nndt.d_train, ndt.d_test = d_train, d_test\nclf = ndt.treeConfig()\ny_pred = clf.predict(ndt.d_test.X)\ny_pred = y_pred.reshape((len(y_pred), 1))\nE_tree_test = np.mean(np.not_equal(y_pred, ndt.d_test.Y)) * 100\n\ny_pred_ = clf.predict(ndt.d_train.X)\ny_pred_ = y_pred_.reshape((len(y_pred_), 1))\nE_tree_train = np.mean(np.not_equal(y_pred_, ndt.d_train.Y))\n\nndt.activation1 = tfNDT.r1\nndt.learning_rate = 50\nndt.batch_size = 1000\n\nndt, ndt_record = ndt.train()\nndt_avgR = ndt_record\n\n# ---------------------------------------------------------------------------------\n\nndtP = tfNDT.NeuralDecisionTreeClassification()\nndtP.d_train, ndtP.d_test = d_train, d_test\nclfP = ndtP.treeConfig(init_frac=0.5)\nyP_pred = clfP.predict(ndtP.d_test.X)\nyP_pred = yP_pred.reshape((len(yP_pred), 1))\nE_treeH_test = np.mean(np.not_equal(yP_pred, ndtP.d_test.Y)) * 100\n\nndtP.activation1 = tfNDT.r1\nndtP.learning_rate = 50\nndtP.batch_size = 1000\nndtP, ndtP_record = ndtP.train()\n# ---------------------------------------------------------------------------------\n\nndt8 = tfNDT.NeuralDecisionTreeClassification()\nndt8.tree_max_depth = 8\nndt8.d_train, ndt8.d_test = d_train, d_test\nclf8 = ndt8.treeConfig()\ny8_pred = clf8.predict(ndt8.d_test.X)\ny8_pred = y8_pred.reshape((len(y8_pred), 
1))\nE_tree8_test = np.mean(np.not_equal(y8_pred, ndt8.d_test.Y)) * 100\n\nndt8.activation1 = tfNDT.r1\nndt8.learning_rate = 50\nndt8.batch_size = 1000\nndt8, ndt8_record = ndt8.train()\n\nndtComp(34, dataName)\n# ---------------------------------------------------------------------------------\n\nnn = tfNDT.NeuralDecisionTreeClassification()\nnn.d_train, nn.d_test = d_train, d_test\nnn.learning_rate = 50\nnn.batch_size = 1000\n\nnn_s, nn_records = [], []\nfor _ in range(runs):\n nn.Wb = ndt.Wb # borrow the shape of Wb\n nn.netConfig(option='randN') # reinitialize Wb using rand normal\n nn, nn_record = nn.train()\n nn_s.append(nn)\n nn_records.append(nn_record)\nnn_avgR = statModel(nn_records)\n\n# ---------------------------------------------------------------------------------\n\nnnD = tfNDT.NeuralDecisionTreeClassification()\nnnD.d_train, nnD.d_test = d_train, d_test\nnnD.learning_rate = 50\nnnD.batch_size = 1000\n\nnnD_s, nnD_records = [], []\nfor _ in range(runs):\n nnD.Wb = ndt.Wb\n nnD.netConfig(option='double')\n nnD, nnD_record = nnD.train()\n nnD_s.append(nnD)\n nnD_records.append(nnD_record)\nnnD_avgR = statModel(nnD_records)\n\n# ---------------------------------------------------------------------------------\n\nnnH = tfNDT.NeuralDecisionTreeClassification()\nnnH.d_train, nnH.d_test = d_train, d_test\nnnH.learning_rate = 50\nnnH.batch_size = 1000\n\nnnH_s, nnH_records = [], []\nfor _ in range(runs):\n nnH.Wb = ndt.Wb\n nnH.netConfig(option='half')\n nnH, nnH_record = nnH.train()\n nnH_s.append(nnH)\n nnH_records.append(nnH_record)\nnnH_avgR = statModel(nnH_records)\n\n# ---------------------------------------------------------------------------------\n\nnum_neurons = 2 * ndt.Wb[0].shape[1] + 1\nnn1 = tfNDT.OneLayersNetworkClassification(num_neurons=num_neurons)\nnn1.d_train, nn1.d_test = d_train, d_test\nnn1.learning_rate = 50\nnn1.batch_size = 1000\n\nnn1_s, nn1_records = [], []\nfor _ in range(runs):\n nn1.netConfig()\n nn1, nn1_record = nn1.train()\n nn1_s.append(nn1)\n nn1_records.append(nn1_record)\nnn1_avgR = statModel(nn1_records)\n\n# ---------------------------------------------------------------------------------\n\nnn3 = tfNDT.ThreeLayersNetworkClassification(\n num_neurons_list=[int(num_neurons / 3), int(num_neurons / 3), num_neurons - 2 * int(num_neurons / 3)])\nnn3.d_train, nn3.d_test = d_train, d_test\nnn3.learning_rate = 50\nnn3.batch_size = 1000\n\nnn3_s, nn3_records = [], []\nfor _ in range(runs):\n nn3.netConfig()\n nn3, nn3_record = nn3.train()\n nn3_s.append(nn3)\n nn3_records.append(nn3_record)\nnn3_avgR = statModel(nn3_records)\n\n# ---------------------------------------------------------------------------------\n\n# mainPlot(31)\n# trainPlot(32)\navgPlot(33)\n","sub_path":"tf_Reuters.py","file_name":"tf_Reuters.py","file_ext":"py","file_size_in_byte":9856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"464750224","text":"# coding=utf-8\n#!/bin/python3\nimport pygame\nfrom sklearn.neighbors import KNeighborsRegressor\nimport pandas\n\n\n\n# CONFIGURACIÓN\nclock = pygame.time.Clock()\ndatos = open\nWIDTH = 1200\nHEIGHT = 600\nBORDER = 20\nCOLECTA_DATOS=0\nif COLECTA_DATOS:\n\n archivo = open(\"datos.csv\", \"w\")\n archivo.write(\"x,y,vx,vy, raqueta.y \\n\")\n\nVELOCITY = 1\nfuncionando=True\n# Dibujar el escenario\n\npygame.init()\npygame.font.init()\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\n\nfgColor = pygame.Color(\"white\")\nbgColor = pygame.Color(\"black\")\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.mouse.set_visible(False)\n# definición de clases\n\n\nclass Pelota:\n RADIUS = 15\n \n global bgColor, funcionando\n\n def __init__(self, x, y, colour, vx, vy):\n self.x = x\n self.y = y\n self.colour = colour\n self.vx = vx\n self.vy = vy\n self.vidas = 2\n\n def mostrar(self, colour):\n global screen # Para que coja la variable global\n self.colour = colour\n \n\n \n \n pygame.draw.circle(screen, self.colour, (self.x, self.y), self.RADIUS)\n\n def actualizar(self):\n\n newx = self.x + self.vx\n newy = self.y + self.vy\n pygame.draw.rect(screen, bgColor, pygame.Rect(\n (BORDER,BORDER,500,100)))\n textsurface = myfont.render(f\"VIDAS RESTANTES {self.vidas}\", False, (220, 220, 220))\n screen.blit(textsurface,(BORDER,BORDER))\n \n if newx < BORDER + self.RADIUS:\n self.vx = -self.vx\n elif newy < BORDER + self.RADIUS or newy > HEIGHT - BORDER - self.RADIUS:\n self.vy = -self.vy\n \n elif newx > WIDTH-raqueta.WIDTH and newx < WIDTH and newy > raqueta.y and newy < raqueta.y+raqueta.HEIGHT:\n temporal=self.vx\n #self.vx=0\n #self.x = WIDTH-raqueta.WIDTH-pelota.RADIUS*2\n self.vx = -abs(temporal)\n elif newx > WIDTH-raqueta.WIDTH:\n self.x = WIDTH-raqueta.WIDTH-self.RADIUS\n self.vx = -self.vx\n self.vidas = self.vidas - 1\n pygame.draw.rect(screen, bgColor, pygame.Rect(\n (BORDER,BORDER,500,100)))\n textsurface = myfont.render(f\"VIDAS RESTANTES {self.vidas}\", False, (220, 220, 220))\n \n \n else:\n self.mostrar(bgColor)\n self.x = self.x + self.vx\n self.y = self.y + self.vy\n self.mostrar(fgColor)\n\n\nclass Raqueta:\n WIDTH = 20\n HEIGHT = 100\n\n def __init__(self, y):\n self.y = y\n \n\n def show(self, colour):\n global screen, fgColor, bgColor\n pygame.draw.rect(screen, colour, pygame.Rect(\n (WIDTH - self.WIDTH, self.y, self.WIDTH, self.HEIGHT)))\n\n def update(self, newY):\n self.show(bgColor)\n #raton = pygame.mouse.get_pos()[1]\n raton = newY\n if raton < BORDER or raton > HEIGHT - BORDER - self.HEIGHT:\n self.y = self.y\n else:\n self.y = raton\n self.show(fgColor)\n\n\n# Crear objetos\npelota = Pelota(WIDTH // 2, HEIGHT //\n 2, fgColor, -VELOCITY, -VELOCITY)\nraqueta = Raqueta(HEIGHT // 2)\n\n# Bordes del juego\npygame.draw.rect(screen, fgColor, pygame.Rect(0, 0, WIDTH, BORDER))\npygame.draw.rect(screen, fgColor, pygame.Rect(0, 0, BORDER, WIDTH))\npygame.draw.rect(screen, fgColor, pygame.Rect(\n 0, HEIGHT - BORDER, WIDTH, HEIGHT))\n# Mostrar pantalla\n\n# IA\ndatos = pandas.read_csv(\"datos.csv\")\ndatos = datos.drop_duplicates()\n\nX = datos.drop(columns='raqueta.y')\ny = datos['raqueta.y']\n\nclf = KneigborsRegressor (n_neighbors = 3)\nclf = clf.fit(X, y)\n\ndf = pandas.DataFrame(columns=['x', 'y', 'vx', 'vy'])\n# Bucle principal\n\nwhile funcionando:\n e = pygame.event.poll()\n clock.tick(2000)\n if e.type == pygame.QUIT or pelota.vidas == 0:\n \n pygame.draw.rect(screen, fgColor, pygame.Rect(\n (0,0,WIDTH,HEIGHT)))\n textoFinal = 
myfont.render(f\"fin de la partida, gracias por jugar\", False, bgColor)\n screen.blit(textoFinal,(400,300))\n \n \n pygame.display.flip()\n pygame.time.wait(2000)\n funcionando=False\n if COLECTA_DATOS:\n archivo.write(f\"{pelota.x}, {pelota.y}, {pelota.vx}, {pelota.vy}, {raqueta.y} \\n\")\n pygame.display.flip()\n toPredict = df.append({'x': pelota.x, 'y' : pelota.y, 'vx' : pelota.vx, 'vy' : pelota.vy}, ignore_index=True)\n shouldMove = clf.predict(toPredict)\n\n pelota.actualizar()\n raqueta.update(shouldMove)\n\npygame.quit()\n","sub_path":"pongIA.py","file_name":"pongIA.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"15720429","text":"import euler\nimport itertools\nimport math\n\ndef isPerfectSquare(n):\n h = n & 0xF\n if h > 9:\n return False\n if h != 2 and h != 3 and h != 5 and h != 6 and h != 7 and h != 8:\n t = math.floor(math.sqrt(n) + 0.5)\n return t*t == n\n return False\n\ndef run():\n isPrime = {}\n for i in itertools.count(3, 2):\n isPrime[i] = euler.isPrime(i)\n if isPrime[i]:\n continue\n conjecture = False\n for j in range(i-2, 1, -2):\n if not isPrime[j]:\n continue\n n = (i-j)//2\n if isPerfectSquare(n):\n conjecture = True\n break\n if not conjecture:\n return i\n","sub_path":"src/python/p046.py","file_name":"p046.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"266111691","text":"\r\nfrom collections import namedtuple\r\nfrom os import makedirs\r\nfrom os.path import abspath, dirname, exists, join\r\nfrom uuid import uuid4\r\nfrom sys import stderr\r\n\r\nfrom pypairtree.utils import identifier_to_path\r\nfrom pypremis.lib import PremisRecord\r\nfrom pypremis.nodes import *\r\n\r\nfrom .premis_apis import get_an_agent_identifier\r\n\r\n__AUTHOR__ = \"Tyler Danstrom\"\r\n__EMAIL__ = \"tdanstrom@uchicago.edu\"\r\n__VERSION__ = \"1.0.0\"\r\n__DESCRIPTION__ = \"a module to use in a command line tool to find all premis records in longTermStorage and if not already in livePremis copy the file into livePremis\"\r\n__COPYRIGHT__ = \"University of Chicago, 2016\"\r\n\r\ndef write_out_a_complete_file_tree(directory_string):\r\n \"\"\"a function to write out a complete directory hierarchy to disk\r\n ___Args__\r\n 1. directory_string (str): a string representing a path that needs to be written to disk\r\n \"\"\"\r\n if abspath(directory_string) == directory_string:\r\n directory_string = directory_string[1:]\r\n new_output = \"/\"\r\n for n_part in directory_string.split(\"/\"):\r\n new_output = join(new_output, n_part)\r\n if exists(new_output):\r\n pass\r\n else:\r\n makedirs(new_output, exist_ok=True)\r\n return True\r\n\r\n# start of premis node creation functions\r\n\r\ndef build_a_premis_event(event_type, event_date, outcome_status, outcome_message, agent, objid, agent_type=None):\r\n \"\"\"a function to generate a minimal PREMIS event record\r\n\r\n __Args__\r\n 1. event_type (str): a label that defines the category of event that is being created\r\n 2. event_date (str): an ISO date string representing the time that this event occurred\r\n 3. outcome_status (str): either SUCCESS or FAILURE, a label defining whether or not the\r\n event was able to be completed\r\n 4. outcome_message (str): a brief (1-2 sentence(s)) description of what happened in this event\r\n 5. agent (str): the official name for the agent that performed this event\r\n 6. objid (str): the PREMIS identifier for the object that this event occurred on\r\n \"\"\"\r\n new_event = None\r\n agent_id = get_an_agent_identifier(agent, agent_type=agent_type)\r\n if agent_id:\r\n event_id = EventIdentifier(\"DOI\", str(uuid4()))\r\n linkedObject = LinkingObjectIdentifier(\"DOI\", objid)\r\n linkedAgent = LinkingAgentIdentifier(\"DOI\", agent_id)\r\n event_detail = EventOutcomeDetail(eventOutcomeDetailNote=outcome_message)\r\n event_outcome = EventOutcomeInformation(outcome_status, event_detail)\r\n new_event = Event(event_id, event_type, event_date)\r\n new_event.set_linkingAgentIdentifier(linkedAgent)\r\n new_event.set_eventOutcomeInformation(event_outcome)\r\n new_event.set_linkingObjectIdentifier(linkedObject)\r\n return new_event\r\n\r\n# end of premis node creation functions\r\n\r\n# start of premis loading and writing functions \r\n\r\ndef write_a_premis_record(premis_record, file_path):\r\n \"\"\"a function to write a Premis Record to a particular file on-disk\r\n ___Args__\r\n 1. premis_record (Premis Record): an instance of pypremis.lib.PremisRecord\r\n 2. file_path (str): a string representing a valid location on-disk\r\n \"\"\"\r\n try:\r\n premis_record.write_to_file(file_path)\r\n except Exception as e:\r\n raise(e)\r\n\r\ndef open_premis_record(premis_file_path):\r\n \"\"\"a function to attempt to create an instance of a PremisRecord\r\n\r\n __Args__\r\n 1. 
premis_file_path (str): a string pointing to the location of a premis xml file on-disk\r\n \"\"\"\r\n output = None\r\n try:\r\n output = PremisRecord(frompath=premis_file_path)\r\n except ValueError:\r\n stderr.write(\"{} is not a valid premis record\\n\".format(premis_file_path))\r\n return output\r\n\r\n# end of premis loading and writing functions\r\n# start of premis record creation functions\r\n\r\ndef create_agent_path(dto, identifier):\r\n path = join(dto.root, str(identifier_to_path(identifier)), \"prf\", \"agent.xml\")\r\n return path\r\n\r\ndef create_a_new_premis_agent(dto):\r\n identifier = uuid4().hex\r\n path_to_agent = create_agent_path(dto, identifier)\r\n id_node = AgentIdentifier(\"DOI\", identifier)\r\n new_agent = Agent(id_node)\r\n new_agent.set_agentType(dto.type)\r\n new_agent.set_agentName(dto.name)\r\n new_record = PremisRecord(agents=[new_agent])\r\n try:\r\n write_out_a_complete_file_tree(dirname(path_to_agent))\r\n write_a_premis_record(new_record, path_to_agent)\r\n return (True, identifier)\r\n except IOError:\r\n return (False, None)\r\n\r\ndef edit_a_premis_agent(dto):\r\n identifier = dto.identifier\r\n pairtree_identifier = str(identifier_to_path(identifier))\r\n path_to_agent_record = create_agent_path(dto, identifier)\r\n record_to_edit = PremisRecord(frompath=path_to_agent_record)\r\n agents_list = record_to_edit.get_agent_list()\r\n agent_node = agents_list[0]\r\n for n_field in dto.edit_fields:\r\n if n_field == \"name\":\r\n agent_node.set_agentName(getattr(dto, n_field))\r\n elif n_field == \"type\":\r\n agent_node.set_agentType(getattr(dto, n_field))\r\n agent_list = [agent_node]\r\n record_to_edit = PremisRecord(agents=agent_list)\r\n try:\r\n write_a_premis_record(record_to_edit, path_to_agent_record)\r\n return (True, identifier)\r\n except IOError:\r\n return (False, None)\r\n\r\ndef create_or_edit_an_agent_record(dto):\r\n \"\"\"a function to create a new PREMIS record for an agent\r\n\r\n __Args__\r\n 1. agents_root (str): a string that is a valid path to agent records in livePremis\r\n 2. dto (AgentDataTransferObject): an object to pass Agent data from an api this function\r\n \"\"\"\r\n if not dto.identifier:\r\n return create_a_new_premis_agent(dto)\r\n else:\r\n return edit_a_premis_agent(dto)\r\n\r\ndef add_event_to_premis_record(path_to_record, new_event):\r\n the_record = PremisRecord(frompath=path_to_record)\r\n the_record.add_event(new_event)\r\n print(path_to_record)\r\n print(the_record)\r\n the_record.write_to_file(path_to_record)\r\n return True\r\n\r\ndef add_event_to_a_premis_agent(dto):\r\n \"\"\"a function to add a PREMIS event to a particular premis record\r\n\r\n __Args__\r\n 1. premis_record (PremisRecord) an instance of pyremis.lib.PremisRecord\r\n 2. 
an_event (Event): an instance of pypremis.nodes.Event\r\n \"\"\"\r\n path_to_agent_record = join(dto.root, str(identifier_to_path(dto.identifier)), \"prf\", \"agent.xml\")\r\n record_to_edit = PremisRecord(frompath=path_to_agent_record)\r\n agents = record_to_edit.get_agent_list()\r\n agent = agents[0]\r\n stderr.write(dto.event)\r\n new_linked_event = LinkingEventIdentifier(\"DOI\", dto.event)\r\n stderr.write(str(new_linked_event))\r\n agent.add_linkingEventIdentifier(new_linked_event)\r\n records_to_edit = PremisRecord(agents=[agent])\r\n write_a_premis_record(record_to_edit, path_to_agent_record)\r\n return True\r\n\r\n# end of premis record creation functions\r\n\r\n# start of premis searchiing functions\r\n\r\ndef find_object_characteristics_from_premis(premis_object):\r\n \"\"\"a function to return the object characteristics node from the a PremisRecord object\r\n\r\n __Args__\r\n 1. premis_object (PremisRecord): an instance of the pypremis.lib.PremisRecord class\r\n \"\"\"\r\n return premis_object.get_objectCharacteristics()[0]\r\n\r\ndef find_fixities_from_premis(object_chars, digest_algo_filter):\r\n \"\"\"a function to return the messageDigest value for a particular algorithm\r\n if it exists in the object characteristics presented\r\n\r\n __Args__\r\n 1. object_chars (list): a list of pypremis.nodes.ObjectCharacteristic nodes\r\n digest_algo_filter (str): a string label for a particular digest\r\n algorithm that needs to be found\r\n \"\"\"\r\n obj_fixiites = object_chars.get_fixity()\r\n for fixity in obj_fixiites:\r\n if fixity.get_messageDigestAlgorithm() == digest_algo_filter:\r\n return fixity.get_messageDigest()\r\n return None\r\n\r\ndef find_size_info_from_premis(object_chars):\r\n \"\"\"a function to find the size element value of a particular object characteristic\r\n\r\n __Args__\r\n 1. object_chars (list): a list of pypremis.nodes.ObjectCharacteristic nodes\r\n \"\"\"\r\n return object_chars.get_size()\r\n\r\ndef find_mimetype_from_premis(object_chars):\r\n return object_chars.get_format()[0].get_formatDesignation().get_formatName()\r\n\r\ndef find_objid_from_premis(premis_object):\r\n \"\"\"a function the object identifier of a particular PremisRecord instance\r\n\r\n __Args__\r\n 1. premis_object (PremisRecord): an instance of pypremis.lib.PremisRecord\r\n \"\"\"\r\n return premis_object.get_objectIdentifier()[0].get_objectIdentifierValue()\r\n\r\ndef find_related_objects_from_premis(premis_object):\r\n \"\"\"a function to find related objects for a given premis record\r\n\r\n __Args__\r\n 1. premis_record (PremisRecord): an instance of pypremis.node.Object\r\n \"\"\"\r\n related_objects_list = []\r\n try:\r\n relationships = premis_object.get_relationship()\r\n except KeyError:\r\n return []\r\n for n_relationship in relationships:\r\n if n_relationship.get_relatedObjectIdentifier():\r\n for n_related_object in n_relationship.get_relatedObjectIdentifier():\r\n related_objects_list.append(n_related_object.get_relatedObjectIdentifierValue())\r\n return related_objects_list\r\n\r\ndef extract_identity_data_from_premis_record(premis_file):\r\n \"\"\"a function to extract data needed to run a fixity check from a particular premis xml file\r\n\r\n __Args__\r\n 1. 
premis_file (str or PremisRecord): a string pointing to a premis record on-disk or\r\n an instance of a PremisRecord\r\n \"\"\"\r\n def premis_data_packager(content_loc, this_record, objid, file_size, fixity_digest,\r\n mimetype, events, related_objects):\r\n \"\"\"a function to return a data transfer object for extracting identity data\r\n from a particular PremisRecord instance\r\n \"\"\"\r\n return namedtuple(\"premis_data\",\r\n \"content_loc premis_record objid file_size fixity_to_test \" + \\\r\n \"mimetype events_list related_objects\")\\\r\n (content_loc, this_record, objid, int(file_size), fixity_digest,\r\n mimetype, events, related_objects)\r\n this_record = open_premis_record(premis_file)\r\n this_object = this_record.get_object_list()[0]\r\n the_characteristics = find_object_characteristics_from_premis(this_object)\r\n objid = find_objid_from_premis(this_object)\r\n file_size = find_size_info_from_premis(the_characteristics)\r\n file_mimetype = find_mimetype_from_premis(the_characteristics)\r\n fixity_digest = find_fixities_from_premis(the_characteristics, 'md5')\r\n content_loc = this_object.get_storage()[0].get_contentLocation().get_contentLocationValue()\r\n events = get_events_from_a_premis_record(this_record)\r\n related_objects = find_related_objects_from_premis(this_object)\r\n data = premis_data_packager(content_loc, this_record, objid, int(file_size), fixity_digest,\r\n file_mimetype, events, related_objects)\r\n return data\r\n\r\ndef extract_core_information_agent_record(premis_file):\r\n def data_packager():\r\n return namedtuple(\"agent_data\", \"name identifier type events\")\\\r\n (agent_name, agent_identifier, agent_type, agent_events)\r\n\r\n this_record = open_premis_record(premis_file)\r\n this_agent = this_record.get_agent_list()[0]\r\n agent_identifier = this_agent.get_agentIdentifier()[0].get_agentIdentifierValue()\r\n agent_type = this_agent.get_agentType()\r\n agent_name = this_agent.get_agentName()[0]\r\n try:\r\n agent_events = [x.get_linkingEventIdentifierValue()\r\n for x in this_agent.get_linkingEventIdentifier()]\r\n except KeyError:\r\n agent_events = []\r\n data = data_packager()\r\n return data\r\n\r\ndef find_particular_event(event_list, event_string):\r\n \"\"\"a function to seek out a particular type of event from a list of events in a PremisRecord\r\n\r\n __Args__\r\n 1. event_list (list): a list of pypremis.lib.Event nodes\r\n 2. 
event_string (str): a string representing an eventCategory that needs to be searched for\r\n \"\"\"\r\n output = None\r\n for n_event in event_list:\r\n if n_event.get_eventCategory() == event_string:\r\n output = n_event\r\n break\r\n return output\r\n\r\ndef get_events_from_a_premis_record(premis_record):\r\n \"\"\"a function to retrieve a list of events from a given premis record\r\n __Args__\r\n 1, premis_record (PremisRecord):\r\n \"\"\"\r\n if not isinstance(premis_record, PremisRecord):\r\n raise ValueError(\"{} is not a valid PremisRecord instance\\n\".format(str(premis_record)))\r\n premis_events = premis_record.get_event_list()\r\n events = []\r\n for n_event in premis_events:\r\n event_date = n_event.get_eventDateTime()\r\n event_type = n_event.get_eventType()\r\n event_outcome = n_event.get_eventOutcomeInformation()[0].get_eventOutcome()\r\n events.append((event_type, event_date, event_outcome))\r\n return events\r\n\r\n# end of premis searching functions\r\n","sub_path":"ldrpremisbuilding/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"587329155","text":"# -*- coding: utf-8 -*-\nimport alpaca_trade_api as tradeapi\nimport time\nimport threading\nimport warnings\nfrom collections import deque\nimport random\nfrom pickle import dump,load\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn import preprocessing as spp\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom os import path\n\nimport keras\nfrom keras.preprocessing.sequence import TimeseriesGenerator\nfrom keras.models import Sequential\nfrom keras.layers import LSTM\nfrom keras.layers import Dense\nfrom keras.metrics import RootMeanSquaredError\nfrom keras.metrics import MeanSquaredError\n\nimport logging\n\n#TODO: Fill in missing days(weekends, holidays) with interpolated results\n# should remove errors due to variable time differentials between observations.\n#TODO: Reframe data as deltas from open. could be less consistent but wont hit consistent sells.\n#TODO: Convert to Alpaca bars.df instead of bars_to_data_frame\nclass AutoTrader:\n def __init__(self):\n warnings.filterwarnings(\"ignore\")\n gmt = pd.Timestamp.now()\n logging.basicConfig(filename=str.format('Info_{}.log',str(gmt.date())),level=logging.INFO)\n self.api = tradeapi.REST()\n self.window_size = 100\n self.scaler = {}\n self.n = 10\n \n class AccountRestrictedError(Exception):\n \"\"\"Exception Raised for accounts restricted from trading.\"\"\"\n def __init__(self, account, message):\n self.message = message\n self.account = account\n \n def run(self):\n # print('Performing First Time Training')\n logging.info(str.format('----------------------------------------\\nPerforming First Time Training: t = {}',pd.Timestamp.now('EST').time()))\n # logging.info(str.format('Canceling Orders: t = {}',pd.Timestamp.now('EST').time()))\n # self.api.cancel_all_orders()\n \n if not path.exists('Network'):\n #if the universe file does not exist yet create a universe file\n #containing a list of symbols trading between 5 and 25 USD.\n #ISSUE:this is very slow and prediction results are poor.\n symbols = list()\n if not path.exists('universe'):\n logging.info('No Universe File Found, Creating Universe:\\n')\n logging.info(str.format('Fetching Assets: t = {}',pd.Timestamp.now('EST').time()))\n assets = self.api.list_assets(status = 'active')\n logging.info(str.format('Assets Fetched: t = {}',pd.Timestamp.now('EST').time())) \n for asset in assets: \n if asset.tradable == True:\n symbols.append(asset.symbol)\n #check last price to filter to only tradeable assets that fall within our price range\n logging.info(str.format('Checking Asset Trade Range: t = {}',pd.Timestamp.now('EST').time()))\n self.data_frame = self.get_bar_frame(symbols)\n logging.info(str.format('Data Fetched: t = {}', pd.Timestamp.now('EST').time()))\n self.data_frame = self.data_frame.sort_index().iloc[-self.window_size:]\n #drop the incomplete bar containing today's data\n self.data_frame = self.data_frame.loc[pd.Timestamp.today(tz ='EST').floor('D')]\n logging.info(str.format('Data Sorted: t = ',pd.Timestamp.now('EST').time()))\n self.data_frame = self.data_frame.interpolate(method = 'time')\n self.data_frame = self.data_frame.bfill()\n \n pop_indx = []\n suffixes = {'_o','_h','_l','_c','_v'}\n # names = {'open','high','low','close','volume'}\n for symbol in symbols:\n #check if symbol has any data\n for suffix in suffixes:\n if not pop_indx.__contains__(symbol):\n if not symbol+suffix in self.data_frame.columns:\n pop_indx.append(symbol)\n elif not suffix.startswith('_v'):\n closes = 
self.data_frame.loc[:,symbol + suffix].fillna(0)\n if closes.isna().sum()>0:\n pop_indx.append(symbol)\n elif closes.gt(25).sum()>0:\n pop_indx.append(symbol)\n elif closes.lt(5).sum()>0:\n pop_indx.append(symbol)\n logging.info('Symbols outside range identified: Count = ' + str(len(pop_indx)))\n for symbol in pop_indx:\n symbols.remove(symbol)\n for suffix in suffixes:\n if symbol+suffix in self.data_frame.columns:\n self.data_frame.pop(symbol + suffix)\n logging.info(str.format('Symbols removed: t = {}', pd.Timestamp.now('EST').time()))\n \n with open('universe','w') as universe_file:\n for symbol in symbols:\n universe_file.write(symbol+ '\\n')\n else:\n self.symbols = self.read_universe()\n #convert bar entity data into raw numerical data\n logging.info(str.format('Fetching Data For Training: t = {}', pd.Timestamp.now('EST').time()))\n self.data_frame = self.get_bar_frame(self.symbols)\n \n logging.info(str.format('Training Data Fetched: t = {}', pd.Timestamp.now('EST').time()))\n self.data_frame = self.data_frame.sort_index() #.iloc[-self.window_size:]\n logging.info(str.format('Training Data Sorted: t = {}', pd.Timestamp.now('EST').time()))\n \n \n \n self.data_frame = self.preprocess(self.data_frame,True)\n \n dump(self.scaler,open('scaler.pkl','wb'))\n \n Y = self.data_frame.loc[:,self.data_frame.iloc[0].index.map(lambda t: t.endswith('_h') or t.endswith('_l'))] \n self.time_data_generator = TimeseriesGenerator(self.data_frame.to_numpy(), Y.to_numpy(),\n \tlength=self.n, sampling_rate=1, batch_size=10)\n self.train_neural_network(self.time_data_generator)\n else:\n self.neural_network = keras.models.load_model('Network')\n self.symbols = self.read_universe()\n self.scaler = load(open('scaler.pkl','rb'))\n \n \n while True:\n tAMO = threading.Thread(target = self.await_market_open())\n tAMO.start()\n tAMO.join()\n \n # Check if account is restricted from trading.\n account = self.api.get_account()\n if account.trading_blocked:\n logging.error('account is currently restricted from trading.')\n raise self.AccountRestrictedError(account,'account is currently restricted from trading.')\n #TODO:should break out here if account blocked no point in trying ot trade.\n \n today = pd.Timestamp.today(tz = 'EST').floor('D')\n #the following should execute only once the market is open\n \n prices = self.get_bar_frame(symbols = self.symbols, window_size = 11).loc[:today]\n #process prediction data, dropping today's incomplete bar \n pred = self.timeseries_prediction(prices)\n #get our current positions\n logging.info(str.format('Getting Positions: t = {}', pd.Timestamp.now('EST').time()))\n positions = self.api.list_positions()\n orders = self.api.list_orders('closed',after = today.isoformat())\n #if we havent made any orders today. 
\n #could cause issues with manual trading or other scripts/bots\n #TODO: Rewrite for constant evaluation of symbols\n \n if len(orders)<=0:\n for position in positions:\n symbol = position.symbol\n qty = int(position.qty)\n #multiply the last trading day's high&low by the fractional \n # predicted gains or losses to obtain expected high and low\n high = prices.iloc[-1].loc[symbol+'_h']*(1+pred.loc[0, symbol + '_h'])\n low = prices.iloc[-1].loc[symbol+'_l']*(1+pred.loc[0, symbol + '_l'])\n \n if high<=low:\n logging.error(str.format('Limit Sell Sym:{} [high({})<=low({})] limit_price {}', symbol,high,low,high))\n self.api.submit_order(symbol,qty,'sell','limit','day',high)\n else:\n #3.\tPlace OCO orders 15 minutes* after market open on current positions based on estimated H/L.\n logging.info(str.format('OCO Limit sell, Stop Loss {} limit_price {} stop_price {}',symbol,high,low))\n self.api.submit_order(symbol = symbol, qty = qty, side = 'sell', type = 'limit', time_in_force ='day', order_class = 'oco', take_profit = {\"limit_price\":high},stop_loss = {\"stop_price\":low})\n \"\"\"\n #4. every minute while the market is open,from approximately midday until 15 minutes before \n # market close, predict gains using today's data and create a queue\n # of symbols in order of predicted gains.\n # if we have more than 5% of our equity as available cash, make a\n # limit order for the next symbol in the queue for <%5 of our equity. \n \"\"\"\n tAMD = threading.Thread(target = self.await_midday())\n tAMD.start()\n tAMD.join()\n clock = self.api.get_clock()\n next_close = clock.next_close\n while pd.Timestamp.now(tz='EST')<(next_close-pd.Timedelta(15,'min')).tz_convert('EST'):\n account = self.api.get_account()\n self.MaxOrderCost = float(account.equity) * 0.05\n cash = self.get_available_cash()\n if cash>=self.MaxOrderCost:\n prices.append(self.get_bar_frame(self.symbols, window_size=1).loc[today])\n pred = self.timeseries_prediction(prices)\n\n order_symbols = [order.symbol for order in self.api.list_orders(status = 'all', after = today.isoformat()) if order.side == 'buy']\n position_symbols = [position.symbol for position in self.api.list_positions()]\n do_not_buy = list(set(order_symbols)|set(position_symbols))\n queue = deque(pred.loc[:,pred.loc[0].index.map(lambda t: t.endswith('_h'))].sort_values(by=0,axis=1).columns.to_numpy(copy = True))\n while cash>=self.MaxOrderCost:\n symbol = queue.pop()[:-2]\n if not symbol in do_not_buy:\n price = prices.loc[today].loc[symbol+'_c']\n qty = (self.MaxOrderCost//price)\n logging.info(str.format('\\tLimit Buy {} shares of {} limit price = {} \\@ {}',qty,symbol,price,pd.Timestamp.now('EST').time()))\n self.api.submit_order(symbol=symbol,qty = qty,side = 'buy',type = 'limit',time_in_force = 'day',limit_price = price)\n #adjust cash for new open order\n cash = cash-(price*qty)\n if symbol in position_symbols:\n for order_id in [order.id for order in self.api.list_orders('all',after = today.isoformat()) if order.symbol == symbol]:\n self.api.cancel_order(order_id)\n #remove data\n prices = prices.loc[:today]\n time.sleep(60)\n #5.\tCancel open orders 15 minutes* before market close.\n logging.info('canceling all orders')\n self.api.cancel_all_orders()\n time.sleep(60*16)\n\n def get_available_cash(self):\n account = self.api.get_account()\n #set our maximum buy order value to 5% of our total equity\n cash = float(account.cash) \n orders = self.api.list_orders()\n for order in orders:\n if order.side == 'buy':\n cash = cash - 
(float(order.limit_price)*int(order.qty))\n return cash\n \n def timeseries_prediction(self, data_frame):\n data = self.preprocess(data_frame)\n targets = data.loc[:,data.iloc[0].index.map(lambda t: t.endswith('_h') or t.endswith('_l'))]\n tdg = TimeseriesGenerator(data.to_numpy(),targets.to_numpy(),10,batch_size=10)\n pred = pd.DataFrame(self.neural_network.predict(tdg))\n pred.set_axis(targets.columns, axis = 'columns', inplace = True)\n self.inverse_scaling(pred,self.scaler)\n return pred\n \n def scale_data(self, data_frame, scaler, initial = False):\n logging.info(str.format('Scaling Data t = {}', pd.Timestamp.now('EST').time()))\n for data in data_frame:\n if initial:\n if not data.startswith('t_'):\n scaler[data] = spp.StandardScaler()\n scaled = scaler[data].fit_transform(np.array(data_frame.loc[:,data]).reshape(-1,1))\n index = 0\n for date in data_frame.index:\n data_frame.loc[date,data] = scaled[index][0]\n index+=1\n else:\n if not data.startswith('t_'):\n scaled = scaler[data].transform(np.array(data_frame.loc[:,data]).reshape(-1,1))\n index = 0\n for date in data_frame.index:\n data_frame.loc[date,data] = scaled[index][0]\n index+=1\n return data_frame\n logging.info(str.format('Data Normalized: t = ', pd.Timestamp.now('EST').time()))\n \n #takes an integer indexed data frame and returns that data frame unscaled\n def inverse_scaling(self,data_frame,scaler):\n logging.info(str.format('Unscaling Data t = ', pd.Timestamp.now('EST').time()))\n for data in data_frame:\n if not data.startswith('t_'):\n scaled = scaler[data].inverse_transform(np.array(data_frame.loc[:,data]).reshape(-1,1))\n for i in range(len(scaled)):\n data_frame.loc[i,data] = scaled[i][0]\n \n #obtain OHLCV bar data for securities returns a DataFrame for the past \n #{window_size} {bar_length} indexed by symbol and day\n def get_bar_frame(self, symbols, algo_time = None, window_size = None, bar_length = 'day'):\n data_frame = pd.DataFrame()\n if window_size == None:\n window_size = self.window_size\n if not isinstance(bar_length,str):\n raise ValueError('bar_length must be a string.')\n index = 0\n batch_size = 200\n formatted_time = 0\n if algo_time is not None:\n formatted_time = algo_time.isoformat()\n else:\n formatted_time = self.api.get_clock().timestamp.astimezone('EST') \n delta = pd.Timedelta(window_size,'D')\n logging.info(str.format('Getting Bars: t = {}', pd.Timestamp.now('EST').time()))\n while index < len(symbols):\n symbol_batch = symbols[index:index+batch_size]\n logging.info(str.format('Getting Bars for indicies {}:{} t = {}',index,index+batch_size,pd.Timestamp.now('EST').time()))\n # Retrieve data for this batch of symbols\n bars = self.api.get_barset(\n symbols=symbol_batch,\n timeframe=bar_length,\n limit= window_size,\n end=formatted_time.isoformat(),\n start=(formatted_time - delta).isoformat()\n )\n logging.info(str.format('Bars Recieved: t = {}', pd.Timestamp.now('EST').time()))\n index+=batch_size\n #start threads here\n data_frame = data_frame.join(self.bars_to_data_frame(bars),how='outer')\n #join threads here\n return data_frame\n \n # Wait for market to open.\n # Checks the clock every minute while the market is not open.\n def await_market_open(self):\n clock = self.api.get_clock()\n openingTime = clock.next_open.astimezone('EST')\n closingTime = clock.next_close.astimezone('EST')\n if openingTime= 0 else -1\n large0 = p.get_height() if p.get_height() >=0 else 0\n # print(p.get_height(), large0)\n ax.annotate('{0:.1f}%'.format(100 * sign * p.get_height()),\n (p.get_x() + 
p.get_width() / 2., large0),\n                    ha='center', va='center', fontsize=11, color='gray', rotation=30, xytext=(0, 15),\n                    textcoords='offset points')\n        aaa.apply(annotateBars, ax=g, axis=1)\n\n        sns.plt.ylabel('收益占比')\n        # sns.plt.rcParams['image.cmap'] = 'Paired'\n\n        sns.plt.show()\n\n        # asset_contribution_total = pd.DataFrame({'业绩归因': asset_contribution_total})\n        # asset_contribution_total.plot(title=self._s_name + ' 业绩归因', kind = 'bar', alpha=0.9,\n        #                               colormap=\"Paired\"\n        #                               )\n        # plt.show()\n\n        return asset_contribution_total\n\n    def draw_down_plot(self, legend=True, figure_size=(12, 6), color='Green'): # color = Green\n        \"\"\"绘制回撤分析图\"\"\"\n\n        # 回撤分析图\n        self._backtest_res.daily_dd.plot(kind='area', title=self._s_name+' 回撤分析', label=self._s_name, legend=legend,\n                                         alpha=0.6, color='Gray', ylim=(self._backtest_res.daily_dd.min()-0.1, 0),\n                                         figsize=figure_size)\n        mdd_sdt = self._backtest_res.analyze_result['Mdd_start']\n        mdd_edt = self._backtest_res.analyze_result['Mdd_end']\n        mdd_range = self._backtest_res.analyze_result['Mdd_range']\n\n        # 最大回撤区间\n        self._backtest_res.daily_dd[mdd_sdt:mdd_edt].plot(kind='area', label='最大回撤区间({0}日)'.format(mdd_range),\n                                                          legend=legend, alpha=0.8, color=color)\n\n        locs, labels = plt.yticks()\n        plt.yticks(locs, ['{0:.0f}%'.format(a * 100) for a in locs])\n\n        plt.legend(loc=3)\n        plt.show()\n\n    def draw_rebalance_weight(self, figure_size=(12, 6),\n                              cmap=sns.color_palette(\"Paired\", 12)): # sns.color_palette(\"Paired\") \"RdGy\", 6\n        \"\"\"绘制调仓的权重图\"\"\"\n\n        weight = self._rebalance_weight.copy()\n        weight.index = [pd.datetime.strftime(a, '%Y-%m') for a in weight.index]\n\n        weight.plot(title=self._s_name + ' 调仓权重', kind='bar', stacked=True, figsize=figure_size, alpha=0.9,\n                    color=cmap)\n\n        step = len(weight.index) // 8\n        plt.xticks(range(0, len(weight.index), step), [weight.index[i] for i in range(0, len(weight.index), step)])\n\n        loc, _ = plt.yticks()\n        plt.yticks(loc, ['{0:.0f}%'.format(a*100) for a in loc])\n\n        plt.legend(bbox_to_anchor=(1.05, 0.5), loc=2, borderaxespad=0.) 
# put legend on the right\n plt.show()\n\n def draw_daily_weight(self, figure_size=(12, 6),\n cmap=sns.color_palette(\"Paired\", 12)): # ns.diverging_palette(255, 133, l=60, n=7, center=\"dark\")\n \"\"\"绘制每日权重图\"\"\"\n\n weight = self._daily_weight.copy()\n weight.index = [pd.datetime.strftime(a, '%Y-%m') for a in weight.index]\n weight.plot(title=self._s_name + ' 每日权重', kind='area', stacked=True, figsize=figure_size, alpha=0.9,\n color=cmap)\n\n step = len(weight.index) // 8\n plt.xticks(range(0, len(weight.index), step), [weight.index[i] for i in range(0, len(weight.index), step)])\n plt.yticks(np.arange(0,1.1,0.2), ['{0:.0f}%'.format(a*100) for a in np.arange(0,1.1,0.2)])\n\n plt.legend(bbox_to_anchor=(1.05, 0.5), loc=2, borderaxespad=0.)\n plt.show()\n\n\ndef analyze(res_nv, rf=None, res_turnover=None, freq='D'):\n \"\"\"对回测结果进行分析\"\"\"\n\n annal_ret = als.cal_annal_return(res_nv, freq=freq)\n annal_vol = als.cal_annal_volatility(res_nv, freq=freq)\n max_dd, mdd_sdt, mdd_edt, mdd_range, daily_dd = als.cal_max_drawdown_info(res_nv)\n sharpe = als.cal_sharpe(res_nv, rf=rf, freq=freq)\n if res_turnover is None:\n to_average = .0\n else:\n to_average = res_turnover.mean()\n max_wait_days = als.cal_max_wait_periods(res_nv)\n IR = als.cal_information_ratio(res_nv)\n \n analyze_result ={\n \"Annal ret\": annal_ret, \"Annal vol\": annal_vol, \"Max Drawdown\": max_dd, \"Sharpe\": sharpe,\n \"Average turnover\": to_average, \"Mdd_start\": mdd_sdt, \"Mdd_end\": mdd_edt, \"Mdd_range\": mdd_range,\n \"Max_wait\": max_wait_days, 'Information Ratio': IR\n }\n\n res_report = {\n \"Annal ret\" : '{0:.2%}'.format(annal_ret), \"Annal vol\": '{0:.2%}'.format(annal_vol),\n \"Max Drawdown\": '{0:.2%}'.format(max_dd), \"Sharpe\": '{0:.2}'.format(sharpe),\n \"Average turnover\": '{0:.2%}'.format(to_average), \"Mdd_start\": mdd_sdt, \"Mdd_end\": mdd_edt,\n \"Mdd_range\": '{0} Days'.format(mdd_range), \"Max_wait\": '{0} Days'.format(max_wait_days),\n \"Information Ratio\": '{0:.2}'.format(IR)\n }\n return pd.DataFrame(res_report, index=['value']).T.sort_index()\n\n\n\nif __name__ == '__main__':\n # 获取收益率数据\n db = pd.HDFStore('..\\data\\DB.h5')\n print(db.keys())\n ret = db['ret_index']\n # ret = ret[['Bond', 'Stock']]\n ret = ret.dropna()\n db.close()\n\n from pyasset.estimator import Estimator\n from pyasset.allocation import Allocation\n from pyasset.config import TRADING_DAYS_A_YEAR\n\n # 估计参数\n m_estimator = Estimator(ret)\n ret_xp = m_estimator.ewm(halflife=60).mean().shift(1)\n ret_xp = (ret_xp + 1) ** TRADING_DAYS_A_YEAR - 1 # 年化\n\n cov_xp = m_estimator.ewm(halflife=10).cov().shift(1, axis=0)\n cov_xp = cov_xp * TRADING_DAYS_A_YEAR # 年化\n\n m_allocation = Allocation(ret_xp, cov_xp, 'M', '2008-12-31', '2016-01-09')\n m_allocation.get_rebalance_info()\n\n weight_m = m_allocation.get_equal_weight()\n\n b4 = Backtest(weight_m, ret, start_date='2005-12-31', end_date='2016-01-09', fee_rate=0)\n b4.analyze()\n\n # ret_daily = b4.res_nv.pct_change()\n # asset_ret = b4._quote.reindex(ret_daily.index)\n # asset_weight = b4.res_weight.shift(1).reindex(ret_daily.index)\n # print((((1+asset_ret) * asset_weight).sum(axis=1) - ret_daily-1).sum())\n #\n # asset_contribution = ((asset_ret * asset_weight).T / ret_daily * np.log(ret_daily + 1)).T\n # asset_contribution_total = asset_contribution.sum() / (np.log(ret_daily + 1).sum())\n # asset_contribution_total['residual'] = 1 - asset_contribution_total.sum()\n #\n # print(asset_contribution_total)\n\n x = Xray(b4, strategy_name='测试')\n # attri = 
x.return_analyser(start_date='2005-12-31', end_date='2016-01-09', plot=True)\n # print(attri)\n # x.draw_down_plot()\n # x.draw_rebalance_weight()\n # x.draw_daily_weight()\n x.run()","sub_path":"pyasset/xray.py","file_name":"xray.py","file_ext":"py","file_size_in_byte":10692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"62330735","text":"import base64\nimport os\nfrom django.shortcuts import render\nfrom django.views.generic import FormView\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom counter.forms import RegistrationForm\nfrom counter.models import Profile\nfrom django.views import generic\n\n\ndef home_view(request):\n return render(request, \"index.html\")\n\n\n# @login_required\ndef profile_view(request):\n\n try:\n current_user = request.user\n profile = Profile.objects.get(user_id=current_user.id)\n except (Profile.DoesNotExist, User.DoesNotExist):\n return render(request, 'no-profile-available.html')\n\n total = profile.total\n if request.method == 'POST' and request.POST.get('number'):\n total = float(request.POST.get('number')) * float(2)\n Profile.objects.filter(id=profile.id).update(total=total)\n\n if 'clear all' in request.POST:\n total = 0\n Profile.objects.filter(id=profile.id).update(total=0)\n\n return render(request,\n \"profile.html\", {\n 'current_user': current_user.username,\n 'total': total\n })\n\n\nclass RegistrationView(FormView):\n model = User\n template_name = 'registration/register.html'\n form_class = RegistrationForm\n success_url = '/send_confirmation_mail'\n\n def form_valid(self, form):\n user = User.objects.create_user(form.data['username'],\n form.data['email'],\n form.data['password1'],\n first_name=form.data['first_name'],\n last_name=form.data['last_name'])\n user.is_active = False\n user.save()\n\n if user is not None:\n self.generate_profile(user)\n\n return super(RegistrationView, self).form_valid(form)\n\n def generate_key(self):\n return base64.b32encode(os.urandom(7))[:10].lower()\n\n def generate_profile(self, user):\n profile = Profile(key=self.generate_key(), user=user)\n profile.save()\n send_mail(\n 'Mate Counter account confirmation',\n \"\"\"\n Hello,\n\n please click this link to activate your Mate Counter account:\n {0}/registration_done/{1}\n\n Sincerely,\n The Mate Counter Team\n \"\"\".format(settings.SITE_URL, profile.key.decode(\"utf-8\")),\n 'matecounter@matecounter.com',\n [user.email],\n fail_silently=False,\n )\n\n\nclass RegistrationDoneView(generic.TemplateView):\n template_name = 'registration/registration_done.html'\n\n def get_context_data(request, key):\n matches = Profile.objects.filter(key=key)\n if matches.exists():\n profile = matches.first()\n if profile.user.is_active:\n request.template_name = (\n 'osschallenge/user_is_already_active.html')\n else:\n profile.user.is_active = True\n profile.user.save()\n else:\n request.template_name = 'osschallenge/registration_failed.html'\n\n\ndef send_confirmation_mail_view(request):\n return render(request, \"registration/send_confirmation_mail.html\")\n","sub_path":"counter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"345940700","text":"def factorial(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n return n * factorial(n-1)\r\n\r\ndef factorial_prompt():\r\n print(\"Please select one of the parts to run: \")\r\n options = \"\"\" 1. Printing factorials\r\n 2. Digits in factorials\r\n 3. Factoring factorials\"\"\"\r\n print(options)\r\n opt=input(\"Enter choice: \")\r\n if (opt==\"1\"):\r\n factorial_print() \r\n elif (opt==\"2\"):\r\n frequency_print()\r\n elif (opt==\"3\"):\r\n factorization_print()\r\n\r\ndef factorial_print():\r\n n=int(input(\"Enter a number: \"))\r\n x=1\r\n while x<=n:\r\n outp1=\"{}! = {}\"\r\n print(outp1.format(x,factorial(x)))\r\n x=x+1\r\n\r\ndef frequency_print():\r\n n = int(input(\"Enter the maximum n to compute n! for: \"))\r\n d = int(input(\"Enter a single digit: \"))\r\n\r\n # count how often digit d occurs in n! and print the result (it was previously computed but discarded)\r\n print(frequency_digits(factorial(n), d))\r\n\r\ndef factorization_print():\r\n number=int(input(\"Enter a number: \"))\r\n factorization(number)\r\n# Digits in factorials\r\ndef frequency_digits(n, d):\r\n c = 0\r\n while (n > 0):\r\n if (n % 10 == d):\r\n c += 1\r\n n = n // 10 # floor division stays exact for arbitrarily large integers\r\n return c\r\n# Factoring factorials\r\ndef factorization(number):\r\n print(str(number) + \"! = \" + \"*\".join(str(n) for n in range(1, number + 1)))\r\n","sub_path":"factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"166975989","text":"import evaluation as ev\nfrom evaluation import *\n\n\"\"\"\n@desc: Entropy-Entropy plot\n\"\"\"\ndef plot(x, y, xlabel, ylabel, filename, title=None):\n # Calculate entropy of x\n x_entropies = np.array([ev._helper.transition_entropy(t) for t in x])\n # Calculate entropy of y\n y_entropies = np.array([ np.array([ev._helper.transition_entropy(t) for t in y[i]]) for i in range(NUM_RUNS)])\n\n # Define mean and std of y entropies\n y_mean = np.mean(y_entropies, axis=0)\n y_std = np.std(y_entropies, axis=0)\n\n plt.plot(np.arange(0, 1.5, 0.1), np.arange(0, 1.5, 0.1), color='darkorange')\n #plt.scatter(x_entropies, y_entropies, color=\"red\")\n plt.errorbar(x_entropies, y_mean, yerr=y_std, fmt=\"-o\", markersize=4, capsize=4)\n plt.ylim(ymin=0)\n plt.xlim(xmin=0)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.savefig(PLOTPATH + '/'+ filename +'.'+ FILE_TYPE, format=FILE_TYPE, transparent=True)\n plt.close()\n\ndef initial_weights(weight_transitions):\n plot(PARA.c.source.transitions, weight_transitions,\n xlabel=r'$H_{input}$', ylabel=r'$H_{weight\\;output}$',\n filename=\"entropy_weights-initial\")\n\ndef initial_spont(spont_transitions):\n plot(PARA.c.source.transitions, spont_transitions,\n xlabel=r'$H_{input}$', ylabel=r'$H_{spont\\;output}$',\n filename=\"entropy_spont-initial\")\n","sub_path":"michaelis/evaluation/entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"637322873","text":"\"\"\"\nReading csv file using csv reader\n\"\"\"\nimport csv\nfrom conf.read_conf import read_config\n\n# ----home path\nhome = read_config()\n\n# ----calculate total\ntotal = 0.0\nwith open(home + \"Data/profile.csv\", \"r\") as f:\n rows = csv.reader(f)\n headers = next(rows) # Header\n for row in rows:\n row[2] = int(row[2])\n row[3] = float(row[3])\n print(row)\n total += row[2] * row[3]\n\nprint('Total cost:', total)\n","sub_path":"lesson_3/csv_reader.py","file_name":"csv_reader.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"634212765","text":"import time\nimport tkinter.ttk as ttk\nfrom tkinter import *\n\nroot = Tk()\nroot.title(\"D582 GUI\")\nroot.geometry(\"640x480\") # width * height\n\n# progressbar = ttk.Progressbar(root, maximum=100, mode=\"indeterminate\") # bounces back and forth\nprogressbar = ttk.Progressbar(root, maximum=100, mode=\"determinate\")\nprogressbar.start(10) # advances every 10 ms\nprogressbar.pack()\n\ndef btncmd():\n progressbar.stop() # stop the animation\n\nbtn = Button(root, text=\"Stop\", command=btncmd)\nbtn.pack()\n\np_var2 = DoubleVar()\nprogressbar2 = ttk.Progressbar(root, maximum=100, length=150, variable=p_var2) # length : length of the bar\nprogressbar2.pack()\n\ndef btncmd2():\n for i in range(101):\n time.sleep(0.01) # wait 0.01 s\n \n p_var2.set(i) # set the progress bar value\n progressbar2.update() # update the UI\n print(p_var2.get())\n\nbtn2 = Button(root, text=\"Start\", command=btncmd2)\nbtn2.pack()\n\nroot.mainloop()","sub_path":"gui_basic/9_progressbar.py","file_name":"9_progressbar.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"586987201","text":"#!/usr/bin/env python\n# coding=utf-8\n\ndef foo(familyname, givename, lastname):\n print('my familyname is ' +str(familyname))\n print('my givename is ' + str(givename))\n print('my lastname is ' + str(lastname))\n\n\n\nif __name__ == '__main__':\n username = ('wang', 'hao')\n # foo(* username)\n\n username2 = {'givename':'hao', 'familyname':'wang', 'lastname':'lee'}\n foo(**username2)\n","sub_path":"python/system/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69359389","text":"from django.test import TestCase\nfrom modules.accounts.models import User\n\n\nclass UserPessoaFisicaTestCase(TestCase):\n def setUp(self):\n user = User.objects.create(\n nome='Bruce Wayne', \n cpf='066.123.234-99', \n email='bruce.wayne@batman.com',\n password='123123',\n telefone='(86) 3212-1234',\n endereco='Mansão Wayne',\n numero='1233',\n bairro='Cais (Zona Portuária)',\n cidade='Gotham City',\n cep='98.123-123',\n estado='MA'\n )\n user.full_clean()\n\n def test_create_user(self):\n username = '06612323499'\n user = User.objects.get(username=username)\n self.assertEqual(user.username, username)\n ","sub_path":"tests/accounts/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"1107263","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.utils.project import get_project_settings\nfrom freezermealpro.items import Recipe\nsettings = get_project_settings()\n\n\nclass RecipesSpider(scrapy.Spider):\n name = 'recipes'\n allowed_domains = ['members.newleafwellness.biz']\n start_urls = ['https://members.newleafwellness.biz/']\n\n def parse(self, response):\n login_form = response.xpath('//form[@id=\"loginform\"]')\n if login_form:\n return scrapy.FormRequest.from_response(\n response,\n formxpath='//form[@id=\"loginform\"]',\n formdata={'log': settings.get('FMP_USER'),\n 'pwd': settings.get('FMP_PASS')},\n callback=self.parse_recipes,\n dont_filter=True\n )\n else:\n return self.parse_recipes(response)\n\n def parse_recipes(self, response):\n for recipe in response.css('article.recipe'):\n recipe_url = recipe.css(\n 'a.entire-meta-link ::attr(href)').get()\n image_url = recipe.css(\n 'span.post-featured-img img ::attr(src)').get()\n categories = recipe.css('span.meta-category a ::text').getall()\n request = scrapy.Request(recipe_url, callback=self.parse_recipe)\n request.cb_kwargs['recipe_url'] = recipe_url\n request.cb_kwargs['image_url'] = image_url\n request.cb_kwargs['categories'] = categories\n yield request\n\n next_page = response.css('div#pagination a.next ::attr(href)').get()\n if next_page:\n yield scrapy.Request(next_page, callback=self.parse)\n\n def parse_recipe(self, response, recipe_url, image_url, categories):\n content = response.css('article.recipe')\n\n recipe = Recipe()\n recipe['origin_url'] = recipe_url\n recipe['image_url'] = image_url\n recipe['categories'] = categories\n recipe['title'] = content.css('span.wpurp-recipe-title ::text').get()\n recipe['description'] = content.css(\n 'span.wpurp-recipe-description ::text').get()\n\n # recipe['notes'] = content.css(\n # 'div.wpurp-recipe-notes').get()\n\n yield recipe\n","sub_path":"dataprovider/freezermealpro/freezermealpro/spiders/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"507818174","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCrop images\n\n@author: Tom Williams\n\"\"\"\n\nimport numpy as np\nfrom astropy.io import fits\n\n#850 Files, crop to 30 arcmin\n\nfolder = '/home/daedalusdata/c1625914/UsefulCode/gmc_catalogue/to_feather/'\n\nfilenames = ['450-combined']\n\nfor filename in filenames:\n data,header = fits.getdata(folder+filename+'.fits',header=True)\n \n# if len(data.shape) == 3:\n# data = data[0,:,:]\n# \n# if len(data.shape) == 4:\n# data = data[0,0,:,:]\n \n x_centre = data.shape[2]/2\n y_centre = data.shape[3]/2\n \n radius = {'850-combined':225,'450-combined':450,\n 'm33-850':225,'m33-450':450}[filename]\n \n for i in range(data.shape[2]):\n for j in range(data.shape[3]):\n if (i-x_centre)**2+(j-y_centre)**2 > radius**2:\n data[0,0,i,j] = np.nan\n \n fits.writeto(folder+filename+'_crop.fits',data,header,clobber=True)","sub_path":"gmc_catalogue/old/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"140101692","text":"import sqlite3\n\nclass SQL:\n def __init__(self, sql):\n \"Open a connection to the SQLite database at the given path.\"\n self.dbpath = sql\n self.conn = sqlite3.connect(sql)\n \n def query(self, query):\n \"Execute a query and return all fetched rows.\"\n curs = self.conn.cursor()\n result = curs.execute(query).fetchall()\n curs.close()\n self.conn.commit()\n return result","sub_path":"module1-introduction-to-sql/part1_query.py","file_name":"part1_query.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"294402032","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Created on 2018-07-10 08:13\n \n @Author : CCHEN\n \n @Purpose: Read information from define file, include channels, power and rods\n\"\"\"\n\n\n############################################################################\nclass DefineRelation(object):\n\n def __init__(self, finp):\n self.fi = finp\n\n def maprodchannel(self):\n finp = self.fi\n\n mapasem = []\n maprods = []\n for lines in finp:\n if 'coreasem' in lines:\n for line in finp:\n if 'end' not in line:\n line = line.split()\n mapasem.append(line)\n else:\n break\n elif 'assemrods' in lines:\n for line in finp:\n if 'end' not in line:\n line = line.split()\n maprods.append(line)\n else:\n break\n ###############################################\n # sorting relation for channels and rods\n chlasem = []\n mxasem = len(mapasem)\n myasem = len(mapasem[0])\n for i in range(mxasem):\n chanl = []\n cdict = {}\n for j in range(myasem):\n cdict['numb'] = int(mapasem[i][j])\n if mapasem[i][j] != '0':\n cdict['xcrd'] = [i]\n cdict['ycrd'] = [j]\n else:\n if 'xcrd' in cdict:\n cdict.pop('xcrd')\n if 'ycrd' in cdict:\n cdict.pop('ycrd')\n chanl.append(cdict.copy())\n chlasem.append(chanl)\n #\n chlrods = []\n mxrods = len(maprods)\n myrods = len(maprods[0])\n for i in range(mxrods + 1):\n chanl = []\n cdict = {}\n for j in range(myrods + 1):\n (cdict['numb'], cdict['xcrd'], cdict['ycrd']) = DefineRelation.checkaround(i, j, maprods)\n chanl.append(cdict.copy())\n chlrods.append(chanl)\n\n return chlasem, chlrods\n\n @staticmethod\n def checkaround(ii, jj, maprods):\n numb = 0\n xcrd = []\n ycrd = []\n mxrod = len(maprods) - 1\n myrod = len(maprods[0]) - 1\n for m in range(2):\n ix = ii - m\n for n in range(2):\n jy = jj - n\n if 0 <= ix <= mxrod and 0 <= jy <= myrod:\n numb = numb + 1\n xcrd.append(ix)\n ycrd.append(jy)\n\n return numb, xcrd, ycrd\n\n\n############################################################################\nclass ReadDistribution(object):\n\n def __init__(self, fichl, fipow):\n self.fichl = fichl\n self.fipow = fipow\n\n def readchannels(self):\n typ = 'int'\n numb = 0\n finp = self.fichl\n for lines in finp:\n if 'Number' in lines:\n for line in finp:\n if 'end' not in line:\n line = int(line)\n distribution = [[] for k in range(line)]\n else:\n break\n elif 'Channels' in lines:\n mapcs = ReadDistribution.readmodule(finp, typ)\n ReadDistribution.rankchannel(mapcs, distribution, 'cs')\n elif 'A1' in lines:\n numb = numb + 1\n mapa1 = ReadDistribution.readmodule(finp, typ)\n ReadDistribution.rankchannel(mapa1, distribution, 'a1')\n elif 'A2' in lines:\n numb = numb + 1\n mapa2 = ReadDistribution.readmodule(finp, typ)\n ReadDistribution.rankchannel(mapa2, distribution, 'a2')\n elif 'A3' in lines:\n numb = numb + 1\n mapa3 = ReadDistribution.readmodule(finp, typ)\n ReadDistribution.rankchannel(mapa3, distribution, 'a3')\n elif 'A4' in lines:\n numb = numb + 1\n mapa4 = ReadDistribution.readmodule(finp, typ)\n ReadDistribution.rankchannel(mapa4, distribution, 'a4')\n #\n return numb, distribution, mapcs\n\n def readradialpower(self):\n typ = 'float'\n finp = self.fipow\n rtemp = mapcs = mapa1 = mapa2 = mapa3 = mapa4 = []\n for lines in finp:\n if 'Power' in lines:\n mapcs = ReadDistribution.readmodule(finp, typ)\n elif 'A1' in lines:\n mapa1 = ReadDistribution.readmodule(finp, typ)\n elif 'A2' in lines:\n mapa2 = ReadDistribution.readmodule(finp, typ)\n elif 'A3' in lines:\n mapa3 = ReadDistribution.readmodule(finp, typ)\n elif 'A4' in lines:\n mapa4 = 
ReadDistribution.readmodule(finp, typ)\n elif 'Temp' in lines:\n rtemp = ReadDistribution.readmodule(finp, typ)\n #\n return rtemp, mapcs, mapa1, mapa2, mapa3, mapa4\n\n @staticmethod\n def readmodule(finp, typ):\n data = []\n for line in finp:\n if 'end' not in line:\n line = line.split()\n if typ == 'int':\n line = list(map(int, line))\n elif typ == 'float':\n line = list(map(float, line))\n data.append(line)\n else:\n break\n\n return data\n\n @staticmethod\n def rankchannel(mapinp, mapchnl, loc):\n\n for i in range(len(mapinp)):\n for j in range(len(mapinp[0])):\n mloc = mapinp[i][j] - 1\n if mloc >= 0:\n mapchnl[mloc].append([loc, i, j])\n\n\n############################################################################\nclass ReadRadialBoundary(object):\n def __init__(self, finp):\n self.finp = finp\n self.mapbnd = []\n\n def readboundary(self, fcase):\n finp = self.finp\n self.mapbnd = []\n #\n cdict = {}\n for lines in finp:\n if 'Temp' in lines:\n for linem in finp:\n if fcase in linem:\n cdict['Temp'] = ReadDistribution.readmodule(finp, 'int')\n break\n self.mapbnd.append(cdict.copy())\n elif 'Flow' in lines:\n for linem in finp:\n if fcase in linem:\n cdict['Flow'] = ReadDistribution.readmodule(finp, 'int')\n break\n self.mapbnd.append(cdict.copy())\n","sub_path":"MSLBInputCard/DefineRelation.py","file_name":"DefineRelation.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"274584901","text":"\"\"\"Support for Orbit BHyve switch (toggle zone).\"\"\"\nimport datetime\nimport logging\n\nfrom datetime import timedelta\nfrom homeassistant.components.switch import DEVICE_CLASS_SWITCH, SwitchDevice\nfrom homeassistant.util import dt\n\nfrom . import BHyveEntity\nfrom .const import DOMAIN\nfrom .pybhyve.errors import BHyveError\nfrom .util import orbit_time_to_local_time\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_MANUAL_RUNTIME = timedelta(minutes=10)\n\nPROGRAM_SMART_WATERING = \"e\"\nPROGRAM_MANUAL = \"manual\"\n\nATTR_MANUAL_RUNTIME = \"manual_preset_runtime\"\nATTR_SMART_WATERING_ENABLED = \"smart_watering_enabled\"\nATTR_SPRINKLER_TYPE = \"sprinkler_type\"\nATTR_IMAGE_URL = \"image_url\"\nATTR_STARTED_WATERING_AT = \"started_watering_station_at\"\nATTR_SMART_WATERING_PLAN = \"watering_program\"\n\nATTR_PROGRAM = \"program_{}\"\n\n\nasync def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):\n \"\"\"Set up BHyve binary sensors based on a config entry.\"\"\"\n bhyve = hass.data[DOMAIN]\n\n switches = []\n devices = await bhyve.devices\n programs = await bhyve.timer_programs\n for device in devices:\n if device.get(\"type\") == \"sprinkler_timer\":\n for zone in device.get(\"zones\"):\n name = \"{0} Zone\".format(zone.get(\"name\", \"Unknown\"))\n _LOGGER.info(\"Creating switch: %s\", name)\n switches.append(\n BHyveZoneSwitch(\n hass, bhyve, device, zone, name, programs, \"water-pump\"\n )\n )\n\n async_add_entities(switches, True)\n\n\nclass BHyveZoneSwitch(BHyveEntity, SwitchDevice):\n \"\"\"Define a BHyve switch.\"\"\"\n\n def __init__(self, hass, bhyve, device, zone, name, programs, icon):\n \"\"\"Initialize the switch.\"\"\"\n self._zone = zone\n self._zone_id = zone.get(\"station\")\n self._entity_picture = zone.get(\"image_url\")\n self._manual_preset_runtime = device.get(\n \"manual_preset_runtime_sec\", DEFAULT_MANUAL_RUNTIME.seconds\n )\n self._initial_programs = programs\n\n super().__init__(hass, bhyve, device, name, icon, DEVICE_CLASS_SWITCH)\n\n def _setup(self, device):\n self._state = None\n self._attrs = {}\n self._available = device.get(\"is_connected\", False)\n\n status = device.get(\"status\", {})\n watering_status = status.get(\"watering_status\")\n\n _LOGGER.info(\"{} watering_status: {}\".format(self.name, watering_status))\n\n zones = device.get(\"zones\", [])\n\n zone = None\n for z in zones:\n if z.get(\"station\") == self._zone_id:\n zone = z\n break\n\n if zone is not None:\n is_watering = (\n watering_status is not None\n and watering_status.get(\"current_station\") == self._zone_id\n )\n self._state = is_watering\n self._attrs = {ATTR_MANUAL_RUNTIME: self._manual_preset_runtime}\n\n smart_watering_enabled = zone.get(\"smart_watering_enabled\")\n if smart_watering_enabled is not None:\n self._attrs[ATTR_SMART_WATERING_ENABLED] = smart_watering_enabled\n\n sprinkler_type = zone.get(\"sprinkler_type\")\n if sprinkler_type is not None:\n self._attrs[ATTR_SPRINKLER_TYPE] = sprinkler_type\n\n image_url = zone.get(\"image_url\")\n if image_url is not None:\n self._attrs[ATTR_IMAGE_URL] = image_url\n\n if is_watering:\n started_watering_at = watering_status.get(\"started_watering_station_at\")\n self._set_watering_started(started_watering_at)\n\n if self._initial_programs is not None:\n programs = self._initial_programs\n for program in programs:\n self._set_watering_program(program)\n self._initial_programs = None\n\n def _set_watering_started(self, timestamp):\n if timestamp is not None:\n 
self._attrs[ATTR_STARTED_WATERING_AT] = orbit_time_to_local_time(timestamp)\n else:\n self._attrs[ATTR_STARTED_WATERING_AT] = None\n\n def _set_watering_program(self, program):\n if program is None:\n return\n\n program_name = program.get(\"name\", \"Unknown\")\n program_id = program.get(\"program\")\n program_enabled = program.get(\"enabled\", False)\n program_attr = ATTR_PROGRAM.format(program_id)\n\n # Filter out any run times which are not for this switch\n active_program_run_times = list(\n filter(\n lambda x: (x.get(\"station\") == self._zone_id),\n program.get(\"run_times\", []),\n )\n )\n\n is_smart_program = program.get(\"is_smart_program\", False)\n\n self._attrs[program_attr] = {\n \"enabled\": program_enabled,\n \"name\": program_name,\n \"is_smart_program\": is_smart_program,\n }\n\n if not program_enabled or not active_program_run_times:\n _LOGGER.info(\n \"Watering program {} ({}) is not enabled, skipping\".format(\n program_name, program_id\n )\n )\n if is_smart_program == True:\n self._attrs[ATTR_SMART_WATERING_PLAN] = None\n\n return\n\n \"\"\"\n \"name\": \"Backyard\",\n \"frequency\": { \"type\": \"days\", \"days\": [1, 4] },\n \"start_times\": [\"07:30\"],\n \"budget\": 100,\n \"program\": \"a\",\n \"run_times\": [{ \"run_time\": 20, \"station\": 1 }],\n \"\"\"\n\n if is_smart_program == True:\n upcoming_run_times = []\n for plan in program.get(\"watering_plan\", []):\n run_times = plan.get(\"run_times\")\n if run_times:\n zone_times = list(\n filter(lambda x: (x.get(\"station\") == self._zone_id), run_times)\n )\n if zone_times:\n plan_date = orbit_time_to_local_time(plan.get(\"date\"))\n for time in plan.get(\"start_times\", []):\n t = dt.parse_time(time)\n upcoming_run_times.append(\n plan_date + timedelta(hours=t.hour, minutes=t.minute)\n )\n self._attrs[ATTR_SMART_WATERING_PLAN] = upcoming_run_times\n else:\n self._attrs[program_attr].update(\n {\n \"start_times\": program.get(\"start_times\", []),\n \"frequency\": program.get(\"frequency\", []),\n \"run_times\": active_program_run_times,\n }\n )\n\n def _on_ws_data(self, data):\n \"\"\"\n {'event': 'change_mode', 'mode': 'auto', 'device_id': 'id', 'timestamp': '2020-01-09T20:30:00.000Z'}\n {'event': 'watering_in_progress_notification', 'program': 'e', 'current_station': 1, 'run_time': 14, 'started_watering_station_at': '2020-01-09T20:29:59.000Z', 'rain_sensor_hold': False, 'device_id': 'id', 'timestamp': '2020-01-09T20:29:59.000Z'}\n {'event': 'device_idle', 'device_id': 'id', 'timestamp': '2020-01-10T12:32:06.000Z'}\n {'event': 'set_manual_preset_runtime', 'device_id': 'id', 'seconds': 480, 'timestamp': '2020-01-18T17:00:35.000Z'}\n {'event': 'program_changed' }\n \"\"\"\n event = data.get(\"event\")\n if event is None:\n _LOGGER.warning(\"No event on ws data {}\".format(data))\n return\n elif event == \"device_idle\" or event == \"watering_complete\":\n self._state = False\n self._set_watering_started(None)\n elif event == \"watering_in_progress_notification\":\n zone = data.get(\"current_station\")\n if zone == self._zone_id:\n self._state = True\n started_watering_at = data.get(\"started_watering_station_at\")\n self._set_watering_started(started_watering_at)\n elif event == \"change_mode\":\n program = data.get(\"program\")\n self._state = program == PROGRAM_SMART_WATERING or program == PROGRAM_MANUAL\n elif event == \"set_manual_preset_runtime\":\n self._manual_preset_runtime = data.get(\"seconds\")\n self._attrs[ATTR_MANUAL_RUNTIME] = self._manual_preset_runtime\n elif event == \"program_changed\":\n 
watering_program = data.get(\"program\")\n lifecycle_phase = data.get(\"lifecycle_phase\")\n if lifecycle_phase != \"destroy\":\n self._set_watering_program(watering_program)\n else:\n self._attrs[ATTR_SMART_WATERING_PLAN] = None\n\n async def _send_station_message(self, station_payload):\n try:\n now = datetime.datetime.now()\n iso_time = now.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n payload = {\n \"event\": \"change_mode\",\n \"mode\": \"manual\",\n \"device_id\": self._device_id,\n \"timestamp\": iso_time,\n \"stations\": station_payload,\n }\n _LOGGER.info(\"Starting watering\")\n await self._bhyve.send_message(payload)\n\n except BHyveError as err:\n _LOGGER.warning(\"Failed to send to BHyve websocket message %s\", err)\n raise (err)\n\n @property\n def entity_picture(self):\n return self._entity_picture\n\n @property\n def unique_id(self):\n \"\"\"Return a unique, unchanging string that represents this sensor.\"\"\"\n return f\"{self._mac_address}:{self._device_type}:zone:{self._zone_id}\"\n\n @property\n def is_on(self):\n \"\"\"Return the status of the sensor.\"\"\"\n return self._state is True\n\n async def async_turn_on(self, **kwargs):\n \"\"\"Turn the switch on.\"\"\"\n station_payload = [\n {\"station\": self._zone_id, \"run_time\": self._manual_preset_runtime}\n ]\n self._state = True\n await self._send_station_message(station_payload)\n\n async def async_turn_off(self, **kwargs):\n \"\"\"Turn the switch off.\"\"\"\n station_payload = []\n self._state = False\n await self._send_station_message(station_payload)\n\n","sub_path":"custom_components/bhyve/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":10225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"498616597","text":"import torch\nfrom torch import Tensor\n\nfrom . import dtw\nfrom . import path_dtw2\n\n\nclass DTWShpTime(torch.nn.Module):\n def __init__(self, alpha, gamma):\n \"\"\"\n Batch-DILATE loss function, a batchwise extension of https://github.com/vincent-leguen/DILATE\n\n :param alpha: Weight of shape component of the loss versus the temporal component.\n :type alpha: float\n :param gamma: Weight of softmax component of DTW.\n :type gamma: float\n \"\"\"\n super(DTWShpTime, self).__init__()\n assert 0 <= alpha <= 1\n assert 0 <= gamma <= 1\n self.alpha = alpha\n self.gamma = gamma\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor:\n \"\"\"\n Pass through the loss function with input tensor (the prediction) and the target tensor.\n\n :param input: prediction, shape should be (batch, channels, num_timesteps_outputs).\n :type input: torch.Tensor\n :param target: target with same shape as prediction.\n :type target: torch.Tensor\n :return: total_loss, shape_loss, temporal_loss, with first dimensions being the batch\n :rtype: tuple\n \"\"\"\n assert input.device == target.device\n batch_size, N_channel, N_output = input.shape\n\n D = dtw.pairwise_distances_with_channels_and_batches(\n target[:, :, :].reshape(batch_size * N_channel, N_output, 1).double(),\n input[:, :, :].reshape(batch_size * N_channel, N_output, 1).double()\n )\n\n D = D.reshape(batch_size, N_channel, N_output, N_output)\n\n softdtw_batch = dtw.SoftDTWBatch.apply\n loss_shape = softdtw_batch(D, self.gamma)\n\n path_dtw = path_dtw2.PathDTWBatch2.apply\n path = path_dtw(D, self.gamma)\n\n Omega = dtw.pairwise_distances(torch.arange(1, N_output + 1).view(N_output, 1)).to(target.device)\n\n Omega = Omega.repeat(N_channel, 1, 1)\n loss_temporal = torch.sum(path * Omega, dim=(1, 2)) / (N_output * N_output)\n\n loss = self.alpha * loss_shape + (1 - self.alpha) * loss_temporal\n loss = loss.mean()\n\n return loss, loss_shape, loss_temporal\n","sub_path":"batchdilate/dilate_loss_clean.py","file_name":"dilate_loss_clean.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"27345268","text":"from flask import render_template, redirect, request\r\nfrom app import app, models, db\r\nfrom .forms import CustomerForm, OrderForm\r\n\r\n@app.route('/')\r\ndef index():\r\n return redirect('/create_customer')\r\n\r\n@app.route('/create_customer', methods=['GET', 'POST'])\r\ndef create_customer():\r\n customerForm = CustomerForm()\r\n if customerForm.validate_on_submit():\r\n customer = models.Customer(\r\n first_name = customerForm.first_name.data,\r\n last_name = customerForm.last_name.data,\r\n company = customerForm.company.data,\r\n email = customerForm.email.data,\r\n phone = customerForm.phone.data)\r\n # you will need to add Address here\r\n address = models.Address(\r\n street_name = customerForm.street_name.data,\r\n city = customerForm.city.data,\r\n state = customerForm.state.data,\r\n country = customerForm.country.data,\r\n zip_code = customerForm.zip_code.data,\r\n customer = customer)\r\n db.session.add(customer)\r\n db.session.add(address)\r\n db.session.commit()\r\n return redirect('/customers')\r\n return render_template('customer.html', form=customerForm)\r\n\r\n@app.route('/customers')\r\ndef display_customer():\r\n customers = models.Customer.query.all()\r\n orders = models.Order.query.all()\r\n #orderCustomer = models.orders.query.all()\r\n return render_template('home.html', customers=customers, orders=orders)\r\n\r\n@app.route('/create_order', methods=['GET', 'POST'])\r\ndef create_order():\r\n orderForm = OrderForm()\r\n if orderForm.validate_on_submit():\r\n order = models.Order(\r\n num_parts_ordered = orderForm.num_parts_ordered.data,\r\n total_spent = orderForm.total_spent.data)\r\n customerids = orderForm.customer_id.data.split(',')\r\n\r\n for customer_id in customerids:\r\n customer = models.Customer.query.filter_by(id=customer_id).first()\r\n customer.orders.append(order)\r\n db.session.add(order)\r\n db.session.commit()\r\n return redirect('/customers')\r\n return render_template('orderform.html', form=orderForm)\r\n","sub_path":"flask/orm-tutorial/models-tutorial/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"580450815","text":"#!/usr/bin/env python\nimport bottle\nimport subprocess\nimport os\nimport classifier\nimport settings\nimport utils\n\nfrom custom import MissingFile\n\np1 = subprocess.Popen(['ip','addr','show','eth0'],stdout=subprocess.PIPE)\np2 = subprocess.Popen(['sed','-rn',r's/\\s*inet\\s(([0-9]{1,3}\\.){3}[0-9]{1,3}).*/\\1/p'],stdin=p1.stdout,stdout=subprocess.PIPE)\np1.stdout.close()\nip_addr = p2.communicate()[0].strip()\np1.wait()\n\napp = bottle.app()\n\n@app.hook('after_request')\ndef handle_cors():\n \"\"\"\n Let there be no cors at all ;)\n \"\"\"\n bottle.response.headers['Access-Control-Allow-Origin'] = '*'\n bottle.response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n bottle.response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n@bottle.route('/')\ndef index():\n return {'status': 'ok'}\n\n@bottle.route('/classifications', method=['OPTIONS', 'POST'])\ndef classify():\n if bottle.request.method == 'OPTIONS':\n return {}\n try:\n img = utils.save_image(bottle.request)\n return classifier.classify(settings.UPLOADS + img.filename)\n except MissingFile:\n return {'error': 'missing image file'}\n\n@bottle.route('/urls', method=['OPTIONS', 'POST'])\ndef classify_url():\n if bottle.request.method == 'OPTIONS':\n return {}\n filename= utils.save_image_from_url(bottle.request)\n return classifier.classify(settings.UPLOADS + filename)\n\n\nif __name__=='__main__':\n bottle.debug(True)\n bottle.run(app=app, host='localhost', port=80)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"525594172","text":"import cv2\nimport numpy as np\n\n\n# select the camera\ncap = cv2.VideoCapture(1)\n\nwhile(1):\n # read a frame from the camera\n ret, camera = cap.read()\n\n # check whether the frame was read successfully\n if ret == True:\n # median filter to remove noise\n camera = cv2.medianBlur(camera, 5)\n #RGB 2 GRAY\n cameraGray = cv2.cvtColor(camera, cv2.COLOR_RGB2GRAY)\n\n # detect circles\n circles = cv2.HoughCircles(cameraGray, cv2.HOUGH_GRADIENT,4,100,\n param1=150,param2=150,minRadius=38,maxRadius=80)\n\n # check whether any circles were detected\n if circles is not None:\n # draw the circles on the image\n circles = np.uint16(np.around(circles))\n for i in circles[0,:]:\n # draw the outer circle\n cv2.circle(camera,(i[0],i[1]),i[2],(0,255,0),2)\n # draw the center of the circle\n cv2.circle(camera,(i[0],i[1]),2,(0,0,255),3)\n cv2.imshow('capture',camera)\n\n # press 'q' to quit the program\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"TestCode/real-timeCircleCheck.py","file_name":"real-timeCircleCheck.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"219014311","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nfrom subprocess import call\r\nfrom threading import Thread\r\nimport requests\r\nimport os, errno\r\nimport json\r\n\r\n\r\nclass Catcher:\r\n def __init__(self):\r\n self.broswer_path = None\r\n self.driver = None\r\n\r\n def create_download_dir(self, path):\r\n try:\r\n os.makedirs(path)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n return True\r\n\r\n def set_browser_path(self, path):\r\n self.broswer_path = str(path)\r\n self.driver = webdriver.PhantomJS(executable_path=str(path))\r\n return True\r\n\r\n def set_target_url(self, url):\r\n self.target_url = str(url)\r\n return True\r\n\r\n def parse(self, target_url):\r\n try:\r\n self.driver.get(target_url)\r\n soup = BeautifulSoup(self.driver.page_source, 'html.parser')\r\n except:\r\n print('Fail to parse '+target_url)\r\n return False\r\n return soup\r\n\r\n def get_all_episodes(self, soup):\r\n ep_list = []\r\n tmp = soup.find_all(id='info')\r\n tag_a_list = tmp[1].find_all('a')\r\n for i in range(len(tag_a_list)):\r\n ep_list.append('http://www.cartoonmad.com'+tag_a_list[i]['href'])\r\n return ep_list\r\n\r\n def parse_single_episode(self, ep_link):\r\n return self.parse(ep_link)\r\n\r\n def get_episodes_pics(self, soup):\r\n return soup.select('img[src*=\"cartoonmad\"]')[0]['src']\r\n\r\n def get_episode_links(self, soup):\r\n return 'http://www.cartoonmad.com/comic/'+soup.select('img[src*=\"cartoonmad\"]')[0].parent['href']\r\n\r\n def has_next_page(self, soup):\r\n next_page_link = soup.select('img[src*=\"cartoonmad\"]')[0].parent['href']\r\n if next_page_link == 'thend.asp':\r\n return False\r\n else:\r\n return True\r\n\r\n def download_episode_pics(self, download_dir, episode_num, pic_links):\r\n download_dir += str(episode_num)+'/'\r\n self.create_download_dir(download_dir)\r\n pic_list = []\r\n print('Downloading episode '+str(episode_num))\r\n for i in range(len(pic_links)):\r\n file_name = download_dir+pic_links[i].split('/')[-2]+'-'+pic_links[i].split('/')[-1]\r\n r = requests.get(pic_links[i])\r\n pic_list.append(file_name)\r\n with open(file_name, 'wb') as outfile:\r\n outfile.write(r.content)\r\n self.build_json(pic_list, download_dir)\r\n print('Downloading episode '+str(episode_num)+' finished')\r\n\r\n def load_current_episode_num(self, download_dir):\r\n try:\r\n count_file = open(download_dir+'current_episode.txt','r')\r\n except:\r\n self.save_current_episode_num(0, download_dir)\r\n return 0\r\n current_episode_num = int(count_file.readline())\r\n count_file.close()\r\n return current_episode_num\r\n\r\n def save_current_episode_num(self, current_episode_num, download_dir):\r\n count_file = open(download_dir+'current_episode.txt','w')\r\n count_file.write(str(current_episode_num)+'\\n')\r\n count_file.close()\r\n return True\r\n\r\n def build_json(self, pic_list, download_dir):\r\n json_file = open(download_dir+'pic_list.json', 'w')\r\n json_file.write(json.dumps(pic_list))\r\n json_file.close()\r\n\r\n def build_comics_json(self, comics_list):\r\n json_file = open('F:/漫畫/漫畫/comics_list.json', 'w')\r\n json_file.write(json.dumps(comics_list))\r\n json_file.close()\r\n\r\n def read_comic_list(self):\r\n comics_list = []\r\n try:\r\n json_data=open('F:/漫畫/漫畫/comics_list.json').read()\r\n except:\r\n self.build_comics_json(comics_list)\r\n return comics_list\r\n comics_list = json.loads(json_data)\r\n return comics_list\r\n\r\ntest = Catcher()\r\ndownload_dir = 'F:/漫畫/漫畫/'\r\nurl = 
input('Input comic url on \"www.cartoonmad.com\" : ')\r\ncomic_name = input('Input comic name (this will create folder for download) : ')\r\ndownload_dir += comic_name+'/'\r\n\r\n#get comics list from json\r\ncomics_list = test.read_comic_list()\r\ncomics_list.sort()\r\n\r\nif comic_name not in comics_list:\r\n test.create_download_dir(download_dir)\r\n comics_list.append(comic_name)\r\ntest.set_browser_path('F:/projects/python_catch/phantomjs-2.1.1-windows/phantomjs-2.1.1-windows/bin/phantomjs')\r\n\r\n#update comics list\r\ntest.build_comics_json(comics_list)\r\n\r\n#get all episodes\r\nsoup = test.parse(url)\r\nepisodes = test.get_all_episodes(soup)\r\n\r\n#load current episode number\r\ncurrent_episode_num = test.load_current_episode_num(download_dir)\r\n\r\nfor i in range(current_episode_num, len(episodes), 1):\r\n test.save_current_episode_num(i+1, download_dir)\r\n pic_links = []\r\n episode_soup = test.parse_single_episode(episodes[i])\r\n count = 1\r\n while True:\r\n print('Processing episode '+ str(i+1) +' picture '+ str(count))\r\n picture_link = test.get_episodes_pics(episode_soup)\r\n pic_links.append(picture_link)\r\n next_page_link = test.get_episode_links(episode_soup)\r\n if test.has_next_page(episode_soup) == False:\r\n break\r\n episode_soup = test.parse(next_page_link)\r\n count += 1\r\n\r\n t = Thread(target=test.download_episode_pics, args=(download_dir, str(i+1), pic_links,))\r\n t.start()\r\n","sub_path":"catcher.py","file_name":"catcher.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"70054384","text":"from k3 import *\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nclass Fire(nn.Module):\n def __init__(\n self,\n inplanes,\n squeeze_planes,\n expand1x1_planes,\n expand3x3_planes,\n name='',\n A={}\n ):\n super(Fire, self).__init__()\n self.A = A\n self.name = name\n self.inplanes = inplanes\n self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)\n self.squeeze_activation = nn.ReLU(inplace=True)\n self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,\n kernel_size=1)\n self.expand1x1_activation = nn.ReLU(inplace=True)\n self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,\n kernel_size=3, padding=1)\n self.expand3x3_activation = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.squeeze_activation(self.squeeze(x))\n return torch.cat([\n self.expand1x1_activation(self.expand1x1(x)),\n self.expand3x3_activation(self.expand3x3(x))\n ], 1)\n\n\na = 3\nb = 16\nc = 32\nd = 32\ne = 2*d\n\"\"\"\na = 3\nb = 5\nc = 8\nd = 12\ne = 2*d\n\"\"\"\nshapes_have_been_printed = False\n\nclass SqueezeNet(nn.Module):\n def __init__(self,net_name):\n super().__init__()\n self.net_name = net_name\n self.a = nn.Conv2d(a, d, kernel_size=3, stride=2)\n self.b = nn.ReLU(inplace=True)\n self.c = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)\n self.d = Fire(d, b, d, d) \n self.e = Fire(d+d, b, d, d)\n self.f = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)\n self.g = Fire(e, c, e, e)\n self.i = Fire(e+e, c, e, e)\n self.j = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)\n self.k = nn.Upsample((32,32),mode='nearest')\n self.k2 = nn.Upsample((64,64),mode='nearest')\n self.k3 = nn.Upsample((140,140),mode='nearest')\n self.l = nn.Conv2d(\n in_channels=d+d,\n out_channels=a,\n padding=1,\n kernel_size=3)\n self.l2 = nn.Conv2d(\n in_channels=e+e,\n out_channels=a,\n padding=1,\n kernel_size=3)\n\n self.o = nn.AvgPool2d(2, stride=2)\n\n def ps(self,x,n=''):\n if shapes_have_been_printed:\n return\n print(self.net_name,n,shape(x))\n\n def forward(self,x,print_shape=False):\n \n x = self.k3(x); self.ps(x,'k3')\n\n x = self.a(x); self.ps(x,'a')\n \n x = self.b(x); self.ps(x,'b')\n \n x = self.c(x); self.ps(x,'c')\n \n x = self.d(x); self.ps(x,'d')\n \n x = self.e(x); self.ps(x,'e')\n\n #x = self.f(x); self.ps(x,'f')\n\n #x = self.g(x); self.ps(x,'g')\n\n #x = self.i(x); self.ps(x,'i')\n\n #x = self.j(x); self.ps(x,'j')\n\n x = self.k3(x); self.ps(x,'k')\n \n x = self.l(x); self.ps(x,'l')\n \n return x\n\n\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.aa = SqueezeNet('aa')\n self.bb = SqueezeNet('bb')\n self.cc = SqueezeNet('cc')\n self.dd = SqueezeNet('dd')\n self.A = {}\n\n def forward(self,x):\n\n self.A[0] = torch.clone(x)\n \n x = self.aa(x); self.A[1] = torch.clone(x)\n\n x = self.bb(x); self.A[2] = torch.clone(x)\n \n x = self.cc(x); self.A[3] = torch.clone(x)\n\n x = self.dd(x); self.A[4] = torch.clone(x)\n\n return torch.flatten(x, 1)\n\n\n\ndef from_1024_to_32x32(t):\n a = t.cpu().numpy()\n return a.reshape(32,32)\n\n\ndef get_batch(indicies,seen_indicies,bs,Inputs,Targets):\n assert len(indicies) >= bs\n b_input,b_output = [],[]\n for i in range(bs):\n j = indicies.pop()\n seen_indicies.append(j)\n b = []\n for l in [0,0,0]:\n a = Inputs[j+l][30:170,30:170,:]\n a = np.sum(a,axis=2)\n a[a>1] = 255\n b.append(a)\n b = na(b)\n b_input.append( b ) \n b_output.append( 
Targets[j][30:170,30:170,:].transpose(2,0,1) )\n\n b_input = na(b_input)\n\n b_output = na(b_output)\n\n return b_input, b_output\n\n\ndef cuda_to_rgb_image(cu):\n return z55(cu.detach().cpu().numpy()[0,:].transpose(1,2,0))\n\n\n\n#loss_list = []\n#running_loss = 0.0\n#ctr = 0\n#loss_ctr = 0\n\nsave_timer = Timer(600)\nshow_timer = Timer(5)\nftimer = Timer(10)\n#indicies = []\n#seen_indicies = []\n# epoch = 0\nbatch_size = 16\nPATH = opjD('segnet0.0.pth')\nload_net = False\n\n\nnet = Net()\nif load_net:\n net.load_state_dict(torch.load(PATH))\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nnet.to(device)\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(net.parameters(), lr=0.001)\n\n\nMeta = {\n 'train':{\n 'ctr':0,\n 'epoch':0,\n 'running_loss':0,\n 'loss_ctr':0,\n 'loss_list_x':[],\n 'loss_list_y':[],\n 'indicies':[],\n 'loss_color':'b',\n },\n 'val':{\n 'ctr':0,\n 'epoch':0,\n 'running_loss':0,\n 'loss_ctr':0,\n 'loss_list_x':[],\n 'loss_list_y':[],\n 'indicies':[],\n 'loss_color':'r',\n },\n}\nIndicies = {\n 'train':[],\n 'val':[],\n}\nSeen_indicies = {\n 'train':[],\n 'val':[],\n}\nmodes = ['train','val']\n\n_Data = h5r(opjD('data_with_flip.h5py'))\nInputs = np.concatenate((_Data['img'],_Data['img_flip']),axis=0)\nTargets = np.concatenate((_Data['seg'],_Data['seg_flip']),axis=0)\n\"\"\"\nif len(sggo(opjD('_indicies.pkl'))) > 0:\n cy('loading _indicies . . .',r=True)\n _indicies = loD('_indicies.pkl')\nelse:\n cE('creating _indicies . . .',r=True)\n _indicies = list(range(len(Targets)))\n \n np.random.shuffle(_indicies)\n cE('saving _indicies . . .',r=True)\n soD('_indicies.pkl',_indicies)\n_i = int(0.1*len(_indicies))\n\"\"\"\n_indicies = list(range(len(Targets)))\n\nl = len(_indicies)\nMeta['val']['indicies'] = _indicies[:l//10]\nMeta['train']['indicies'] = _indicies[l//10:]\ncg('len(_indicies) =',len(_indicies))\ncg(\"Meta['val']['indicies'] =\",len(Meta['val']['indicies']))\ncg(\"Meta['train']['indicies'] =\",len(Meta['train']['indicies']))\n\nctr0 = 0\n\nwhile True:\n\n for mode in modes:\n #cm(mode,len(Indicies[mode]) < batch_size)\n if len(Indicies[mode]) < batch_size:\n #cm(mode,r=True)\n Indicies[mode] = Meta[mode]['indicies'].copy()\n np.random.shuffle(Indicies[mode])\n Meta[mode]['epoch'] += 1\n #cm(mode,len(Indicies[mode]))\n if Meta[mode]['ctr'] > 0:\n shapes_have_been_printed = True\n\n Meta[mode]['ctr'] += 1\n if ftimer.rcheck():\n print('Hz',dp(batch_size*Meta[mode]['ctr']/ftimer.time_s))\n ctr = 0\n\n b_in,b_out = get_batch(\n Indicies[mode],Seen_indicies[mode],16,Inputs,Targets)\n\n inputs = torch.from_numpy(b_in).float()\n inputs = inputs.to(device)\n\n targets = torch.from_numpy(b_out).float()\n targets = targets.to(device)\n\n optimizer.zero_grad()\n\n outputs = net(inputs)\n\n loss = criterion(outputs,torch.flatten(targets,1))\n\n if mode == 'train':\n loss.backward()\n optimizer.step()\n\n Meta[mode]['running_loss'] += loss.item()\n Meta[mode]['loss_ctr'] += 1\n\n if save_timer.rcheck():\n\n torch.save(net.state_dict(), opjD(d2p('segnet1',int(time.time()),'pth')))\n\n if Meta[mode]['loss_ctr'] >= 500:\n Meta[mode]['loss_ctr'] = 0\n Meta[mode]['loss_list_y'].append(Meta[mode]['running_loss'])\n Meta[mode]['loss_list_x'].append(ctr0)\n ctr0 += 1\n Meta[mode]['running_loss'] = 0.0\n print('[%d, %5d] loss: %.6f' %\n (Meta[mode]['epoch'] + 1, ctr + 1, Meta[mode]['loss_list_y'][-1]))\n figure('loss',figsize=(2,2));clf()\n for _mode in modes:\n plot(Meta[_mode]['loss_list_x'],\n Meta[_mode]['loss_list_y'],\n Meta[_mode]['loss_color']);spause()\n 
figure('seen_indicies '+mode)\n clf()\n hist(Seen_indicies[mode])\n spause()\n\n if show_timer.rcheck():\n images = []\n for k in [0,1,2,3,4]:\n try:\n images.append(cuda_to_rgb_image(net.A[k]))\n except:\n pass\n images.append(cuda_to_rgb_image(targets))\n mi(np.concatenate(images,axis=1),'images '+mode)\n spause()\n\n\n\n\n\n\nprint('Finished Training')\n\n\n\n\nif False:\n torch.save(net.state_dict(), PATH)\n net = Net()\n \n\n\n#EOF\n","sub_path":"V/SegNet/_older/segnet1-cuda.py","file_name":"segnet1-cuda.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"305577255","text":"#!/usr/bin/env python\n#\n# This program shows how to use MPI_Gatherv. Each processor sends a\n# different amount of data to the root processor. We use MPI_Gather\n# first to tell the root how much data is going to be sent.\n#\n# numpy is required\nimport numpy\nfrom numpy import *\n\n# mpi4py module\nfrom mpi4py import MPI\nimport sys\n\n\n# Initialize MPI and print out hello\ncomm=MPI.COMM_WORLD\nmyid=comm.Get_rank()\nnumprocs=comm.Get_size()\nprint(\"hello from \",myid,\" of \",numprocs)\n\n\nmpi_root=0\n\n# Three step process for setting up the call to Gatherv\n# Each processor sends different amounts of data in\n# the Gatherv\n# Step 1\n# Here we set up the amount of data each will send.\nmysize=2*myid+2\nmyray=zeros(mysize,\"i\")\nfor i in range(0, mysize):\n\tmyray[i]=myid+1\nprint(myid,myray)\n\n\n# Step 2\n# Send the different numbers of values from each processor to the\n# root\n# mysize contains the number of values we will send in the gatherv\ncounts=comm.gather(mysize,root=mpi_root)\n# counts will only be defined on the root, None elsewhere\nprint(myid,counts)\n\n\nif myid == mpi_root :\n displacements=zeros(numprocs,\"i\")\nelse :\n displacements=zeros(0,\"i\")\n\n# Step 3\n# We set up the displacement array. This says where in our\n# final array each processor will put its data. For the \n# normal simple case this is just a running total of the \n# counts array\n\nif myid == mpi_root:\n displacements[0]=0\n for i in range(1, numprocs):\n displacements[i]=counts[i-1]+displacements[i-1]\n size=0\n for i in range(0, numprocs):\n size=size+counts[i]\n#myquit(\"almost\")\nallray=empty(sum(counts),\"i\")\n\n# Here we need to includ the counts and displacements array \ncomm.Gatherv(sendbuf=[myray, MPI.INT], recvbuf=[allray, (counts,displacements), MPI.INT], root=mpi_root) \n\n\t \nif myid == mpi_root:\n print(\"allrray= \",allray)\n\nMPI.Finalize()\n\n","sub_path":"array/bot/others/P_ex08.py","file_name":"P_ex08.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"646861821","text":"from typing import List\r\n\r\nclass Solution:\r\n def isOneBitCharacter(self, bits: List[int]) -> bool:\r\n # pattern-finding problem\r\n # if the third bit from the end is 0 the answer is definitely False; if it is 1 it is undecided, so keep analyzing (copied approach)\r\n bits_reverse = bits[::-1]\r\n bits_reverse.append(0)\r\n id1 = [i for i,x in enumerate(bits_reverse) if x==0] # keep only the indices of the zeros\r\n if id1[1] % 2 == 0:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n\r\n# failed attempt: not careful enough\r\n # if len(bits)==1:return True\r\n # elif bits[-2:]==[0,0]:return True\r\n # elif len(bits)<4 and bits[-3:]==[1,1,0]:\r\n # return True\r\n # elif len(bits)>=4 and bits[-4:]==[0,1,1,0]:\r\n # return True\r\n # return False\r\n","sub_path":"leetcode_solution/leetcode类别/1数组10.23-27.29/简单/717. 1比特与2比特字符.py","file_name":"717. 1比特与2比特字符.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"92464555","text":"from torchvision.datasets import MNIST\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom backbone.MNISTMLP_our import MNISTMLP_our\nimport torch.nn.functional as F\nfrom utils.conf import base_path\nfrom PIL import Image\nfrom datasets.utils.validation import get_train_val\nfrom datasets.utils.continual_dataset import ContinualDataset, store_masked_loaders_minist, get_previous_gan_loader\nfrom typing import Tuple\nfrom datasets.transforms.denormalization import DeNormalize_one\nimport numpy as np\nimport torchvision.transforms.functional as transofrms_f\n\n\nclass MyMNIST(MNIST):\n \"\"\"\n Overrides the MNIST dataset to change the getitem function.\n \"\"\"\n def __init__(self, root, train=True, transform=None,\n target_transform=None, download=False) -> None:\n self.not_aug_transform = transforms.ToTensor()\n super(MyMNIST, self).__init__(root, train,\n transform, target_transform, download)\n\n def __getitem__(self, index: int) -> Tuple[type(Image), int, type(Image)]:\n \"\"\"\n Gets the requested element from the dataset.\n :param index: index of the element to be returned\n :returns: tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n original_img = self.not_aug_transform(img.copy())\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n if hasattr(self, 'logits'):\n return img, target, original_img, self.logits[index]\n\n return img, target, original_img\n\n\nclass SequentialMNIST(ContinualDataset):\n\n N_CLASSES_PER_TASK = 2\n N_TASKS = 5\n TRANSFORM = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n\n def get_data_loaders_gan(self, gan_batch_size):\n train_dataset = MyMNIST(base_path() + 'MNIST', train=True,\n download=True, transform=None)\n\n return get_previous_gan_loader(train_dataset, gan_batch_size, self)\n\n def get_data_loaders(self):\n transform = self.TRANSFORM\n train_dataset = MyMNIST(base_path() + 'MNIST',\n train=True, download=True, transform=transform)\n if self.args.validation:\n train_dataset, test_dataset = get_train_val(train_dataset,\n transform, self.NAME)\n else:\n test_dataset = MNIST(base_path() + 'MNIST',\n train=False, download=True, transform=transform)\n\n train, test = store_masked_loaders_minist(train_dataset, test_dataset, self)\n return train, test\n\n def not_aug_dataloader(self, batch_size):\n transform = self.TRANSFORM\n # transform = transforms.ToTensor()\n train_dataset = MyMNIST(base_path() + 'MNIST',\n train=True, download=True, transform=transform)\n train_mask = np.logical_and(np.array(train_dataset.targets) >= self.i -\n self.N_CLASSES_PER_TASK, np.array(train_dataset.targets) < self.i)\n\n train_dataset.data = train_dataset.data[train_mask]\n train_dataset.targets = np.array(train_dataset.targets)[train_mask]\n\n train_loader = DataLoader(train_dataset,\n batch_size=batch_size, shuffle=True)\n return train_loader\n\n @staticmethod\n def get_backbone():\n return MNISTMLP_our(28 * 28, SequentialMNIST.N_TASKS\n * SequentialMNIST.N_CLASSES_PER_TASK * 50)\n\n @staticmethod\n def get_transform_mnist():\n transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n return 
transform\n\n @staticmethod\n def get_rotation_transform():\n transform = transforms.Compose([transforms.ToPILImage(), Rotation(), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n return transform\n\n @staticmethod\n def get_loss():\n return F.cross_entropy\n\n @staticmethod\n def get_normalization_transform():\n transform = None\n transform = transforms.Normalize((0.5,), (0.5,))\n return transform\n\n @staticmethod\n def get_denormalization_transform():\n transform = None\n transform = DeNormalize_one(0.5, 0.5)\n # return lambda x: x\n return transform\n\n @staticmethod\n def get_test_transform():\n test_transform = transforms.Compose(\n [transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n return test_transform\n\n\nclass Rotation(object):\n \"\"\"\n Defines a fixed rotation for a numpy array.\n \"\"\"\n\n def __init__(self, deg_min: int = -10, deg_max: int = 10) -> None: # set both bounds to 0 to make the rotation deterministic\n \"\"\"\n Initializes the rotation with a random angle.\n :param deg_min: lower extreme of the possible random angle\n :param deg_max: upper extreme of the possible random angle\n \"\"\"\n self.deg_min = deg_min\n self.deg_max = deg_max\n self.degrees = np.random.uniform(self.deg_min, self.deg_max)\n\n def __call__(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Applies the rotation.\n :param x: image to be rotated\n :return: rotated image\n \"\"\"\n return transofrms_f.rotate(x, self.degrees)\n","sub_path":"datasets/seq_mnist.py","file_name":"seq_mnist.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"92562269","text":"#!/usr/bin/env python\n\"\"\"Trigger class defines the condition for a Reaction to trigger.\"\"\"\n\nimport re\nfrom random import randrange\n\nimport discord\n\nfrom botly.knowledge import Knowledge\n\n\nclass Trigger:\n \"\"\"Defines and checks for the conditions.\"\"\"\n\n def __init__(self, eventName):\n self.eventName = eventName\n self.conditions = []\n self.advConditions = []\n self.requireMention = False\n self.triggerChance = 100\n\n\n def add_condition(self, variable, pattern):\n \"\"\"Add a simple condition where variable should match the pattern.\n \n The possible variable names are 'message' and 'author'.\n The pattern is a Regular Expression pattern that should respect\n the standard regex format. More information on this in the Python\n documentation.\n \"\"\"\n assert self._is_pattern_valid(pattern), \\\n 'Given regular expression pattern is invalid.'\n \n if 'message' == variable:\n assert 'message' in self.eventName, \\\n 'message variable not expected for this event.'\n elif 'author' == variable:\n assert self.eventName != 'on_ready', \\\n 'author not supported for on_ready event.'\n \n # TODO: add possibility to react on reactions with regular conditions\n # (for now, can only trigger on that based on adv_condition)\n\n condition = []\n condition.append(variable)\n condition.append(pattern)\n self.conditions.append(condition)\n \n def add_adv_condition(self, callback):\n \"\"\"Add a condition based on a callback function.\n\n The function will be called upon trigger verification.\n It should return True if the condition is respected or False if not.\n The event info table will be passed as an argument.\n \"\"\"\n assert callable(callback), 'Given argument must be a callback function'\n self.advConditions.append(callback)\n\n def set_trigger_chance(self, percent):\n \"\"\"Adds a chance for the trigger to activate. 100% is default value.\"\"\"\n \n assert isinstance(percent, int), 'Int expected for trigger chance.'\n percent = percent if percent >= 1 else 1\n percent = percent if percent <= 100 else 100\n self.triggerChance = percent\n\n def require_mention(self, value):\n \"\"\"Defines whether or not this bot has to be mentioned for trigger.\"\"\"\n assert isinstance(value, bool), 'Bool expected for require mention.'\n self.requireMention = value\n\n def is_triggered(self, bot, **eventInfo):\n \"\"\"This should only be called from Bot class.\n\n Checks whether or not the trigger object activates based on the\n given information. This is meant to be called from the Botly class\n that processes events and checks for triggers.\n \"\"\"\n # Run random. 
Do we have a chance to trigger?\n if not self._can_we_trigger():\n return False\n \n # Check if the bot is mentioned, if that is required:\n if self.requireMention and 'message' in self.eventName:\n if not bot.me.mentioned_in(eventInfo['message']):\n return False\n \n # Check for conditions:\n for condition in self.conditions:\n if not self._is_condition_true(condition, **eventInfo):\n return False\n\n # Check advanced conditions:\n for condition in self.advConditions:\n if not condition(**eventInfo):\n return False\n\n return True\n\n def _can_we_trigger(self):\n if self.triggerChance == 100:\n return True\n return randrange(1, 101) <= self.triggerChance # <= so an N% chance triggers N times in 100 on average\n\n def _is_condition_true(self, condition, **eventInfo):\n variable = condition[0]\n pattern = condition[1]\n\n if variable == 'author':\n if 'message' in self.eventName:\n if re.match(pattern, eventInfo['message'].author.id):\n return True\n if 'on_typing' == self.eventName:\n if re.match(pattern, eventInfo['user'].id):\n return True\n elif variable == 'message':\n if 'message' in self.eventName:\n if re.match(pattern, eventInfo['message'].content):\n return True\n return False\n \n def _is_pattern_valid(self, pattern):\n try:\n re.compile(pattern)\n return True\n except re.error:\n return False\n\n\n","sub_path":"botly/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"476652525","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport six\nimport sys\nimport unittest\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))\n\nimport pfp\nimport pfp.fields\nimport pfp.interp\nimport pfp.utils\n\nimport utils\n\nclass TestCompat(unittest.TestCase, utils.UtilsMixin):\n\tdef setUp(self):\n\t\tself._start_endian = pfp.fields.NumberBase.endian\n\t\n\tdef tearDown(self):\n\t\tpfp.fields.NumberBase.endian = self._start_endian\n\t\n\tdef test_big_endian(self):\n\t\t# just something different so that we know it changed\n\t\tpfp.fields.NumberBase.endian = pfp.fields.LITTLE_ENDIAN\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\t\tBigEndian();\n\t\t\t\"\"\"\n\t\t)\n\t\tself.assertEqual(pfp.fields.NumberBase.endian, pfp.fields.BIG_ENDIAN)\n\n\tdef test_little_endian(self):\n\t\t# just something different so that we know it changed\n\t\tpfp.fields.NumberBase.endian = pfp.fields.BIG_ENDIAN\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\t\tLittleEndian();\n\t\t\t\"\"\"\n\t\t)\n\t\tself.assertEqual(pfp.fields.NumberBase.endian, pfp.fields.LITTLE_ENDIAN)\n\t\n\tdef test_file_size(self):\n\t\tinput_ = six.StringIO(\"ABCDE\")\n\t\toutput_ = six.StringIO()\n\t\tsys.stdout = output_\n\t\tdom = pfp.parse(\n\t\t\tinput_,\n\t\t\t\"\"\"\n\t\t\tPrintf(\"%d\", FileSize());\n\t\t\t\"\"\",\n\t\t)\n\t\tsys.stdout = sys.__stdout__\n\n\t\tself.assertEqual(output_.getvalue(), \"5\")\n\nclass TestCompatInterface(unittest.TestCase, utils.UtilsMixin):\n\tdef setUp(self):\n\t\tpass\n\t\n\tdef tearDown(self):\n\t\tpass\n\t\n\tdef test_color_constants(self):\n\t\t# shouldn't error\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal int color;\n\t\t\tcolor = cBlack;\n\t\t\tcolor = cRed;\n\t\t\tcolor = cDkRed;\n\t\t\tcolor = cLtRed;\n\t\t\tcolor = cGreen;\n\t\t\tcolor = cDkGreen;\n\t\t\tcolor = cLtGreen;\n\t\t\tcolor = cBlue;\n\t\t\tcolor = cDkBlue;\n\t\t\tcolor = cLtBlue;\n\t\t\tcolor = cPurple;\n\t\t\tcolor = cDkPurple;\n\t\t\tcolor = cLtPurple;\n\t\t\tcolor = cAqua;\n\t\t\tcolor = cDkAqua;\n\t\t\tcolor = cLtAqua;\n\t\t\tcolor = cYellow;\n\t\t\tcolor = cDkYellow;\n\t\t\tcolor = cLtYellow;\n\t\t\tcolor = cDkGray;\n\t\t\tcolor = cGray;\n\t\t\tcolor = cSilver;\n\t\t\tcolor = cLtGray;\n\t\t\tcolor = cWhite;\n\t\t\tcolor = cNone;\n\t\t\t\"\"\",\n\t\t\tpredefines=True\n\t\t)\n\nclass TestCompatIO(unittest.TestCase, utils.UtilsMixin):\n\tdef setUp(self):\n\t\tpass\n\t\n\tdef tearDown(self):\n\t\tpass\n\t\n\tdef test_read_ushort(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\\x80\\x01\",\n\t\t\t\"\"\"\n\t\t\t\tlocal ushort blah = ReadUShort();\n\t\t\t\tPrintf(\"%d|\", blah);\n\t\t\t\tPrintf(\"%d\", FTell());\n\t\t\t\"\"\",\n\t\t\tverify=False,\n\t\t\tstdout=\"32769|0\"\n\t\t)\n\t\n\tdef test_read_bytes_uchar(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"ab\\x00\\x01\",\n\t\t\t\"\"\"\n\t\t\t\tlocal uchar data[2];\n\t\t\t\tReadBytes(data, FTell(), 2);\n\t\t\t\tPrintf(data);\n\n\t\t\t\tuchar a;\n\t\t\t\tuchar b;\n\t\t\t\tPrintf(\"%d%d\", a, b);\n\t\t\t\"\"\",\n\t\t\tverify=False,\n\t\t\tstdout=\"ab9798\"\n\t\t)\n\t\n\tdef test_seek1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\\x01\\x02ABCD\\x03\\x04\",\n\t\t\t\"\"\"\n\t\t\t\tuchar a;\n\t\t\t\tuchar b;\n\t\t\t\tFSeek(FTell() + 4);\n\t\t\t\tuchar c;\n\t\t\t\tuchar d;\n\t\t\t\"\"\",\n\t\t)\n\n\t\tself.assertEqual(dom.a, 1)\n\t\tself.assertEqual(dom.b, 2)\n\t\tself.assertEqual(dom._skipped, \"ABCD\")\n\t\tself.assertEqual(dom.c, 
3)\n\t\tself.assertEqual(dom.d, 4)\n\t\n\tdef test_seek2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\\x01\\x02ABCD\\x03EF\\x04\",\n\t\t\t\"\"\"\n\t\t\t\tuchar a;\n\t\t\t\tuchar b;\n\t\t\t\tFSeek(FTell() + 2);\n\t\t\t\tFSeek(FTell() + 2);\n\t\t\t\tuchar c;\n\t\t\t\tFSeek(FTell() + 2);\n\t\t\t\tuchar d;\n\t\t\t\"\"\",\n\t\t)\n\n\t\tself.assertEqual(dom.a, 1)\n\t\tself.assertEqual(dom.b, 2)\n\t\t# should be merged into one _skipped array\n\t\tself.assertEqual(dom._skipped_0, \"ABCD\")\n\t\tself.assertEqual(dom.c, 3)\n\t\tself.assertEqual(dom._skipped_1, \"EF\")\n\t\tself.assertEqual(dom.d, 4)\n\t\n\tdef test_seek3(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"ABCD\",\n\t\t\t\"\"\"\n\t\t\t\tPrintf(\"%d\", FSeek(FTell() + 4));\n\t\t\t\tPrintf(\"%d\", FSeek(FTell() + 2));\n\t\t\t\"\"\",\n\t\t\tverify=False,\n\t\t\tstdout=\"0-1\"\n\t\t)\n\t\n\tdef test_skip1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\\x01\\x02ABCD\\x03\\x04\",\n\t\t\t\"\"\"\n\t\t\t\tuchar a;\n\t\t\t\tuchar b;\n\t\t\t\tFSkip(4);\n\t\t\t\tuchar c;\n\t\t\t\tuchar d;\n\t\t\t\"\"\",\n\t\t)\n\n\t\tself.assertEqual(dom.a, 1)\n\t\tself.assertEqual(dom.b, 2)\n\t\tself.assertEqual(dom._skipped, \"ABCD\")\n\t\tself.assertEqual(dom.c, 3)\n\t\tself.assertEqual(dom.d, 4)\n\t\n\tdef test_skip2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\\x01\\x02ABCD\\x03EF\\x04\",\n\t\t\t\"\"\"\n\t\t\t\tuchar a;\n\t\t\t\tuchar b;\n\t\t\t\tFSkip(2);\n\t\t\t\tFSkip(2);\n\t\t\t\tuchar c;\n\t\t\t\tFSkip(2);\n\t\t\t\tuchar d;\n\t\t\t\"\"\",\n\t\t)\n\n\t\tself.assertEqual(dom.a, 1)\n\t\tself.assertEqual(dom.b, 2)\n\t\t# should be merged into one _skipped array\n\t\tself.assertEqual(dom._skipped_1, \"ABCD\")\n\t\tself.assertEqual(dom.c, 3)\n\t\tself.assertEqual(dom._skipped_2, \"EF\")\n\t\tself.assertEqual(dom.d, 4)\n\t\n\tdef test_skip3(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"ABCD\",\n\t\t\t\"\"\"\n\t\t\t\tPrintf(\"%d\", FSkip(4));\n\t\t\t\tPrintf(\"%d\", FSkip(2));\n\t\t\t\"\"\",\n\t\t\tverify=False,\n\t\t\tstdout=\"0-1\"\n\t\t)\n\nclass TestCompatString(unittest.TestCase, utils.UtilsMixin):\n\tdef setUp(self):\n\t\tpass\n\t\n\tdef tearDown(self):\n\t\tpass\n\t\n\tdef test_memcpy1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"abcd\",\n\t\t\t\"\"\"\n\t\t\tuchar bytes[4];\n\t\t\tlocal uchar local_bytes[4];\n\t\t\tMemcpy(local_bytes, bytes, 4);\n\n\t\t\tPrintf(local_bytes);\n\t\t\t\"\"\",\n\t\t\tstdout=\"abcd\"\n\t\t)\n\t\n\tdef test_memcpy2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"abcd\",\n\t\t\t\"\"\"\n\t\t\tuchar bytes[4];\n\t\t\tlocal uchar local_bytes[4];\n\t\t\tMemcpy(local_bytes, bytes, 4);\n\n\t\t\tlocal uint i;\n\t\t\tfor(i = 0; i < 4; i++) {\n\t\t\t\tlocal_bytes[3 - i] = local_bytes[i];\n\t\t\t}\n\t\t\tPrintf(local_bytes);\n\t\t\tPrintf(bytes);\n\t\t\t\"\"\",\n\t\t\tstdout=\"abbaabcd\"\n\t\t)\n\t\n\tdef test_strchr1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal char b[30] = \"hellogoodbyte\";\n\t\t\tPrintf(\"%d\", Strchr(b, 'g'));\n\t\t\t\"\"\",\n\t\t\tstdout=\"5\"\n\t\t)\n\t\n\tdef test_strchr2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal char b[30] = \"hellogoodbyte\";\n\t\t\tPrintf(\"%d\", Strchr(b, 'X'));\n\t\t\t\"\"\",\n\t\t\tstdout=\"-1\"\n\t\t)\n\t\n\tdef test_strcpy(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal char a[0];\n\t\t\tPrintf(\"%s\", a);\n\n\t\t\tlocal char b[30] = \"hellogoodbyte\";\n\t\t\tPrintf(\"%s\", b);\n\n\t\t\tStrcpy(a, b);\n\t\t\tPrintf(\"%s\", 
a);\n\t\t\t\"\"\",\n\t\t\tstdout=\"hellogoodbyte\\x00hellogoodbyte\\x00\"\n\t\t)\n\t\n\tdef test_strncpy(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal char a[0];\n\t\t\tPrintf(\"%s\", a);\n\n\t\t\tlocal char b[30] = \"hellogoodbyte\";\n\t\t\tPrintf(\"%s\", b);\n\n\t\t\tStrncpy(a, b, 5);\n\t\t\tPrintf(\"%s\", a);\n\t\t\t\"\"\",\n\t\t\tstdout=\"hellogoodbyte\\x00hello\"\n\t\t)\n\t\n\tdef test_strcmp1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hellothere\";\n\t\t\tlocal string b = \"hellogoodbyte\";\n\t\t\tPrintf(\"%d\", Strcmp(a, b));\n\t\t\t\"\"\",\n\t\t\tstdout=\"1\"\n\t\t)\n\t\n\tdef test_strcmp2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hello\";\n\t\t\tlocal string b = \"hello\";\n\t\t\tPrintf(\"%d\", Strcmp(a, b));\n\t\t\t\"\"\",\n\t\t\tstdout=\"0\"\n\t\t)\n\n\tdef test_stricmp1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"helLotherE\";\n\t\t\tlocal string b = \"hEllogoOdbyte\";\n\t\t\tPrintf(\"%d\", Stricmp(a, b));\n\t\t\t\"\"\",\n\t\t\tstdout=\"1\"\n\t\t)\n\t\n\tdef test_stricmp2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hElLo\";\n\t\t\tlocal string b = \"HeLlo\";\n\t\t\tPrintf(\"%d\", Stricmp(a, b));\n\t\t\t\"\"\",\n\t\t\tstdout=\"0\"\n\t\t)\n\t\n\tdef test_strncmp1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hellothere\";\n\t\t\tlocal string b = \"hellogoodbyte\";\n\t\t\tPrintf(\"%d\", Strncmp(a, b, 5));\n\t\t\t\"\"\",\n\t\t\tstdout=\"0\"\n\t\t)\n\t\n\tdef test_strncmp2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hellothere\";\n\t\t\tlocal string b = \"hellogoodbyte\";\n\t\t\tPrintf(\"%d\", Strncmp(a, b, 6));\n\t\t\t\"\"\",\n\t\t\tstdout=\"1\"\n\t\t)\n\t\n\t\n\tdef test_strnicmp1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hElLothere\";\n\t\t\tlocal string b = \"HeLlOgoodbyte\";\n\t\t\tPrintf(\"%d\", Strnicmp(a, b, 5));\n\t\t\t\"\"\",\n\t\t\tstdout=\"0\"\n\t\t)\n\t\n\tdef test_strnicmp2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hElLOthere\";\n\t\t\tlocal string b = \"helLogoOdbyte\";\n\t\t\tPrintf(\"%d\", Strnicmp(a, b, 6));\n\t\t\t\"\"\",\n\t\t\tstdout=\"1\"\n\t\t)\n\t\n\tdef test_strstr1(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hellothere\";\n\t\t\tPrintf(\"%d\", Strstr(a, \"llo\"));\n\t\t\t\"\"\",\n\t\t\tstdout=\"2\"\n\t\t)\n\t\n\tdef test_strstr2(self):\n\t\tdom = self._test_parse_build(\n\t\t\t\"\",\n\t\t\t\"\"\"\n\t\t\tlocal string a = \"hellothere\";\n\t\t\tPrintf(\"%d\", Strstr(a, \"lloZ\"));\n\t\t\t\"\"\",\n\t\t\tstdout=\"-1\"\n\t\t)\n\nclass TestCompatTools(unittest.TestCase, utils.UtilsMixin):\n\tdef setUp(self):\n\t\tpass\n\t\n\tdef tearDown(self):\n\t\tpass\n\t\n\tdef test_find_all(self):\n\t\t# waiting on issue #3 to be implemented\n\t\treturn\n\t\tdom = self._test_parse_build(\n\t\t\t\"abcd HELLO THERE HELLO blah HELLO blkajsdf\",\n\t\t\t\"\"\"\n\t\t\t\tTFindResults results = FindAll(\"HELLO\");\n\t\t\t\"\"\",\n\t\t\tverify=False,\n\t\t\tpredefines=True\n\t\t)\n\nif __name__ == 
\"__main__\":\n\tunittest.main()\n","sub_path":"tests/test_compat.py","file_name":"test_compat.py","file_ext":"py","file_size_in_byte":8518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"66901952","text":"import argparse\nimport configparser\nimport datetime\nimport logging\nimport os\nimport sys\nimport time\n\nfrom pprint import pprint\n\nfrom binance.client import Client as BinanceClient\nfrom binance.websockets import BinanceSocketManager\n\nimport dateparser\nfrom multiprocessing import Process\nfrom pymongo import MongoClient\nfrom twisted.internet import reactor\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-m', '--market', type=str, default=None, help='Market for analysis (ex. XLMBTC).')\nparser.add_argument('-b', '--backtest', type=str, default=None, help='Length of time for historical trade data analysis (ex. 3 hours).')\nparser.add_argument('-i', '--interval', type=int, default=10, help='Interval (seconds) between analysis runs (ex. 30). [Default: 10]')\nparser.add_argument('--debug', action='store_true', default=False, help='Enable debug level output.')\nargs = parser.parse_args()\n\nuser_market = args.market\nbacktest_duration = args.backtest\nanalysis_interval = args.interval\ndebug_mode = args.debug\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\n\nif debug_mode == True:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n\nconfig_path = 'config/config.ini'\n\nconfig = configparser.ConfigParser()\nconfig.read(config_path)\n\nbinance_api = config['binance']['api']\nbinance_secret = config['binance']['secret']\n\nbinance_client = BinanceClient(binance_api, binance_secret)\nbinance_ws = BinanceSocketManager(binance_client)\n\ndb = MongoClient(config['mongodb']['uri'])[config['mongodb']['db']]\n\ncollections = {'data': 'flowmeter', 'analysis': 'analysis'}\n\n\ndef process_message(msg, populate=False, symbol=None):\n process_message_success = True\n\n try:\n logger.debug('msg: ' + str(msg))\n\n trade_doc = {}\n\n update_required = False\n\n if populate == True or msg['e'] == 'aggTrade':\n \"\"\"\n {\n \"e\": \"aggTrade\", # event type\n \"E\": 1499405254326, # event time\n \"s\": \"ETHBTC\", # symbol\n \"a\": 70232, # aggregated tradeid\n \"p\": \"0.10281118\", # price\n \"q\": \"8.15632997\", # quantity\n \"f\": 77489, # first breakdown trade id\n \"l\": 77489, # last breakdown trade id\n \"T\": 1499405254324, # trade time\n \"m\": false, # whether buyer is a maker\n \"M\": true # can be ignored\n }\n \"\"\"\n\n trade_doc['_id'] = int(msg['a']) # Aggregate Trade ID\n if populate == True:\n trade_doc['type'] = 'populate'\n trade_doc['symbol'] = symbol\n else:\n trade_doc['type'] = msg['e']\n trade_doc['symbol'] = msg['s']\n trade_doc['price'] = float(msg['p'])\n trade_doc['quantity'] = float(msg['q'])\n trade_doc['trade_time'] = int(msg['T'])\n if msg['m'] == True:\n trade_doc['side'] = 'sell'\n else:\n trade_doc['side'] = 'buy'\n #trade_doc['event_time'] = int(msg['E'])\n #trade_doc['trade_id_first'] = int(msg['f'])\n #trade_doc['trade_id_last'] = int(msg['l'])\n\n update_required = True\n\n elif msg['e'] == 'trade':\n \"\"\"\n {\n \"e\": \"trade\", # Event type\n \"E\": 123456789, # Event time\n \"s\": \"BNBBTC\", # Symbol\n \"t\": 12345, # Trade ID\n \"p\": \"0.001\", # Price\n \"q\": \"100\", # Quantity\n \"b\": 88, # Buyer order Id\n \"a\": 50, # Seller order Id\n \"T\": 123456785, # Trade time\n \"m\": true, # Is the buyer the market maker?\n \"M\": true # Ignore.\n }\n \"\"\"\n\n trade_doc['_id'] = int(msg['t']) # Trade ID\n trade_doc['type'] = msg['e']\n trade_doc['symbol'] = msg['s']\n trade_doc['price'] = float(msg['p'])\n trade_doc['quantity'] = float(msg['q'])\n trade_doc['trade_time'] = 
int(msg['T'])\n if msg['m'] == True:\n trade_doc['side'] = 'sell'\n else:\n trade_doc['side'] = 'buy'\n #trade_doc['event_time'] = int(msg['E'])\n #trade_doc['buyer_order_id'] = int(msg['b'])\n #trade_doc['seller_order_id'] = int(msg['a'])\n\n update_required = True\n\n elif msg['e'] == 'error':\n logger.error('Error message received from websocket.')\n logger.error('Error: ' + msg['m'])\n\n process_message_success = False\n\n logger.warning('Restarting websocket connection.')\n\n # RESTART WEBSOCKET CONNECTION HERE\n\n else:\n logger.warning('Unknown event type: ' + msg['e'])\n\n process_message_success = False\n\n if update_required == True:\n try:\n inserted_id = db[collections['data']].insert_one(trade_doc).inserted_id\n\n logger_message = trade_doc['symbol'] + ' - ' + trade_doc['side'].upper() + ' '\n if trade_doc['side'] == 'buy': logger_message += ' '\n logger_message += '- ' + str(trade_doc['quantity']) + ' @ ' + str(trade_doc['price'])\n if trade_doc['type'] == 'populate': logger_message += ' [' + trade_doc['type'].upper() + ']'\n\n if populate == False:\n logger.info(logger_message)\n else:\n logger.debug(logger_message)\n\n except:\n logger.warning('Exception while creating trade document. Can be safely ignored if raised while populating database.')\n\n process_message_success = False\n\n except Exception as e:\n logger.exception(e)\n\n process_message_success = False\n\n finally:\n return process_message_success\n\n\ndef populate_historical(market, start_time):\n binance_api = config['binance']['api']\n binance_secret = config['binance']['secret']\n\n binance_client = BinanceClient(binance_api, binance_secret)\n\n # Get historical aggregated trade data as generator object and count number of historical trades\n historical_trades = binance_client.aggregate_trade_iter(symbol=market, start_str=start_time)\n\n logger.info('Counting historical trades for database population.')\n\n trade_count = sum(1 for trade in historical_trades)\n logger.debug('trade_count: ' + str(trade_count))\n\n # Get historical aggregated trade data again to refresh generator object (May make total count off by few trades)\n historical_trades = binance_client.aggregate_trade_iter(symbol=market, start_str=start_time)\n\n count = 0\n for trade in historical_trades:\n process_result = process_message(trade, populate=True, symbol=market)\n\n if process_result == False:\n logger.info('Database population complete.')\n break\n else:\n count += 1\n logger.info('Processed ' + str(count) + ' of ~' + str(trade_count) + ' historical trades.')\n\n\n#def analyze_data(market, feature, data, parameter, interval='1h', start=None):\ndef analyze_data(market, interval='1h', start=None):\n \"\"\"\n market - Market to analyze (ex. XLMBTC)\n interval - Duration to analyze (ex. 
30s / 15m / 3h / 1d / 3w)\n start - UTC datetime object dictating start of analysis interval (Overrides interval argument)\n \"\"\"\n\n analyze_return = {'success': True, 'result': {'current': {'volume': {'all': None, 'buy': None, 'sell': None},\n 'price': {'all': None, 'buy': None, 'sell': None},\n 'count': {'all': None, 'buy': None, 'sell': None}},\n 'last': {'volume': {'all': None, 'buy': None, 'sell': None},\n 'price': {'all': None, 'buy': None, 'sell': None},\n 'count': {'all': None, 'buy': None, 'sell': None}},\n 'difference': {'volume': {'all': {'absolute': None, 'percent': None},\n 'buy': {'absolute': None, 'percent': None},\n 'sell': {'absolute': None, 'percent': None}},\n 'price': {'all': {'absolute': None, 'percent': None},\n 'buy': {'absolute': None, 'percent': None},\n 'sell': {'absolute': None, 'percent': None}},\n 'count': {'all': {'absolute': None, 'percent': None},\n 'buy': {'absolute': None, 'percent': None},\n 'sell': {'absolute': None, 'percent': None}}}}}\n\n try:\n if start != None:\n analysis_start = time.mktime(start.timetuple()) * 1000\n\n else:\n #unix_time_ms = int(time.mktime(datetime.datetime.utcnow().timetuple()) * 1000)\n unix_time_ms = int(time.mktime(datetime.datetime.now().timetuple()) * 1000)\n logger.debug('unix_time_ms: ' + str(unix_time_ms))\n\n numerical = ''\n identifier = ''\n for char in interval:\n if char.isnumeric():\n numerical += char\n else:\n identifier = char\n break\n logger.debug('numerical: ' + numerical)\n logger.debug('identifier: ' + identifier)\n\n num_input = int(numerical)\n\n if identifier == 's':\n analysis_delta = num_input * 1000\n elif identifier == 'm':\n analysis_delta = num_input * 60000\n elif identifier == 'h':\n analysis_delta = num_input * 3600000\n elif identifier == 'd':\n analysis_delta = num_input * 86400000\n elif identifier == 'w':\n analysis_delta = num_input * 604800000\n else:\n logger.error('Unrecognized interval identifier. 
Exiting.')\n sys.exit(1)\n\n logger.debug('analysis_delta: ' + str(analysis_delta))\n\n analysis_start = unix_time_ms - analysis_delta\n logger.debug('analysis_start: ' + str(analysis_start))\n analysis_start_last = analysis_start - analysis_delta\n logger.debug('analysis_start_last: ' + str(analysis_start_last))\n\n match_inputs = ['all', 'buy', 'sell']\n\n for match in match_inputs:\n ## Create Aggregation Pipeline ##\n pipeline_current = []\n pipeline_last = []\n\n # Match Stage\n match_pipeline_current = {'$match': {'symbol': market, 'trade_time': {'$gte': analysis_start}}}\n match_pipeline_last = {'$match': {'symbol': market, 'trade_time': {'$gte': analysis_start_last, '$lt': analysis_start}}}\n\n if match == 'all':\n pass\n elif match == 'buy':\n match_pipeline_current['$match']['side'] = 'buy'\n match_pipeline_last['$match']['side'] = 'buy'\n elif match == 'sell':\n match_pipeline_current['$match']['side'] = 'sell'\n match_pipeline_last['$match']['side'] = 'sell'\n\n logger.debug('match_pipeline_current: ' + str(match_pipeline_current))\n logger.debug('match_pipeline_last: ' + str(match_pipeline_last))\n\n pipeline_current.append(match_pipeline_current)\n pipeline_last.append(match_pipeline_last)\n\n # Sort Stage\n sort_pipeline = {'$sort': {'_id': 1}}\n\n logger.debug('sort_pipeline: ' + str(sort_pipeline))\n\n pipeline_current.append(sort_pipeline)\n pipeline_last.append(sort_pipeline)\n\n # Group Stage\n group_pipeline = {'$group': {'_id': match,\n 'volume': {'$sum': '$quantity'},\n 'price': {'$avg': '$price'},\n 'count': {'$sum': 1}}}\n\n logger.debug('group_pipeline: ' + str(group_pipeline))\n\n pipeline_current.append(group_pipeline)\n pipeline_last.append(group_pipeline)\n\n ## Run Aggregation Pipelines ##\n #aggregate_result_current = db.command('aggregate', collections['data'], cursor={}, pipeline=pipeline_current)\n aggregate_result_current = db[collections['data']].aggregate(pipeline_current)\n #aggregate_result_last = db.command('aggregate', collections['data'], cursor={}, pipeline=pipeline_last)\n aggregate_result_last = db[collections['data']].aggregate(pipeline_last)\n\n result_current = list(aggregate_result_current)[0]\n result_last = list(aggregate_result_last)[0]\n\n # Calculate differences to add to return dictionary\n vol_diff_absolute = result_current['volume'] - result_last['volume']\n vol_diff_percent = round(vol_diff_absolute / result_last['volume'], 4)\n price_diff_absolute = round(result_current['price'] - result_last['price'], 8)\n price_diff_percent = round(price_diff_absolute / result_last['price'], 4)\n count_diff_absolute = result_current['count'] - result_last['count']\n count_diff_percent = round(count_diff_absolute / result_last['count'], 4)\n\n # Add results to return dictionary\n analyze_return['result']['current']['volume'][result_current['_id']] = result_current['volume']\n analyze_return['result']['current']['price'][result_current['_id']] = round(result_current['price'], 8)\n analyze_return['result']['current']['count'][result_current['_id']] = result_current['count']\n analyze_return['result']['last']['volume'][result_last['_id']] = result_last['volume']\n analyze_return['result']['last']['price'][result_last['_id']] = round(result_last['price'], 8)\n analyze_return['result']['last']['count'][result_last['_id']] = result_last['count']\n analyze_return['result']['difference']['volume'][result_current['_id']]['absolute'] = vol_diff_absolute\n analyze_return['result']['difference']['volume'][result_current['_id']]['percent'] = 
vol_diff_percent\n analyze_return['result']['difference']['price'][result_current['_id']]['absolute'] = price_diff_absolute\n analyze_return['result']['difference']['price'][result_current['_id']]['percent'] = price_diff_percent\n analyze_return['result']['difference']['count'][result_current['_id']]['absolute'] = count_diff_absolute\n analyze_return['result']['difference']['count'][result_current['_id']]['percent'] = count_diff_percent\n\n # Calculate difference between requested interval start and first document trade time\n # Can use to warn user about data missing from requested calculation\n #first_doc_time = xyz\n\n except Exception as e:\n logger.exception(e)\n\n analyze_return['success'] = False\n\n finally:\n return analyze_return\n\n\nif __name__ == '__main__':\n try:\n ## Get list of available Binance markets to verify user input ##\n binance_info = binance_client.get_exchange_info()\n\n binance_markets = []\n\n for product in binance_info['symbols']:\n binance_markets.append(product['baseAsset'] + product['quoteAsset'])\n\n trade_sockets = {}\n\n ## Gather desired settings from user input ##\n if user_market == None:\n user_market = input('Choose a Binance market (ex. XLMBTC): ').upper()\n\n if user_market not in binance_markets:\n logger.error(user_market + ' is not a valid Binance market. Exiting.')\n sys.exit(1)\n else:\n logger.info('Selected Binance market ' + user_market + '.')\n\n if backtest_duration == None:\n backtest_duration = input('Input length of time for trade data backtesting/analysis (ex. 30 seconds/9 minutes/3 hours/2 days/1 week): ')\n\n test_duration = backtest_duration + ' ago UTC'\n logger.debug('test_duration: ' + test_duration)\n\n try:\n logger.debug('Testing user-provided historical data population input.')\n historical_trades = binance_client.aggregate_trade_iter(symbol=user_market, start_str=test_duration)\n logger.debug('Attempting count of trades in generator object.')\n trade_count = sum(1 for trade in historical_trades)\n populate_duration = test_duration\n logger.debug('populate_duration: ' + populate_duration)\n except:\n logger.error('Invalid input for start of historical data population. Exiting.')\n sys.exit(1)\n\n if user_market == None or backtest_duration == None:\n logger.error('Failed to gather valid user input. 
Exiting.')\n sys.exit(1)\n\n ## Delete existing data for market from database ##\n logger.info('Deleting existing ' + user_market + ' data from database.')\n\n delete_result = db[collections['data']].delete_many({'symbol': user_market})\n logger.debug('delete_result.deleted_count: ' + str(delete_result.deleted_count))\n\n ## Initialize aggregated trade websocket for market ##\n logger.info('Initializing trade websocket for ' + user_market + '.')\n\n #trade_sockets[user_market] = binance_ws.start_trade_socket(user_market, process_message)\n trade_sockets[user_market] = binance_ws.start_aggtrade_socket(user_market, process_message)\n\n ## Start websocket for market and begin processing data ##\n logger.info('Starting websocket connection for ' + user_market + '.')\n\n binance_ws.start()\n\n ## Populate database with historical trade data for extended backtesting/analysis ##\n logger.info('Populating database with historical trade data.')\n\n populate_start_dt = dateparser.parse(backtest_duration) - datetime.timedelta(hours=1) # Populate with 1 extra hour of data\n\n populate_start = int(time.mktime(populate_start_dt.timetuple()) * 1000)\n logger.debug('populate_start:' + str(populate_start))\n\n arguments = tuple()\n keyword_arguments = {'market': user_market, 'start_time': populate_start}\n\n populate_proc = Process(target=populate_historical, args=arguments, kwargs=keyword_arguments)\n\n logger.debug('Starting populate database process.')\n populate_proc.start()\n logger.debug('Joining populate database process.')\n populate_proc.join()\n logger.debug('Populate database process complete.')\n\n logger.info('Database ready for analysis.')\n\n while (True):\n delay_start = time.time()\n while (time.time() - delay_start) < analysis_interval:\n time.sleep(1)\n\n logger.info('Analyzing trade data.')\n\n analysis_results = analyze_data(market=user_market, interval='1h')\n\n if analysis_results['success'] == True:\n analysis_document = analysis_results['result'].copy()\n analysis_document['time'] = datetime.datetime.utcnow().isoformat()\n analysis_document['module'] = 'flowmeter'\n\n pprint(analysis_document)\n\n #logger.info('Updating analysis database.')\n logger.info('Creating new analysis document.')\n\n #update_result = db[collections['analysis']].update_one({'_id': user_market}, {'$set': analysis_document}, upsert=True)\n #logger.debug('update_result.matched_count: ' + str(update_result.matched_count))\n #logger.debug('update_result.modified_count: ' + str(update_result.modified_count))\n inserted_id = db[collections['analysis']].insert_one(analysis_document).inserted_id\n logger.debug('inserted_id: ' + str(inserted_id))\n\n else:\n logger.error('Error while analyzing trade data.')\n\n except Exception as e:\n logger.exception(e)\n\n except KeyboardInterrupt:\n logger.info('Exit signal received.')\n\n finally:\n if reactor.running:\n logger.info('Closing Binance socket manager.')\n binance_ws.close()\n\n logger.info('Stopping reactor.')\n reactor.stop()\n else:\n logger.info('No websocket connected or reactor running.')\n\n logger.info('Exiting.')\n","sub_path":"archive/flowmeter_072318-0016.py","file_name":"flowmeter_072318-0016.py","file_ext":"py","file_size_in_byte":21088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
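A standalone sketch of the $match/$group aggregation shape used in analyze_data above, with two invented trade documents; it assumes a MongoDB server on localhost and a throwaway 'example' database, so treat it as illustrative:

from pymongo import MongoClient

db = MongoClient()['example']
db.flowmeter.insert_many([
    {'symbol': 'XLMBTC', 'side': 'buy', 'quantity': 2.0, 'price': 0.00003, 'trade_time': 1000},
    {'symbol': 'XLMBTC', 'side': 'sell', 'quantity': 1.0, 'price': 0.00004, 'trade_time': 2000},
])
pipeline = [
    {'$match': {'symbol': 'XLMBTC', 'trade_time': {'$gte': 0}}},
    {'$group': {'_id': 'all',
                'volume': {'$sum': '$quantity'},   # total traded quantity
                'price': {'$avg': '$price'},       # mean trade price
                'count': {'$sum': 1}}},            # number of trades
]
print(list(db.flowmeter.aggregate(pipeline)))
# -> [{'_id': 'all', 'volume': 3.0, 'price': 3.5e-05, 'count': 2}]
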
+{"seq_id":"619232708","text":"'''\n\nGiven the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the number of ways it can be decoded.\nFor example, the message '111' would give 3, since it could be decoded as 'aaa', 'ka', and 'ak'.\n\nYou can assume that the messages are decodable. For example, '001' is not allowed.\n\n'''\n\n\ndef possible_decodings(message):\n code_length= len(message)\n letter_length_list = [1, 2]\n\n def build_node(this_length, d):\n for n in letter_length_list:\n new_length = this_length - n\n if new_length > 0:\n d[new_length] = {}\n build_node(new_length, d[new_length])\n elif new_length == 0:\n d[new_length] = None\n tree = {}\n build_node(code_length, tree)\n print(tree)\n\n def count_possibilities(node_tree, int_ptr):\n for node in node_tree:\n if node_tree[node]:\n count_possibilities(node_tree[node], int_ptr)\n else:\n int_ptr[0] += 1\n total_ptr = [0]\n count_possibilities(tree, total_ptr)\n return total_ptr[0] # dereferencing the decodings pointer\n\n\nif __name__ == \"__main__\":\n assert possible_decodings('111') == 3\n\n #extra test\n assert possible_decodings('2611') == possible_decodings('1111')\n","sub_path":"problem_07.py","file_name":"problem_07.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"241271179","text":"#\n# @lc app=leetcode.cn id=704 lang=python\n#\n# [704] Binary Search\n#\n# https://leetcode-cn.com/problems/binary-search/description/\n#\n# algorithms\n# Easy (46.98%)\n# Total Accepted: 6.8K\n# Total Submissions: 14.4K\n# Testcase Example: '[-1,0,3,5,9,12]\\n9'\n#\n# 给定一个 n 个元素有序的(升序)整型数组 nums 和一个目标值 target ,写一个函数搜索 nums 中的\n# target,如果目标值存在返回下标,否则返回 -1。\n# \n# \n# 示例 1:\n# \n# 输入: nums = [-1,0,3,5,9,12], target = 9\n# 输出: 4\n# 解释: 9 出现在 nums 中并且下标为 4\n# \n# \n# 示例 2:\n# \n# 输入: nums = [-1,0,3,5,9,12], target = 2\n# 输出: -1\n# 解释: 2 不存在 nums 中因此返回 -1\n# \n# \n# \n# \n# 提示:\n# \n# \n# 你可以假设 nums 中的所有元素是不重复的。\n# n 将在 [1, 10000]之间。\n# nums 的每个元素都将在 [-9999, 9999]之间。\n# \n# \n#\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n l = 0\n r = len(nums) - 1\n if l == r and target == nums[l]:\n return 0\n while l < r:\n mid = (l + r) // 2\n if nums[mid] == target:\n return mid\n elif nums[l] == target:\n return l\n elif nums[r] == target:\n return r\n elif nums[mid] < target:\n l = mid + 1\n elif nums[mid] > target:\n r = mid - 1\n return -1\n\n# better\n\n # try:\n # return nums.index(target)\n # except:\n # return -1\n","sub_path":"binary-search/704.binary-search.py","file_name":"704.binary-search.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"495180618","text":"import unittest\r\nfrom aquarium import Aquarium\r\nfrom fish import Fish\r\nimport controller\r\n\r\n\r\nclass TestFishStr(unittest.TestCase):\r\n def test_a_fish__str__exists(self):\r\n fish = Fish('cost', 'color', 'breed', 'gender', 'the_owner')\r\n self.assertTrue(type(fish).__str__ is not object.__str__)\r\n\r\n def test_b_fish__str__works(self):\r\n fish = Fish('cost', 'color', 'breed', 'gender', 'the_owner')\r\n returned = str(fish)\r\n self.assertEqual(returned, 'color breed [gender] worth $cost')\r\n\r\n\r\nclass TestAquarium_get_those_with_two_fish(unittest.TestCase):\r\n def test_c_aquarium_get_those_with_two_fish_exists(self):\r\n aquarium = Aquarium()\r\n self.assertTrue(hasattr(aquarium, 'get_those_with_two_fish'))\r\n self.assertTrue(callable(getattr(aquarium, 'get_those_with_two_fish', None)))\r\n\r\n def test_d_aquarium_get_those_with_two_fish_returns_string(self):\r\n aquarium = Aquarium()\r\n returned = aquarium.get_those_with_two_fish()\r\n self.assertTrue(isinstance(returned, str))\r\n\r\n def test_e_aquarium_get_those_with_two_fish_not_hard_coded(self):\r\n aquarium = Aquarium()\r\n returned = aquarium.get_those_with_two_fish()\r\n self.assertTrue(len(returned) == 0)\r\n\r\n # noinspection SpellCheckingInspection\r\n def test_f_aquarium_get_those_with_two_fish_works(self):\r\n aquarium = controller.setup()\r\n returned = aquarium.get_those_with_two_fish()\r\n self.assertEqual(returned,\r\n 'JOG=John Goff\\nBlack Killer Whale [M] worth $5000.01\\nGrey Shark [M] worth $123.45\\nPurple Siamese Fighting Fish [M] worth $2.55\\nRUT=Russel Turia\\nOrange Carp [F] worth $5.56\\nGold Carp [F] worth $10.99\\nGold GoldFish [F] worth $9.87\\n'\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main(verbosity=3)\r\n","sub_path":"automated/question07.py","file_name":"question07.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"248580403","text":"import os\nimport time\nimport shutil\nimport uuid\n\nimport yaml\nfrom watchdog.observers import Observer\nfrom watchdog.events import *\n\nCONFIG_FILE_NAME = 'config.yml'\nPATH_SETTING_KEY = 'path'\nSKIP_LIST_SETTING_KEY = 'skip_list'\n\n\ndef load_config():\n try:\n with open(CONFIG_FILE_NAME) as f:\n _config = yaml.load(f)\n if _config is None:\n print('The configuration file has no content')\n exit(2)\n print('load paths setting...')\n if PATH_SETTING_KEY not in _config:\n print('Missing directory configuration')\n exit(3)\n _sync_paths = _config[PATH_SETTING_KEY]\n if len(_sync_paths) < 2:\n print('Requires two or more folders')\n exit(4)\n print('Check the path settings...')\n _check_paths_result = True\n for _sync_path in _sync_paths:\n if not os.path.exists(_sync_path):\n print(\"The directory (%s) does not exist\" % _sync_path)\n _check_paths_result = False\n # TODO Check if one of them is another subfolder\n if not _check_paths_result:\n print('Path setting has error')\n exit(5)\n print('Check the path settings...done.')\n print('load paths setting...done.')\n print('load skip list settings...')\n _pattern_str_list = []\n if SKIP_LIST_SETTING_KEY in _config:\n _pattern_str_list = _config[SKIP_LIST_SETTING_KEY]\n _skip_list = []\n for _pattern_str in _pattern_str_list:\n _skip_list.append(re.compile(_pattern_str))\n _config[SKIP_LIST_SETTING_KEY] = _skip_list\n return _config\n except FileNotFoundError:\n print('The configuration file %s was not found' % CONFIG_FILE_NAME)\n exit(1)\n\n\ndef write_node_map(node_map, file_path, node):\n node_map[file_path] = node\n\n\ndef delete_node(node_map, file_path):\n node_map.pop(file_path, None)\n\n\ndef change_node_key(node_map, original_key, new_key):\n node = node_map.pop(original_key, None)\n if node is not None:\n node_map[new_key] = node\n\n\ndef sync_folder(_node_map, _skip_list, _sync_paths, _current_sync_path, _parent_path='/'):\n _current_absolute_path = _current_sync_path + _parent_path\n for _sync_path in _sync_paths:\n if not os.path.exists(_sync_path + _parent_path):\n print('The folder %s does not exist' % (_sync_path + _parent_path))\n os.mkdir(_sync_path + _parent_path)\n _file_and_dir_list = os.listdir(_current_absolute_path)\n for item in _file_and_dir_list:\n if os.path.isdir(_current_absolute_path + item):\n sync_folder(_node_map, _skip_list, _sync_paths, _current_sync_path, _parent_path + item + '/')\n else:\n sync_file(_node_map, _skip_list, _sync_paths, _current_absolute_path, _parent_path, item)\n\n\ndef is_sync_tmp_file(_file_absolute_path):\n tmp_file_pattern = re.compile(\".*\\.\\.sync\\.tmp\\..*\")\n return tmp_file_pattern.match(_file_absolute_path)\n\n\ndef in_skip_list(_skip_list, _file_path):\n for _skip_pattern in _skip_list:\n if _skip_pattern.match(_file_path):\n return True\n return False\n\n\ndef is_node_equals_in_map(_node_map, _file_absolute_path, _file_path):\n if _file_path not in _node_map:\n return False\n src = _node_map[_file_path]\n dst = get_node(_file_absolute_path)\n return is_node_equals(src, dst)\n\n\ndef is_node_equals(src, dst):\n if src is None or dst is None:\n return False\n return src.file_last_modified_time == dst.file_last_modified_time and src.file_size == dst.file_size\n\n\ndef delete_tmp_file(_file_path):\n print('Delete the tmp file : %s' % _file_path)\n os.remove(_file_path)\n\n\ndef sync_file(_node_map, _skip_list, _sync_paths, _current_absolute_path, _parent_path, _file_name):\n _file_absolute_path = _current_absolute_path + _file_name\n if 
is_sync_tmp_file(_file_name):\n        delete_tmp_file(_file_absolute_path)\n        return\n    _file_path = _parent_path + _file_name\n    if in_skip_list(_skip_list, _file_path):\n        return\n    if is_node_equals_in_map(_node_map, _file_absolute_path, _file_path):\n        return\n    _latest_node = None\n    _latest_sync_path = \"\"\n    for _sync_path in _sync_paths:\n        _node = get_node(_sync_path + _file_path)\n        if _node is None:\n            continue\n        if is_more_new_than_current(_node, _latest_node):\n            _latest_node = _node\n            _latest_sync_path = _sync_path\n    if _latest_node is None:\n        return\n    copy_file(_node_map, _sync_paths, _latest_sync_path, _latest_node, _file_path)\n\n\ndef copy_file(_node_map, _sync_paths, _latest_sync_path, _latest_node, _file_path, _ignore_list=None):\n    if _ignore_list is None:\n        _ignore_list = []\n\n    _src_absolute_path = _latest_sync_path + _file_path\n    for _current_sync_path in _sync_paths:\n        # skip the sync path the freshest copy lives in\n        if _current_sync_path == _latest_sync_path:\n            continue\n        _dst_absolute_path = _current_sync_path + _file_path\n        _dst_dir_absolute_path = os.path.dirname(_dst_absolute_path)\n        if not os.path.exists(_dst_dir_absolute_path):\n            os.makedirs(_dst_dir_absolute_path)\n        _dst_node = get_node(_dst_absolute_path)\n        if is_node_equals(_latest_node, _dst_node):\n            continue\n        print('Copy the file from %s to %s...' % (_src_absolute_path, _dst_absolute_path))\n        if os.path.exists(_dst_absolute_path):\n            _ignore_list.append(_dst_absolute_path)\n        _tmp_file_name = '..sync.tmp.' + str(uuid.uuid1())\n        _tmp_file_absolute_path = _dst_dir_absolute_path + '/' + _tmp_file_name\n        try:\n            shutil.copy2(_src_absolute_path, _tmp_file_absolute_path)\n        except FileNotFoundError:\n            if os.path.exists(_tmp_file_absolute_path):\n                delete_tmp_file(_tmp_file_absolute_path)\n            return\n        os.rename(_tmp_file_absolute_path, _dst_absolute_path)\n        _ignore_list.append(_dst_absolute_path)\n        write_node_map(_node_map, _file_path, _latest_node)\n\n\ndef delete_file(_node_map, _sync_paths, _sync_path, _file_path, _ignore_list):\n    for _current_sync_path in _sync_paths:\n        if _current_sync_path == _sync_path:\n            continue\n        _dst_absolute_path = _current_sync_path + _file_path\n        if not os.path.exists(_dst_absolute_path):\n            continue\n        print('Delete the %s file' % _dst_absolute_path)\n        _ignore_list.append(_dst_absolute_path)\n        os.remove(_dst_absolute_path)\n    delete_node(_node_map, _file_path)\n\n\ndef create_dir(_sync_paths, _sync_path, _dir_path):\n    for _current_sync_path in _sync_paths:\n        if _current_sync_path == _sync_path:\n            continue\n        _dir_absolute_path = _current_sync_path + _dir_path\n        if not os.path.exists(_dir_absolute_path):\n            print('Create a folder %s' % _dir_absolute_path)\n            os.makedirs(_dir_absolute_path)\n\n\ndef delete_dir(_sync_paths, _sync_path, _dir_path, _ignore_list):\n    for _current_sync_path in _sync_paths:\n        if _current_sync_path == _sync_path:\n            continue\n        _dir_absolute_path = _current_sync_path + _dir_path\n        _ignore_list.append(_dir_absolute_path)\n        if os.path.exists(_dir_absolute_path):\n            print('Delete a folder %s' % _dir_absolute_path)\n            shutil.rmtree(_dir_absolute_path)\n\n\ndef move_dir(_sync_paths, _sync_path, _src_dir_path, _dst_dir_path):\n    for _current_sync_path in _sync_paths:\n        if _current_sync_path == _sync_path:\n            continue\n        _src_absolute_path = _current_sync_path + _src_dir_path\n        _dst_absolute_path = _current_sync_path + _dst_dir_path\n        if not os.path.exists(_src_absolute_path):\n            continue\n        if os.path.exists(_dst_absolute_path):\n            continue\n        print('Move the folder %s to %s' % (_src_absolute_path, _dst_absolute_path))\n        shutil.move(_src_absolute_path, 
_dst_absolute_path)\n\n\ndef move_file(_node_map, _sync_paths, _sync_path, _src_file_path, _dst_file_path, _ignore_list):\n _need_sync_file = False\n for _current_sync_path in _sync_paths:\n if _current_sync_path == _sync_path:\n continue\n _src_file_absolute_path = _current_sync_path + _src_file_path\n _dst_file_absolute_path = _current_sync_path + _dst_file_path\n _dst_dir_absolute_path = os.path.dirname(_dst_file_absolute_path)\n if not os.path.exists(_src_file_absolute_path) and not is_sync_tmp_file(_src_file_absolute_path):\n _need_sync_file = True\n continue\n if not os.path.exists(_dst_dir_absolute_path):\n continue\n print('Move the file %s to %s' % (_src_file_absolute_path, _dst_file_absolute_path))\n shutil.move(_src_file_absolute_path, _dst_file_absolute_path)\n if _need_sync_file:\n _node = get_node(_sync_path + _dst_file_path)\n copy_file(_node_map, _sync_paths, _sync_path, _node, _dst_file_path, _ignore_list)\n change_node_key(_node_map, _src_file_path, _dst_file_path)\n\n\ndef is_more_new_than_current(_new_node, _current_node):\n if _current_node is None:\n return True\n if _new_node.file_last_modified_time == _current_node.file_last_modified_time:\n return _new_node.file_size > _current_node.file_size\n return _new_node.file_last_modified_time > _current_node.file_last_modified_time\n\n\ndef get_node(_file_absolute_path):\n if not os.path.isfile(_file_absolute_path):\n return None\n _node = Node()\n _node.file_create_time = os.path.getctime(_file_absolute_path)\n _node.file_last_modified_time = os.path.getmtime(_file_absolute_path)\n _node.file_size = os.path.getsize(_file_absolute_path)\n return _node\n\n\ndef watch_folder(_observer, _handler, _sync_absolute_path):\n _observer.schedule(_handler, _sync_absolute_path, True)\n print('Watching directory %s...' 
% _sync_absolute_path)\n\n\ndef get_path(_absolute_path, _sync_paths):\n    for _current_sync_path in _sync_paths:\n        if _absolute_path.startswith(_current_sync_path):\n            return _current_sync_path\n    return None\n\n\ndef get_file_path(_file_absolute_path, _sync_path):\n    return _file_absolute_path[len(_sync_path):]\n\n\nclass Node:\n    file_name = \"\"\n    file_size = 0\n    file_create_time = 0\n    file_last_modified_time = 0\n\n\nclass SystemEventHandler(FileSystemEventHandler):\n    # shared state is injected from __main__; the handlers read it through\n    # self instead of relying on module-level globals of the same names\n    file_node_map = None\n    skip_list = None\n    sync_paths = None\n    ignore_list = None\n\n    def __init__(self, _node_map, _skip_list, _sync_paths, _ignore_list):\n        self.file_node_map = _node_map\n        self.skip_list = _skip_list\n        self.sync_paths = _sync_paths\n        self.ignore_list = _ignore_list\n\n    def on_created(self, event):\n        _src_absolute_path = event.src_path\n        if _src_absolute_path in self.ignore_list:\n            self.ignore_list.remove(_src_absolute_path)\n            return\n        if is_sync_tmp_file(_src_absolute_path):\n            return\n        if in_skip_list(self.skip_list, _src_absolute_path):\n            return\n        _sync_path = get_path(_src_absolute_path, self.sync_paths)\n        _file_path = get_file_path(_src_absolute_path, _sync_path)\n        if event.is_directory:\n            create_dir(self.sync_paths, _sync_path, _file_path)\n            return\n        _node = get_node(_sync_path + _file_path)\n        if _node is None:\n            return\n        copy_file(self.file_node_map, self.sync_paths, _sync_path, _node, _file_path, self.ignore_list)\n\n    def on_modified(self, event):\n        _src_absolute_path = event.src_path\n        if is_sync_tmp_file(_src_absolute_path):\n            return\n        if in_skip_list(self.skip_list, _src_absolute_path):\n            return\n        _sync_path = get_path(_src_absolute_path, self.sync_paths)\n        _file_path = get_file_path(_src_absolute_path, _sync_path)\n        if event.is_directory:\n            return\n        _node = get_node(_sync_path + _file_path)\n        copy_file(self.file_node_map, self.sync_paths, _sync_path, _node, _file_path, self.ignore_list)\n\n    def on_deleted(self, event):\n        _src_absolute_path = event.src_path\n        if _src_absolute_path in self.ignore_list:\n            self.ignore_list.remove(_src_absolute_path)\n            return\n        if is_sync_tmp_file(_src_absolute_path):\n            return\n        if in_skip_list(self.skip_list, _src_absolute_path):\n            return\n        _sync_path = get_path(_src_absolute_path, self.sync_paths)\n        _file_path = get_file_path(_src_absolute_path, _sync_path)\n        if event.is_directory:\n            delete_dir(self.sync_paths, _sync_path, _file_path, self.ignore_list)\n            return\n        delete_file(self.file_node_map, self.sync_paths, _sync_path, _file_path, self.ignore_list)\n\n    def on_moved(self, event):\n        src_path = event.src_path\n        dst_path = event.dest_path\n        if is_sync_tmp_file(src_path):\n            if dst_path in self.ignore_list:\n                self.ignore_list.remove(dst_path)\n            return\n        _sync_path = get_path(src_path, self.sync_paths)\n        _src_file_path = get_file_path(src_path, _sync_path)\n        _dst_file_path = get_file_path(dst_path, _sync_path)\n        if event.is_directory:\n            move_dir(self.sync_paths, _sync_path, _src_file_path, _dst_file_path)\n            return\n        move_file(self.file_node_map, self.sync_paths, _sync_path, _src_file_path, _dst_file_path, self.ignore_list)\n\n\nif __name__ == '__main__':\n    print(\"load config...\")\n    config = load_config()\n    print(\"load config...done.\")\n    sync_paths = config[PATH_SETTING_KEY]\n    skip_list = config[SKIP_LIST_SETTING_KEY]\n    file_node_map = {}\n    for sync_path in sync_paths:\n        sync_folder(file_node_map, skip_list, sync_paths, sync_path)\n    ignore_list = []\n    observer = Observer()\n    handler = SystemEventHandler(file_node_map, skip_list, sync_paths, ignore_list)\n    for sync_path in sync_paths:\n        watch_folder(observer, handler, sync_path)\n    observer.start()\n    try:\n        while True:\n            time.sleep(0.5)\n    except 
KeyboardInterrupt:\n print('\\nstop sync...')\n observer.stop()\n observer.join()\n print('stop sync...done.')\n","sub_path":"Sync.py","file_name":"Sync.py","file_ext":"py","file_size_in_byte":14188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
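A distilled sketch of the copy-to-temp-then-rename pattern Sync.py uses so watchers never observe a half-written destination file; the tmp-name prefix mirrors the script, and the rename is atomic on POSIX within one filesystem:

import os
import shutil
import uuid

def atomic_copy(src, dst):
    # write under a throwaway name first; watchers only ever see the tmp file
    tmp = os.path.join(os.path.dirname(dst), '..sync.tmp.' + str(uuid.uuid1()))
    shutil.copy2(src, tmp)
    # swap the finished copy into place in a single step
    os.rename(tmp, dst)
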
+{"seq_id":"553059659","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nfrom pprint import pprint\nfrom redis import StrictRedis\nimport torch\n\nfrom catalyst.utils.config import parse_args_uargs\nfrom catalyst.utils.misc import set_global_seeds, import_module\nfrom catalyst.rl.offpolicy.trainer import Trainer\n\nset_global_seeds(42)\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\ntorch.set_num_threads(1)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--config\",\n type=str,\n required=True)\nparser.add_argument(\n \"--algorithm\",\n type=str,\n default=None)\nparser.add_argument(\n \"--logdir\",\n type=str,\n default=None)\nargs, unknown_args = parser.parse_known_args()\nargs, config = parse_args_uargs(args, unknown_args, dump_config=True)\n\nalgorithm_module = import_module(\"algo_module\", args.algorithm)\nalgorithm_kwargs = algorithm_module.ALGORITHM.prepare_for_trainer(config)\n\nredis_server = StrictRedis(port=config.get(\"redis\", {}).get(\"port\", 12000))\nredis_prefix = config.get(\"redis\", {}).get(\"prefix\", \"\")\n\npprint(config[\"trainer\"])\npprint(algorithm_kwargs)\n\n\ntrainer = Trainer(\n **config[\"trainer\"],\n **algorithm_kwargs,\n logdir=args.logdir,\n redis_server=redis_server,\n redis_prefix=redis_prefix)\n\npprint(trainer)\n\ntrainer.run()\n","sub_path":"rl/offpolicy/scripts/run_trainer.py","file_name":"run_trainer.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"356875554","text":"\"\"\"\n instabot example\n\n Workflow:\n 1) Repost photo or video to your account\n\"\"\"\n\nimport os\nimport sys\nfrom creds import *\nimport json, requests, operator, random\n\nsys.path.append(os.path.join(sys.path[0], '../'))\nfrom instabot import Bot\nfrom instabot.bot.bot_support import read_list_from_file\n\n# get top media by hashtag\ntag = 'funnyvideo' # change to your hashtag\ninstaurl = 'https://www.instagram.com/explore/tags/{}/?__a=1'.format( tag )\nr = requests.get( instaurl )\ndata = r.json()['graphql']['hashtag']['edge_hashtag_to_top_posts']['edges']\nposts = {}\nfor i in data:\n shortcode = i['node']['shortcode']\n likes = i['node']['edge_liked_by']['count']\n posts[shortcode] = likes\n\nsorted_d = sorted(posts.items(), key=operator.itemgetter(1), reverse=True )\nprint( sorted_d )\nurl = 'https://instagram.com/p/{}'.format ( list( sorted_d )[0][0] )\nprint( url )\n\ndef exists_in_posted_medias(new_media_id, path='posted_medias.txt'):\n medias = read_list_from_file(path)\n return new_media_id in medias\n\ndef update_posted_medias(new_media_id, path='posted_medias.txt'):\n medias = read_list_from_file(path)\n medias.append(str(new_media_id))\n with open(path, 'w') as file:\n file.writelines('\\n'.join(medias))\n return True\n\n\ndef repost( bot, new_media_id, path='posted_medias.txt'):\n if exists_in_posted_medias(new_media_id, path):\n bot.logger.warning(\"Media {0} was uploaded earlier\".format(new_media_id))\n return False\n\n media_type = bot.get_media_info( new_media_id )[0]['media_type']\n path = ''\n if media_type == 2:\n path = bot.download_video( new_media_id )\n if bot.upload_video( path, caption ):\n bot.logger.info('Media_id {0} is saved in {1}'.format(new_media_id, path))\n\n elif media_type == 1:\n path = bot.download_photo( new_media_id )\n if bot.upload_photo( path, caption ):\n bot.logger.info('Media_id {0} is saved in {1}'.format(new_media_id, path))\n\n if not path:\n return False\n\ndef get_media_id( bot, url ):\n media_id = bot.get_media_id_from_link( url )\n return media_id\n\ndef get_media_owner_username( bot, new_media_id ):\n pk = bot.get_media_owner( new_media_id )\n username = bot.get_user_info( pk )['username']\n return username\n\ndef get_hashtags_to_post( bot, tag ):\n bot.api.search_tags( tag )\n res = bot.api.last_json\n\n tags = []\n for i in res[\"results\"]:\n tags.append( i['name'] )\n\n tags_to_post = random.sample( tags, 29 )\n tags_to_post = [ '#' + s for s in tags_to_post ]\n tags_to_post = ' '.join( tags_to_post )\n return tags_to_post\n\nbot = Bot()\nbot.login( username=username, password=password )\nmedia_id = get_media_id( bot, url )\nusername = get_media_owner_username( bot, media_id )\nuse_tags = get_hashtags_to_post( bot, tag )\ncaption = 'your caption goes here #{} {} ( via {} )'.format( tag, use_tags, username ) # change 'your caption goes here'\n\nprint( caption )\n\nif not media_id:\n print('Media id is empty!')\n exit(1)\n\nrepost( bot, media_id )","sub_path":"reposter.py","file_name":"reposter.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"373266950","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n######\n# Initial test of LSTM model for Fake News Challenge\n# Based on starter code from PS3-CS224n\n# Based on Stephen's rnn_test1\n######\n## General libraries\nimport tensorflow as tf\nimport numpy as np\nimport random\n\n## Our Own Code\n# from our_model import Config\nfrom bow_model_config import BOWModel\nfrom run_text_processing import save_data_pickle, get_data\n# from run_text_processing import get_data\n## currently using: split_indices\n# from our_util import Progbar, minibatches, pack_labels, split_data, split_indices, softmax, get_performance\nfrom our_util import split_indices, softmax, get_performance, convertOutputs #M\nbase_path = '/Users/Monu/NLP/Stance/code'\n#base_path = '/home/jupiter/Manisha/code/'\ndef run_save_data_pickle(): ## Needs NLTK to be installed!\n save_data_pickle(outfilename = '/glove/twitter50d_h_ids_b_ids_pickle.p',\n embedding_type = 'twitter.27B.50d',\n parserOption = 'nltk')\n\ndef run_bow(config, split = True, outputpath = base_path + '/xp', final = False): #M\n\n\n\n ## Get data\n # config, y, h, b, h_len, b_len = get_BOW_data(config, reload = True, save_data = False)\n config, data_dict = get_data(config, \n filename_embeddings = '/glove/glove.twitter.27B.50d.txt',\n pickle_path = '/glove/twitter50d_h_ids_b_ids_pickle.p',\n concat = False)\n\n ## pass data into local namespace:\n y = data_dict['y']\n h = data_dict['h_np']\n b = data_dict['b_np']\n h_len = data_dict['h_seqlen']\n b_len = data_dict['b_seqlen']\n \n # Do shortening of dataset ## affects number of samples and max_len.\n if config.num_samples is not None:\n ## Random seed\n np.random.seed(1)\n ind = range(np.shape(h)[0])\n random.shuffle(ind)\n indices = ind[0:config.num_samples ]\n h = h[indices,:]\n b = b[indices,:]\n h_len = h_len[indices]\n b_len = b_len[indices]\n y = y[indices]\n\n if config.h_max_len is not None:\n h_max_len = config.h_max_len\n if np.shape(h)[1] > h_max_len:\n h = h[:, 0:h_max_len]\n h_len = np.minimum(h_len, h_max_len)\n\n if config.b_max_len is not None:\n b_max_len = config.b_max_len\n if np.shape(b)[1] > b_max_len:\n b = b[:, 0:b_max_len]\n b_len = np.minimum(b_len, b_max_len)\n\n if split:\n # Split data\n train_indices, dev_indices, test_indices = split_indices(np.shape(h)[0])\n # Divide data\n train_h = h[train_indices,:]\n train_b = b[train_indices,:]\n train_h_len = h_len[train_indices]\n train_b_len = b_len[train_indices]\n train_y = y[train_indices]\n\n # Development\n dev_h = h[dev_indices,:]\n dev_b = b[dev_indices,:]\n dev_h_len = h_len[dev_indices]\n dev_b_len = b_len[dev_indices]\n dev_y = y[dev_indices]\n\n if final:\n # Combine train and dev\n train_dev_indices = train_indices + dev_indices\n train_h = h[train_dev_indices,:]\n train_b = b[train_dev_indices,:]\n train_h_len = h_len[train_dev_indices]\n train_b_len = b_len[train_dev_indices]\n train_y = y[train_dev_indices]\n\n # Set dev to test\n dev_h = h[test_indices,:]\n dev_b = b[test_indices,:]\n dev_h_len = h_len[test_indices]\n dev_b_len = b_len[test_indices]\n dev_y = y[test_indices]\n\n\n \n ## Passing parameter_dict to config settings\n ## Changes to config based on data shape\n assert(np.shape(train_h)[0] == np.shape(train_b)[0] == np.shape(train_y)[0] == np.shape(train_h_len)[0] == np.shape(train_b_len)[0])\n config.num_samples = np.shape(train_h)[0]\n config.h_max_len = np.shape(train_h)[1]\n config.b_max_len = np.shape(train_b)[1]\n \n ## Start Tensorflow!\n print('Starting TensorFlow operations')\n 
print('With hidden layers: ', config.n_layers) ## hidden layer?\n with tf.Graph().as_default():\n tf.set_random_seed(1)\n model = BOWModel(config)\n init = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init)\n losses_ep, dev_performances_ep, dev_predicted_classes_ep, dev_predictions_ep = model.fit(session, train_h, train_b, train_h_len, train_b_len, train_y, dev_h, dev_b, dev_h_len, dev_b_len, dev_y) #M\n\n # Write results to csv\n convertOutputs(outputpath, config, losses_ep, dev_performances_ep)\n\n #print('Losses ', losses_ep)\n #print('Dev Performance ', dev_performances_ep) #M\n return losses_ep, dev_predicted_classes_ep, dev_performances_ep #M\n\n## for debugging\nif __name__ == \"__main__\":\n print('Doing something!')\n losses, dev_predicted_classes, dev_performance = run_bow(num_samples = 1028)\n print('Execution Complete')\n","sub_path":"code/execute_bow_config.py","file_name":"execute_bow_config.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"239231792","text":"from brian2 import *\nimport matplotlib.pyplot as plt\n\nN = 100\ntau = 10*ms\nv0_max = 3.\nduration = 1000*ms\n\neqs = '''\ndv/dt = (v0-v)/tau : 1 (unless refractory)\nv0 : 1\n'''\n\nG = NeuronGroup(N, eqs, threshold='v>1', reset='v=0', refractory=5*ms, method='exact')\nM = SpikeMonitor(G)\nSM = StateMonitor(G, 'v', record=[0, 25, 50, 75, 99])\n\nG.v0 = 'i*v0_max/(N-1)'\n\nrun(duration)\n\nplt.figure(figsize=(12,8))\nplt.subplot(121)\nplt.plot(M.t/ms, M.i, '.k')\nplt.xlabel('Time (ms)')\nplt.ylabel('Neuron index')\nplt.subplot(122)\nplt.plot(G.v0, M.count/duration)\nplt.xlabel('v0')\nplt.ylabel('Firing rate (sp/s)');\nplt.savefig('images/multiple-neurons_firing-rate.svg', transparent=True)\nplt.savefig('images/multiple-neurons_firing-rate.png', transparent=True)\n\nplt.figure(figsize=(12,18))\nplt.subplot(511)\nplt.plot(SM.t / ms, SM[99].v / mV)\nplt.xlabel('v[99]')\nplt.ylabel('V[mV]');\nplt.subplot(512)\nplt.plot(SM.t / ms, SM[75].v / mV)\nplt.xlabel('v[75]')\nplt.ylabel('V[mV]');\nplt.subplot(513)\nplt.plot(SM.t / ms, SM[50].v / mV)\nplt.xlabel('v[50]')\nplt.ylabel('V[mV]');\nplt.subplot(514)\nplt.plot(SM.t / ms, SM[25].v / mV)\nplt.xlabel('v[25]')\nplt.ylabel('V[mV]');\nplt.subplot(515)\nplt.plot(SM.t / ms, SM[0].v / mV)\nplt.xlabel('v[0]')\nplt.ylabel('V[mV]');\nplt.savefig('images/multiple-neurons_v.svg', transparent=True)\nplt.savefig('images/multiple-neurons_v.png', transparent=True)\n","sub_path":"python/brian2/multiple-neurons.py","file_name":"multiple-neurons.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"434365406","text":"from roboclaw_3 import Roboclaw\r\n\r\n#Linux comport name\r\nrc = Roboclaw(\"/dev/ttyACM0\",115200)\r\n\r\nrc.Open()\r\nprint(\"-----------------------------------------------------------\")\r\nif rc.Open() == 0:\r\n print(\"No roboclaw found on the comport\\n\")\r\nelse:\r\n print(\"Roboclaw connected\\n\")\r\n\r\nname = ['claw', 'claw_2', 'claw_3']\r\naddress = [128, 129, 130]\r\n\r\nfor i in range(3):\r\n try:\r\n print('\\t'.join((name[i], 'set up', str(rc.ReadError(address[i])))))\r\n except:\r\n print('\\t'.join((name[i], \"not found\")))\r\nprint(\"-----------------------------------------------------------\")\r\n\r\ndef pitchStop():\r\n rc.SpeedM1(128, 0)\r\n\r\ndef rotationStop():\r\n rc.SpeedM2(128, 0)\r\n\r\ndef columnStop():\r\n rc.SpeedM1(129, 0)\r\n\r\ndef launchStop():\r\n rc.SpeedM2(129, 0)\r\n\r\ndef caseStop():\r\n rc.SpeedM1M2(130, 0, 0)\r\n\r\ndef allStop():\r\n for i in address:\r\n rc.SpeedM1M2(i, 0, 0)\r\n\r\ndef pitchEnc():\r\n print(rc.ReadEncM1(128))\r\n\r\ndef rotationEnc():\r\n print(rc.ReadEncM2(128))\r\n\r\ndef columnEnc():\r\n print(rc.ReadEncM1(129))\r\n\r\ndef launchEnc():\r\n print(rc.ReadEncM2(129))\r\n\r\ndef caseRightEnc():\r\n print(rc.ReadEncM1(130))\r\n\r\ndef caseLeftEnc():\r\n print(rc.ReadEncM2(130))\r\n\r\ndef pitchSet(x):\r\n rc.SetEncM1(128, x)\r\n print('\\t'.join(('Updated encoder value: ', rc.ReadEncM1(128))))\r\n\r\ndef rotationSet(x):\r\n rc.SetEncM2(128, x)\r\n print('\\t'.join(('Updated encoder value: ', rc.ReadEncM2(128))))\r\n\r\ndef columnSet(x):\r\n rc.SetEncM1(129, x)\r\n print('\\t'.join(('Updated encoder value: ', rc.ReadEncM1(129))))\r\n\r\ndef launchSet(x):\r\n rc.SetEncM2(129, x)\r\n print('\\t'.join(('Updated encoder value: ', rc.ReadEncM2(129))))\r\n\r\ndef caseSet(x):\r\n rc.SetEncM1(130, x)\r\n rc.SetEncM2(130, x)\r\n print('\\t'.join(('Updated encoder value: ', rc.ReadEncM1(130), rc.ReadEncM2(130))))\r\n\r\ndata = \"\"\"\r\npitch_pulses = 355000 \r\npitch_length = 90.0 \r\npitch_speed_pulses = 7000 \r\npitch_speed_manual = 50 \r\npitch_ready = 80.0 \r\n\r\nrotation_pulses = 950000 \r\nrotation_length = 180.0 \r\nrotation_speed_pulses = 16000 \r\nrotation_speed_manual = 15 \r\nrotation_ready = 5.0 \r\n\r\nlift_pulses = 19000 \r\nlift_length = 130.0 \r\nlift_speed_pulses = 420 \r\nlift_speed_manual = 127 \r\nlift_ready = lift_length \r\n\r\nlaunch_pulses = 14800 \r\nlaunch_length = 111.0 \r\nlaunch_speed_pulses = 6*13400 \r\nlaunch_speed_pulses_slow = 2500 \r\nlaunch_speed_manual = 40 \r\nlaunch_acceleration = (launch_speed_pulses**2)/13400 \r\nlaunch_max_speed = 10 \r\nlaunch_min_speed = 1 \r\nlaunch_max_acceleration = 48 \r\nlaunch_min_acceleration = 1 \r\nlaunch_standby = 8000 \r\nlaunch_mount = 17000 \r\nlaunch_break = 21000 \r\nlaunch_bottom = 0 \r\nlaunch_connect = 2190 \r\n\"\"\"\r\nprint(data)\r\n","sub_path":"test_prototype/bare_minimum_3_claw.py","file_name":"bare_minimum_3_claw.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"581815586","text":"import cv2\r\nimport numpy as np\r\nimport sqlite3\r\nimport os\r\n\r\ndef insert0rUpdate(id, name):\r\n\r\n conn = sqlite3.connect('C:/Users/Administrator/Desktop/data.db')\r\n\r\n query = \"SELECT * FROM people WHERE ID=\" + str(id)\r\n cusror = conn.execute(query)\r\n\r\n isRecordExist = 0\r\n\r\n for row in cusror:\r\n isRecordExist = 1\r\n\r\n if(isRecordExist == 0):\r\n query = \"INSERT INTO people(ID, Name) VALUES(\"+str(id)+\",'\"+str(name)+ \"')\"\r\n else:\r\n query = \"UPDATE people SET Name='\"+str(name)+\"' WHERE ID=\"+ str(id)\r\n\r\n conn.execute(query)\r\n conn.commit()\r\n conn.close()\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\ncap = cv2.VideoCapture(0,cv2.CAP_DSHOW)\r\nid = input(\"Enter your ID: \")\r\nname = input(\"Enter your Naem: \")\r\ninsert0rUpdate(id, name)\r\nsampleNum = 0\r\nwhile 1:\r\n ret, img = cap.read()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n for (x,y,w,h) in faces:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = img[y:y+h, x:x+w]\r\n\r\n if not os.path.exists('dataSet'):\r\n os.makedirs('dataSet')\r\n sampleNum +=1\r\n cv2.imwrite('dataSet/User.'+str(id)+'.'+str(sampleNum)+ '.jpg', gray[y: y+h, x: x+w])\r\n cv2.imshow('nhan dien',img)\r\n cv2.waitKey(1)\r\n if sampleNum >100:\r\n break;\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n","sub_path":"ketnoiSQLite.py","file_name":"ketnoiSQLite.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"21041772","text":"import socket\nimport sys\nimport traceback\nimport os\nimport mimetypes\nimport base64\n\ndef response_ok(body=b\"This is a minimal response\", mimetype=b\"text/plain\"):\n \"\"\"\n returns a basic HTTP response\n Ex:\n response_ok(\n b\"Welcome:
\",\n b\"text/html\"\n ) ->\n\n b'''\n HTTP/1.1 200 OK\\r\\n\n Content-Type: text/html\\r\\n\n \\r\\n\n Welcome:
\\r\\n\n '''\n \"\"\"\n return b\"\\r\\n\".join([\n b\"HTTP/1.1 200 OK\",\n b\"Content-Type: \" + mimetype,\n b\"\",\n body])\n\n\ndef response_method_not_allowed():\n \"\"\"Returns a 405 Method Not Allowed response\"\"\"\n\n # TODO: Implement response_method_not_allowed\n return b\"\\r\\n\".join([\n b\"HTTP/1.1 405 Method Not Allowed\",\n b\"\",\n b\"You can't do that on this server! response_method_not_allowed\"])\n\n\ndef response_not_found():\n \"\"\"Returns a 404 Not Found response\"\"\"\n\n # TODO: Implement response_not_found\n return b\"\\r\\n\".join([\n b\"HTTP/1.1 404 Not Found response\",\n b\"\",\n b\"You can't do that on this server! response_not_found\"])\n\n\ndef parse_request(request):\n \"\"\"\n Given the content of an HTTP request, returns the path of that request.\n\n This server only handles GET requests, so this method shall raise a\n NotImplementedError if the method of the request is not GET.\n \"\"\"\n print(f\"parse_request func, request received is {request}\")\n method, path, version = request.split(\"\\r\\n\")[0].split(\" \")\n print(\"parse_request: the path is {}\".format(path))\n\n if method != \"GET\":\n raise NotImplementedError\n return path\n\ndef response_path(path):\n \"\"\"\n This method should return appropriate content and a mime type.\n\n If the requested path is a directory, then the content should be a\n plain-text listing of the contents with mimetype `text/plain`.\n\n If the path is a file, it should return the contents of that file\n and its correct mimetype.\n\n If the path does not map to a real location, it should raise an\n exception that the server can catch to return a 404 response.\n\n Ex:\n response_path('/a_web_page.html') -> (b\"North Carolina...\",\n b\"text/html\")\n\n response_path('/images/sample_1.png')\n -> (b\"A12BCF...\", # contents of sample_1.png\n b\"image/png\")\n\n response_path('/') -> (b\"images/, a_web_page.html, make_type.py,...\",\n b\"text/plain\")\n\n response_path('/a_page_that_doesnt_exist.html') -> Raises a NameError\n\n \"\"\"\n # TODO: Fill in the appropriate content and mime_type give the path.\n # See the assignment guidelines for help on \"mapping mime-types\", though\n # you might need to create a special case for handling make_time.py\n #\n # If the path is \"make_time.py\", then you may OPTIONALLY return the\n # result of executing `make_time.py`. 
But you need only return the\n # CONTENTS of `make_time.py`.\n try:\n if 'webroot' not in os.getcwd():\n current_path = os.getcwd()\n current_path += '/webroot'\n os.chdir(current_path)\n print(f\"response parse func, changed to webroot, print cwd:{current_path}\")\n current_path = os.getcwd()\n new_path = current_path + path\n print(f\"this is new_path, current+path argument:{new_path}\")\n if not os.path.isdir(new_path):\n print(\"The path is a file, path argument:{}\".format(path))\n f_name = path.split(\"/\")\n file_name = f_name[-1]\n print(f\"file name is {file_name}\")\n mim = mimetypes.guess_type(file_name)[0]\n mime_type = mim.encode('utf-8')\n print(\"This is the mime_type:{}\".format(mime_type))\n if mime_type == b\"text/html\":\n content = b\"\"\n print(\"file name is {}\".format(file_name))\n with open(file_name, 'r') as reader:\n for line in reader.readlines():\n li_b = line.strip().encode('utf-8')\n li_b += b\"\\r\\n\"\n content += li_b\n print(\"line content: {}\".format(content))\n print(\"This is the whole content: {}\".format(content))\n\n if mime_type == b'text/plain':\n content = b\"\"\n print(\"file name is {}\".format(file_name))\n with open(file_name, 'r') as reader:\n for line in reader.readlines():\n li_b = line.strip().encode('utf-8')\n li_b += b\"\\r\\n\"\n content += li_b\n print(\"line content: {}\".format(content))\n print(\"This is the whole content: {}\".format(content))\n\n if mime_type == b\"image/png\":\n print(f\"the image/png file going to open is {file_name}\")\n with open(file_name, 'rb') as f:\n img = f.read()\n content = img\n print(\"image/png content is done\")\n\n if mime_type == b'image/jpeg':\n print(f\"the image/png file going to open is {file_name}\")\n with open(file_name, 'rb') as f:\n img = f.read()\n content = img\n print(\"image/jpeg content is done\")\n\n elif os.path.isdir(new_path):\n os.chdir(new_path)\n dirs = os.listdir(new_path)\n content = b\"\"\n for i in dirs:\n i += \"\\n\"\n item = i.encode('utf-8')\n print(item)\n content += item\n mime_type = b\"text/plain\"\n print(f\"new dir so new cwd :{os.getcwd()}\")\n except Exception as e:\n print(\"response_path error message: {}\".format(e))\n print(\"requested content is not present under webroot\")\n raise NameError\n\n return content, mime_type\n\n\ndef server(log_buffer=sys.stderr):\n address = ('127.0.0.1', 10000)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n print(\"making a server on {0}:{1}\".format(*address), file=log_buffer)\n sock.bind(address)\n sock.listen(1)\n\n try:\n while True:\n print('waiting for a connection', file=log_buffer)\n conn, addr = sock.accept() # blocking\n n = 0\n try:\n print('connection - {0}:{1}'.format(*addr), file=log_buffer)\n #request is decoded.\n request = ''\n while n<20:\n n +=1\n print(\"start receiving data buffer\")\n data = conn.recv(1024)\n print(\"request buffer received\")\n request += data.decode('utf8')\n print(f\"buffer is added, request by now is {request}\")\n\n if '\\r\\n\\r\\n' in request:\n #conn.close()\n break\n print(\"Request received:\\n{}\\n\\n\".format(request))\n\n try:\n\n # TODO: Use parse_request to retrieve the path from the request.\n path = parse_request(request)\n\n # TODO: Use response_path to retrieve the content and the mimetype,\n # based on the request path.\n body, mimetype = response_path(path)\n\n # TODO; If parse_request raised a NotImplementedError, then let\n # response be a method_not_allowed response. 
If response_path raised\n # a NameError, then let response be a not_found response. Else,\n # use the content and mimetype from response_path to build a \n # response_ok.\n response = response_ok(\n body,\n mimetype\n )\n except NotImplementedError:\n response = response_method_not_allowed()\n except NameError:\n response = response_not_found()\n #except KeyboardInterrupt:\n # sock.close()\n # conn.close()\n # print(\"closed all\")\n # break\n # return\n\n conn.sendall(response)\n #conn.close()\n except:\n #conn.close()\n traceback.print_exc()\n finally:\n conn.close() \n\n except KeyboardInterrupt:\n conn.close()\n sock.close()\n print(\"Keyboard interrupt: conn and sock closed\")\n return\n except:\n traceback.print_exc()\n\n\nif __name__ == '__main__':\n server()\n sys.exit(0)\n\n\n","sub_path":"http_server.py","file_name":"http_server.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
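A quick way to exercise the server above from another terminal, assuming it is listening on the `127.0.0.1:10000` address configured in `server()` (stdlib only, no extra dependencies):

```python
# Sketch: minimal GET client for the toy HTTP server above.
import socket

def fetch(path="/"):
    with socket.create_connection(("127.0.0.1", 10000)) as s:
        s.sendall("GET {} HTTP/1.1\r\nHost: localhost\r\n\r\n".format(path).encode("utf8"))
        chunks = []
        while True:
            data = s.recv(1024)   # the server closes the connection when done
            if not data:
                break
            chunks.append(data)
    return b"".join(chunks)

if __name__ == "__main__":
    print(fetch("/")[:200])       # status line plus the start of the listing
```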
+{"seq_id":"231886637","text":"#! /usr/bin/env python\n#\n# Copyright INRA-URGI 2009-2010\n# \n# This software is governed by the CeCILL license under French law and\n# abiding by the rules of distribution of free software. You can use,\n# modify and/ or redistribute the software under the terms of the CeCILL\n# license as circulated by CEA, CNRS and INRIA at the following URL\n# \"http://www.cecill.info\".\n# \n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n# \n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n# \n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license and that you accept its terms.\n#\n\nimport os\nimport struct\nfrom optparse import OptionParser\nfrom commons.core.parsing.GffParser import GffParser\nfrom commons.core.writer.Gff3Writer import Gff3Writer\n\nLONGSIZE = struct.calcsize('l')\n\nclass FindOverlaps_naif(object):\n \n def __init__(self, inputRefGff3FileName, inputQueryGff3FileName):\n self._inputRefGff3FileName = inputRefGff3FileName\n self._inputQueryGff3FileName = inputQueryGff3FileName\n \n def close(self):\n self._iGff3Writer.close()\n \n def setGff3FileName(self, fileName):\n self._inputRefGff3FileName = fileName\n \n def setQueryGff3FileName(self, fileName):\n self._inputQueryGff3FileName = fileName\n \n def setOutputGff3FileName(self, outputGff3FileName):\n if outputGff3FileName != '':\n self._outputGff3FileName = outputGff3FileName\n self._iGff3Writer = Gff3Writer(self._outputGff3FileName)\n \n def run(self):\n queryParser = GffParser(self._inputQueryGff3FileName, 0)\n for queryTranscript in queryParser.getIterator():\n ids = []\n refParser = GffParser(self._inputRefGff3FileName, 0)\n for refTranscript in refParser.getIterator():\n if queryTranscript.overlapWith(refTranscript):\n ids.append(refTranscript.getTagValue('ID'))\n if ids:\n queryTranscript.setTagValue(\"nbOverlaps\", len(ids))\n queryTranscript.setTagValue(\"overlapsWith\", \"--\".join(ids))\n self._iGff3Writer.addTranscript(queryTranscript)\n \nif __name__ == \"__main__\":\n description = \"FindOverlapsWithSeveralInterval: Finds overlaps with several query intervals.\"\n\n parser = OptionParser(description = description)\n parser.add_option(\"-i\", \"--inputRef\", dest=\"inputRefGff3FileName\", action=\"store\", type=\"string\", help=\"Reference input file [compulsory] [format: file in gff3 format]\")\n parser.add_option(\"-j\", \"--inputQuery\", dest=\"inputQueryGff3FileName\", action=\"store\", type=\"string\", help=\"Query input file [compulsory] [format: file in gff3 format]\")\n parser.add_option(\"-o\", \"--output\", dest=\"outputGff3FileName\", 
action=\"store\", type=\"string\", help=\"output file [compulsory] [format: output file in gff3 format]\")\n (options, args) = parser.parse_args()\n \n iFON = FindOverlaps_naif(options.inputRefGff3FileName, options.inputQueryGff3FileName)\n iFON.setOutputGff3FileName(options.outputGff3FileName)\n iFON.run()\n iFON.close()\n","sub_path":"SMART/Java/Python/ncList/FindOverlaps_naif.py","file_name":"FindOverlaps_naif.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"42742608","text":"# Built-in imports\nfrom typing import List\n\n# Third party import \nimport torch\n\nfrom allennlp.data.batch import Batch\nfrom allennlp.nn.util import move_to_device\n\n# Custom imports\nfrom facade.finetuners.finetuner import FineTuner\nfrom facade.util.misc import compute_rank, get_stop_ids, create_labeled_instances\n\nclass NLI_FineTuner(FineTuner):\n def __init__(self, model, reader, train_data, dev_data, vocab, args, regularize=False):\n super().__init__(\n model, \n reader, \n train_data, \n dev_data, \n vocab, \n args, \n outdir=\"nli_facade_experiments\" if not regularize else \"nli_rp_experiments\",\n regularize=regularize\n )\n\n self.attack_target = args.attack_target\n\n if self.regularize:\n self.model_dir = \"nli_rp_models\"\n else:\n self.model_dir = \"nli_facade_models\"\n\n self.log_meta_data()\n \n def log(\n self,\n iter: int,\n entropy_loss,\n grad_loss, \n rank: List[int],\n gradients, \n loss, \n output_probs, \n output_logits, \n raw_gradients\n ) -> None: \n self.model.eval() # model should be in eval() already, but just in case\n\n total_grad_rank = 0\n\n total_ent = 0 \n\n total_stop_word_attribution_premise = 0\n total_stop_word_attribution_hypothesis = 0 \n total_stop_word_grad_value = 0\n\n total_first_token_attribution = 0\n total_first_token_grad_rank = 0\n total_first_token_grad_value = 0\n\n total_last_token_attribution = 0\n total_last_token_grad_rank = 0\n total_last_token_grad_value = 0\n\n for i, batch in enumerate(self.batched_dev_instances): \n print(i)\n print(torch.cuda.memory_summary(device=0, abbreviated=True)) # NOTE: comment out to check cuda memory\n data = Batch(batch)\n data.index_instances(self.vocab)\n model_input = data.as_tensor_dict()\n model_input = move_to_device(model_input, cuda_device=0) if self.cuda else model_input\n with torch.no_grad(): \n outputs = self.model(**model_input)\n\n new_instances = create_labeled_instances(self.predictor, outputs, batch, self.cuda)\n grads, raw_grads = self.simple_gradient_interpreter.sst_interpret_from_instances(\n new_instances, \n self.embedding_op, \n self.normalization, \n self.normalization2, \n self.cuda, \n higher_order_grad=False\n )\n \n if self.importance == 'stop_token':\n # calculate attribution of stop tokens in all sentences\n # of the batch \n premise_stop_ids = []\n hypothesis_stop_ids = []\n for instance in new_instances:\n premise_stop_ids.append(get_stop_ids(instance, self.stop_words, \"premise\"))\n hypothesis_stop_ids.append(get_stop_ids(instance, self.stop_words, \"hypothesis\"))\n \n for j, grad in enumerate(grads):\n total_stop_word_attribution_premise += torch.sum(torch.abs(grad[premise_stop_ids[j]])).detach()\n total_stop_word_attribution_hypothesis += torch.sum(torch.abs(grad[hypothesis_stop_ids[j]])).detach()\n\n if self.importance == 'first_token':\n for j, grad in enumerate(grads): \n total_first_token_attribution += torch.abs(torch.sum(grad[1]).detach())\n total_first_token_grad_rank += compute_rank(grad, {1})[0]\n total_first_token_grad_value += torch.abs(raw_grads[j][1])\n\n \n total_ent += self.criterion(outputs['probs'])\n\n avg_entropy = total_ent/len(self.dev_data)\n\n avg_stop_word_attribution_premise = total_stop_word_attribution_premise/len(self.dev_data)\n avg_stop_word_attribution_hypothesis = total_stop_word_attribution_hypothesis/len(self.dev_data)\n\n avg_first_token_attribution = total_first_token_attribution/len(self.dev_data)\n avg_first_token_grad_rank = total_first_token_grad_rank/len(self.dev_data)\n 
avg_first_token_grad_value = total_first_token_grad_value/len(self.dev_data)\n\n avg_last_token_attribution = total_last_token_attribution/len(self.dev_data)\n avg_last_token_grad_rank = total_last_token_grad_rank/len(self.dev_data)\n avg_last_token_grad_value = total_last_token_grad_value/len(self.dev_data)\n\n with open(self.entropy_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_entropy))\n\n # Stop word files\n with open(self.stop_word_attribution_dev_file_name, \"a\") as f:\n f.write(\"Iter #{} premise: {}\\n\".format(iter, avg_stop_word_attribution_premise))\n f.write(\"Iter #{} hypothesis: {}\\n\".format(iter, avg_stop_word_attribution_hypothesis))\n f.write(\"Iter #{} total: {}\\n\".format(iter, avg_stop_word_attribution_premise + avg_stop_word_attribution_hypothesis))\n\n # First token files \n with open(self.first_token_attribution_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_first_token_attribution))\n with open(self.avg_first_token_grad_rank_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_first_token_grad_rank))\n with open(self.avg_first_token_grad_value_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_first_token_grad_value))\n\n # Last token files \n with open(self.last_token_attribution_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_last_token_attribution))\n with open(self.avg_last_token_grad_rank_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_last_token_grad_rank))\n with open(self.avg_last_token_grad_value_dev_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, avg_last_token_grad_value))\n \n if iter != 0:\n with open(self.entropy_loss_file_name, \"a\") as f: \n f.write(\"Iter #{}: {}\\n\".format(iter, entropy_loss))\n with open(self.grad_loss_file_name, \"a\") as f: \n f.write(\"Iter #{}: {}\\n\".format(iter, grad_loss))\n with open(self.grad_rank_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, rank))\n with open(self.grad_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, gradients))\n with open(self.total_loss_file_name, \"a\") as f: \n f.write(\"Iter #{}: {}\\n\".format(iter, loss))\n with open(self.output_probs_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, output_probs))\n with open(self.output_logits_file_name, \"a\") as f:\n f.write(\"Iter #{}: {}\\n\".format(iter, output_logits))\n with open(self.raw_grads_file_name, \"a\") as f: \n f.write(\"Iter #{}: {}\\n\".format(iter, raw_gradients))","sub_path":"finetuners/nli_finetuner.py","file_name":"nli_finetuner.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
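The `log()` method above keeps one `total_*` variable per metric, which is easy to leave half-wired (the `total_last_token_*` counters, for instance, are averaged but never accumulated). A dict-based accumulator is one way to keep that bookkeeping in a single place; this is an illustrative sketch, not the project's API:

```python
# Sketch: running totals in a dict so adding a metric is a one-liner.
from collections import defaultdict

class MetricAccumulator:
    def __init__(self):
        self.totals = defaultdict(float)
        self.count = 0

    def add(self, **metrics):
        self.count += 1
        for name, value in metrics.items():
            self.totals[name] += float(value)

    def averages(self):
        return {name: total / self.count for name, total in self.totals.items()}

acc = MetricAccumulator()
acc.add(entropy=0.7, first_token_attribution=0.12)
acc.add(entropy=0.5, first_token_attribution=0.08)
print(acc.averages())   # {'entropy': 0.6, 'first_token_attribution': 0.1}
```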
+{"seq_id":"462472246","text":"from math import sin, cos, radians\nfrom subprocess import Popen, PIPE\n\ndef mem(*params):\n \"\"\"\n Communicate with the C memory management utility via subprocess.Popen\n Takes in a series of arguments as parameters for mem.exe\n Returns parsed response, raises all exceptions\n \"\"\"\n params = ['mem.exe'] + [str(param) for param in params]\n proc = Popen(params, stdout=PIPE, stderr=PIPE, universal_newlines=True)\n stdout, stderr = proc.communicate()\n if stderr:\n raise Exception(stderr)\n if stdout:\n return [float(o) for o in stdout.split(' ')]\n\nclass HLMVModel(object):\n def __init__(self, initial):\n \"\"\"\n Iniitial model setup. Enable normal maps, set the background to white,\n and load the current rotation and translation from memory.\n If initial values are specified, rot and trans will be set instead.\n \"\"\"\n mem('nm', 1)\n mem('color', '1.0', '1.0', '1.0', '1.0')\n if initial['rotation']:\n self.rotation = initial['rotation']\n mem('rot', *self.rotation)\n else: # Load from current state\n self.rotation = mem('rot')\n if initial['translation']:\n self.translation = initial['translation']\n mem('trans', *self.translation)\n else:\n self.translation = mem('trans')\n if initial['rotation_offset']:\n self.rot_offset = initial['rotation_offset']\n else:\n self.rot_offset = 0\n if initial['vertical_offset']:\n self.vert_offset = initial['vertical_offset']\n else:\n self.vert_offset = 0\n\n def set_background(self, value):\n \"\"\"\n Set the HLMV background to a given value.\n \"\"\"\n mem('bg', value*1)\n\n def rotate(self, x, y):\n \"\"\"\n Rotate the model to coordinates x, y from its initial rotation.\n X rotation is around the vertical axis, aka yaw\n Y rotation is around the horizontal axis, aka pitch\n\n Note that HLMV uses degrees while python uses radians.\n \"\"\"\n\n mem('rot',\n self.rotation[0] + x,\n self.rotation[1] + y,\n self.rotation[2]\n )\n\n x = radians(x)\n y = radians(y)\n\n xy_shift = sin(x)*sin(y)*self.vert_offset\n mem('trans',\n self.translation[0] + cos(y)*self.rot_offset + xy_shift,\n self.translation[1] + sin(y)*self.rot_offset + xy_shift,\n self.translation[2] - sin(x)*self.rot_offset\n )\n","sub_path":"HLMVModel.py","file_name":"HLMVModel.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"463222810","text":"import sys \n#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages') #this line is useful in cases when ros and cv have conflict\nimport cv2\n#sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')\n\npath = ' ' #add custom path \n\n#CHANGE HERE \nvidcap = cv2.VideoCapture(path + 'mp.mp4') #path to video file\n\nsuccess,image = vidcap.read()\ncount = 1\nwhile success:\n\t#CHANGE HERE \n\tcv2.imwrite(path + 'Cup/%d.jpg' % count, image) # save frame as JPEG file \n\n\tsuccess,image = vidcap.read()\n\tprint('Read a new frame: ', success)\n\tcount += 1\n\tif count == 50:\n\t\tbreak\n\tprint(count)\n\tif count == int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)):\n\t\tbreak\n","sub_path":"src/Fragmenting_videos_to_frames.py","file_name":"Fragmenting_videos_to_frames.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"346474433","text":"from flask import jsonify\nfrom flask.ext.restful import Resource, reqparse\nfrom myapi import db, app\nfrom myapi.model.recommend import RecommendTypeModel, RecommendItemModel\n\npost_parser = reqparse.RequestParser()\npost_parser.add_argument('typeid', type=int, location='json')\npost_parser.add_argument('itemid', type=int, location='json')\npost_parser.add_argument('targetitemid', type=int, location='json')\npost_parser.add_argument('name', type=str, location='json')\npost_parser.add_argument('title', type=str, location='json')\npost_parser.add_argument('description', type=str, location='json')\npost_parser.add_argument('image', type=str, location='json')\npost_parser.add_argument('url', type=str, location='json')\npost_parser.add_argument('orderid', type=int, location='json')\n\nclass RecommendType(Resource):\n def get(self, id):\n category = RecommendTypeModel.query.get(id)\n return jsonify(data=category.serialize()) if category else jsonify(data='')\n \n def post(self):\n args = post_parser.parse_args()\n\n category = RecommendTypeModel(args.name)\n db.session.add(category)\n db.session.commit()\n\n return jsonify(data=category.serialize())\n\n def put(self):\n args = post_parser.parse_args()\n category = RecommendTypeModel.query.get(args.typeid)\n category.name = args.name\n db.session.commit()\n return jsonify(data=category.serialize())\n\n def delete(self):\n args = post_parser.parse_args()\n category = RecommendTypeModel.query.get(args.typeid)\n for item in category.items:\n db.session.delete(item)\n db.session.delete(category)\n db.session.commit()\n return jsonify(result='true')\n\nclass RecommendTypeList(Resource):\n def get(self):\n categorys = RecommendTypeModel.query.all()\n return jsonify(data=[category.serialize() for category in categorys])\n\nclass RecommendItem(Resource):\n def get(self, id):\n item = RecommendItemModel.query.get(id)\n return jsonify(data=item.serialize()) if item else jsonify(data='')\n \n def post(self):\n args = post_parser.parse_args()\n item = RecommendItemModel(args.title, args.description, args.image, args.url, args.orderid)\n db.session.add(item)\n\n category = RecommendTypeModel.query.get(args.typeid)\n category.items.append(item)\n db.session.commit()\n return jsonify(data=item.serialize())\n\n def put(self):\n args = post_parser.parse_args()\n item = RecommendItemModel.query.get(args.itemid)\n item.title = args.title\n item.description = args.description\n item.image = args.image\n item.url = args.url\n item.orderid = args.orderid\n\n # target = RecommendItemModel.query.get(args.targetitemid)\n # item.orderid = target.orderid\n # target.orderid = orderid\n\n db.session.commit()\n return jsonify(data=item.serialize())\n\n def delete(self):\n args = post_parser.parse_args()\n item = RecommendItemModel.query.get(args.itemid)\n\n db.session.delete(item)\n db.session.commit()\n return jsonify(result='true')\n\nclass RecommendItemList(Resource):\n def get(self, typeid):\n category = RecommendTypeModel.query.get(typeid)\n if category and category.items:\n return jsonify(data=[item.serialize() for item in category.items])\n else:\n return jsonify(data='')\n\n","sub_path":"myapi/resources/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"459059501","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nfrom ddic import *\n\ndef cm2inch(*tupl): # Conversion de cm vers inch\n\tinch = 2.54\n\tif isinstance(tupl[0], tuple):\n\t\treturn tuple(i/inch for i in tupl[0])\n\telse:\n\t\treturn tuple(i/inch for i in tupl)\n\ndef trace_track(x0,y0,limits,w,d,color):\n\t(vmin, vmax) = limits\n\tfor i in range(101):\n\t\tplt.plot([x0+(1+4*i/10000)*w,x0+(1+4*i/10000)*w],[y0-i/100*d/2,y0-d+i/100*d/2],color=color,linewidth=0.4)\n\tn = vmax-vmin+1\n\tif color == '#000000':\n\t\tc = '#f6f6f6'\n\t\tif vmin == 0:\n\t\t\tR = range(1,n)\n\t\telse:\n\t\t\tR = range(1,n-2)\n\telse:\n\t\tc = 'k'\n\t\tR = range(n)\n\tfor k in range(n):\n\t\tif k in R:\n\t\t\tplt.plot([x0 + (k+1)*w/n,x0 + (k+1)*w/n],[y0,y0-d],color=c,linewidth=0.7)\n\t\tnum = (3-len(str(vmin+k)))*' ' + str(vmin+k)\n\t\tif vmin+k == 0:\n\t\t\tax.add_patch(patches.Rectangle((x0+k*w/n,y0-d),w/n,d,facecolor='#f6f6f6'))\n\t\t\tplt.text(x0 + k*w/n + 0.2*w/n,y0-d/1.8,num,fontsize = 6,color=color)\n\t\telse:\n\t\t\tax.add_patch(patches.Rectangle((x0+k*w/n,y0-d),w/n,d,facecolor=color))\n\t\t\tplt.text(x0 + k * w / n + 0.2 * w / n, y0 - d / 1.8, num, fontsize=6, color='w')\n\tplt.plot([x0+w,x0,x0,x0+w,x0+1.04*w,x0+w],[y0-d,y0-d,y0,y0,y0-d/2,y0-d],color='k',linewidth=1)\n\nW,H = cm2inch(21,29.7) # Dimensions d'une feuille A4\nmid = 1.5*H/2 #Position de la separatrice\n\nfig = plt.figure(figsize=(H,W))\nax = fig.add_subplot(111,aspect = 'equal')\nplt.plot([0,H,H,0,0],[W,W,0,0,W],color='k') #Contours A4\n\ncols_res = ['#ffc800','#ff0000','#000000','#55bcff','#954e00','#8d8d8d']\nbornes = [(0,20),(-10,10),(0,20),(-20,0),(-10,10),(-10,10),(-10,10)]\nd = W/15 #Largeur d'une jauge\ns = (W - len(bornes)*d)/7 #Espace inter-jauges\nL_n = [1,1,2,1,1,1]\n\ni = 0\nfor k in range(6):\n\tfor l in range(L_n[k]):\n\t\tx0 = 0.07*H/2\n\t\ty0 = W - (i*d + (k+1)*s) - l*d/5\n\t\ttrace_track(x0,y0,bornes[i],0.86*mid,d,cols_res[k])\n\t\ti += 1\n\nplt.plot([mid,mid],[0,W],color='k',linewidth=2) #Zones de stockage\nplt.plot([mid,H],[2*W/3,2*W/3],color='k',linewidth=2)\nplt.plot([mid,H],[W/3,W/3],color='k',linewidth=2)\nplt.plot([(H+mid)/2,(H+mid)/2],[W/3,0],color='k',linewidth=2)\nplt.text(1.05*mid,9.5*W/12,'$UM$',color=cols_res[0],fontsize=60,alpha=0.7)\nplt.text(1.11*mid,5.5*W/12,'$H$',color=cols_res[2],fontsize=60,alpha=0.7)\nplt.text(1.025*mid,1.5*W/12,'$P$',color=cols_res[5],fontsize=60,alpha=0.7)\nplt.text(1.185*mid,1.67*W/12,'$Env$',color='#20c22e',fontsize=30,alpha=0.7)\n\nplt.axis('equal')\nplt.axis('off')\nplt.show()\n\nplt.savefig('jauges.png',format='png',dpi=500)\nresize('jauges.png')","sub_path":"DDIC_Jauges.py","file_name":"DDIC_Jauges.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"443974742","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\n# Create your models here.\n\n\nclass UserProfile(models.Model):\n\tuser = models.OneToOneField(User)\n\tpicture = models.ImageField(upload_to='/media', blank=True)\n\trating = models.IntegerField(default=0)\n\tstatus = models.CharField(max_length=10, choices=(('Stuff', 'Stuff'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t ('Manager', 'Manager')))\n\tgender = models.CharField(max_length=10, choices=(('male', 'male'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t ('female', 'femaie')))\n\temail = models.EmailField(max_length=50)\n\n\tdef __unicode__(self):\n\t\treturn self.user.username\n\n\tdef __str__(self):\n\t\treturn self.user.username\n\n\nclass City(models.Model):\n\tname = models.CharField(max_length=40)\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\nclass Post(models.Model):\n\tcity = models.ForeignKey(City)\n\ttitle = models.CharField(max_length=50)\n\tcontent = models.TextField()\n\tauthor = models.ForeignKey(UserProfile)\n\tcomment_number = models.IntegerField(default=0)\n\thas_author = models.BooleanField(default=True)\n\n\nclass Message(models.Model):\n\ttext = models.TextField()\n\treceiver = models.ForeignKey(UserProfile, related_name='receiver')\n\tauthor = models.ForeignKey(UserProfile, related_name='author')\n\n\nclass Task(models.Model):\n\ttask_name = models.CharField(max_length=30, blank=True, null=True)\n\tlong_description = models.TextField()\n\tshort_description = models.CharField(max_length=150)\n\tassigned_to = models.ForeignKey(UserProfile)\n\tstatus = models.CharField(max_length=10, choices=(('A', 'active'), ('O', 'over'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t ('C', 'cancelled')))\n\n\nclass Comment(models.Model):\n\ttopic = models.ForeignKey(Post)\n\tauthor = models.ForeignKey(UserProfile)\n\ttext = models.TextField()\n\tdate = models.DateTimeField()\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"612536465","text":"# -*- coding: utf-8 -*-\n\nimport boto3\n\n# DynamoDB\nclass Dynamo(object):\n def __init__(self, table_name, key_schema=None, attribute_definitions=None, \n provisioned_throughput=None, local_mode=False):\n \n # 接続\n if local_mode:\n self.resource = boto3.resource(\n \"dynamodb\",\n region_name = \"ap-northeast-1.\",\n endpoint_url = \"http://localhost:8000\",\n aws_access_key_id = \"ACCESS_ID\",\n aws_secret_access_key = \"ACCESS_KEY\"\n )\n else:\n self.resource = boto3.resource(\"dynamodb\")\n \n # テーブル接続\n if self._exists_table(table_name)==False: \n # テーブル作成\n self.table = self.resource.create_table(\n TableName = table_name,\n KeySchema = key_schema,\n AttributeDefinitions = attribute_definitions,\n ProvisionedThroughput = provisioned_throughput\n )\n else:\n self.table = self.resource.Table(name=table_name)\n \n # リージョンの違うテーブルは存在確認できないかも...\n def _exists_table(self, table_name):\n return self.resource.Table(name=table_name) in self.resource.tables.all()\n \n def show_tables(self):\n for table in self.resource.tables.all(): print(table)\n \n def delete_table(self, table_name):\n self.resource.Table(name=table_name).delete()\n \n def put_items(self, item_list):\n with self.table.batch_writer() as batch:\n for item in item_list: batch.put_item(Item=item)\n \n def scan(self, filter_expression):\n result = self.table.scan(FilterExpression=filter_expression)\n return result[\"Items\"]\n \n def query(self, key_condition_expression):\n result = self.table.query(KeyConditionExpression=key_condition_expression)\n return result[\"Items\"]\n \n","sub_path":"google/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"530676541","text":"# Problem [1258] : 행렬찾기\n\ndef find_Arr(arr):\n result = list()\n col_size = len(arr)\n row_size = len(arr[0])\n check = [[0]*row_size for _ in range(col_size)]\n c_col = 0\n c_row = 0\n for i in range(col_size):\n for j in range(row_size):\n if check[i][j] == 0 and arr[i][j] != 0:\n c_col = i\n c_row = j\n while 1:\n c_col += 1\n if arr[c_col][c_row] == 0:\n c_col -= 1\n break\n if c_col == col_size -1:\n break \n while 1:\n c_row += 1\n if arr[c_col][c_row] == 0:\n c_row -= 1\n break\n if c_row == row_size -1:\n break\n for x in range(i,c_col+1):\n for y in range(j,c_row+1):\n check[x][y] = 1\n if [c_col-i+1,c_row-j+1] not in result:\n result.append([c_col-i+1,c_row-j+1])\n return result\n\n\n\n\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n data = [list(map(int,input().split())) for _ in range(N)]\n sorted_data = sorted(find_Arr(data),key = lambda x : (x[0]*x[1],x[0]))\n cnt = len(sorted_data)\n result = list()\n for data in sorted_data:\n result.extend(data)\n print('#{} {} {}'.format(tc,cnt,' '.join(map(str,result))))","sub_path":"SWEA/D4/SWEA_1258.py","file_name":"SWEA_1258.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"414496606","text":"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of `tf.linalg.LinearOperator` class and subclasses.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport logging\n\n# Dependency imports\nimport numpy as np\n\nfrom tensorflow_probability.python.internal.backend.numpy.internal import utils\n\n\n__all__ = [\n \"LinearOperator\",\n \"LinearOperatorBlockDiag\",\n \"LinearOperatorDiag\",\n \"LinearOperatorFullMatrix\",\n \"LinearOperatorIdentity\",\n \"LinearOperatorScaledIdentity\",\n \"LinearOperatorLowRankUpdate\",\n \"LinearOperatorLowerTriangular\",\n]\n\n\ndef _to_ndarray(x):\n if isinstance(x, (np.ndarray, np.generic)):\n return x\n return np.array(x)\n\n\ndef _adjoint(x):\n return np.conj(np.transpose(x, axes=[-1, -2]))\n\n\ndef _matmul_with_broadcast(a,\n b,\n transpose_a=False, # pylint: disable=unused-argument\n transpose_b=False, # pylint: disable=unused-argument\n adjoint_a=False,\n adjoint_b=False,\n a_is_sparse=False, # pylint: disable=unused-argument\n b_is_sparse=False, # pylint: disable=unused-argument\n name=None): # pylint: disable=unused-argument\n \"\"\"Multiplies matrix `a` by matrix `b`, producing `a @ b`.\n\n Works identically to `tf.matmul`, but broadcasts batch dims\n of `a` and `b` if they are determined statically to be different, or if static\n shapes are not fully defined. Attempts are made to avoid unnecessary\n replication of data, but this is not always possible.\n\n The inputs must be matrices (or tensors of rank > 2, representing batches of\n matrices).\n\n Both matrices must be of the same type. The supported types are:\n `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n\n Either matrix can be transposed or adjointed (conjugated and transposed) on\n the fly by setting one of the corresponding flag to `True`. These are `False`\n by default.\n\n If one or both of the matrices contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. 
These are `False` by default.\n This optimization is only available for plain matrices (rank-2 tensors) with\n datatypes `bfloat16` or `float32`.\n\n For example:\n\n ```python\n # A 2-batch of 3x4 matrices\n a = tf.random.normal(shape=(2, 3, 4))\n\n # A single 4x5 matrix\n b = tf.random.normal(shape=(4, 5))\n\n result = matmul_with_broadcast(a, b)\n\n result.shape\n ==> (2, 3, 5)\n\n result[0,...]\n ==> tf.matmul(a[0,...], b)\n\n result[1,...]\n ==> tf.matmul(a[1,...], b)\n ```\n\n Args:\n a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,\n `complex128` and `rank > 1`.\n b: `Tensor` with same type as `a` having compatible matrix dimensions and\n broadcastable batch dimensions.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n adjoint_b: If `True`, `b` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a` and `b` where each inner-most matrix is\n the product of the corresponding matrices in `a` and `b`, e.g. if all\n transpose or adjoint attributes are `False`:\n\n The leading shape of `output` is the result of broadcasting the leading\n dimensions of `a` and `b`.\n\n `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),\n for all indices i, j.\n\n Note: This is matrix product, not element-wise product.\n\n\n Raises:\n ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b\n are both set to True.\n \"\"\"\n a = np.array(a)\n b = np.array(b, dtype=a.dtype)\n\n if adjoint_a:\n a = _adjoint(a)\n if adjoint_b:\n b = _adjoint(b)\n\n return np.matmul(a, b)\n\n\ndef _matrix_solve_with_broadcast(matrix, rhs, adjoint=False):\n \"\"\"Solve systems of linear equations.\"\"\"\n matrix = np.array(matrix)\n rhs = np.array(rhs, dtype=matrix.dtype)\n\n if adjoint:\n matrix = _adjoint(matrix)\n\n # matrix, rhs = np.broadcast_arrays(matrix, rhs)\n\n return np.linalg.solve(matrix, rhs)\n\n\ndef _reshape_for_efficiency(a,\n b,\n transpose_a=False,\n transpose_b=False,\n adjoint_a=False,\n adjoint_b=False):\n \"\"\"Maybe reshape a, b, and return an inverse map. For matmul/solve.\"\"\"\n def identity(x):\n return x\n\n # At this point, we have not taken transpose/adjoint of a/b.\n still_need_to_transpose = True\n\n if a.shape.ndims is None or b.shape.ndims is None:\n return a, b, identity, still_need_to_transpose\n\n # This could be handled in the future, but seems less common.\n if a.shape.ndims >= b.shape.ndims:\n return a, b, identity, still_need_to_transpose\n\n # From now on, we might modify b, but will not modify a.\n\n # Suppose:\n # a.shape = C + [m, n], b.shape =\n # b.shape = S + C + [n, r]\n b_extra_ndims = b.shape.ndims - a.shape.ndims\n\n # b_extra_sh = S, b_main_sh = C + [n, r]\n b_extra_sh = b.shape[:b_extra_ndims]\n b_main_sh = b.shape[b_extra_ndims:]\n\n # No reason to flip unless the extra dims of b are big enough. Why?\n # Assume adjoint/transpose = False. Then...\n # By not flipping, we have to replicate a to shape\n # b_extra_sh + a.shape,\n # which could use extra memory. But in all cases, the final output has shape\n # b_extra_sh + a.shape[:-1] + [b.shape[-1]]\n # So we only end up creating a larger object if the end dim of b is smaller\n # than the end dim of a. 
This often happens, e.g. if b was a vector that was\n # expanded to a matrix (by appending a singleton).\n\n # Since adjoint/transpose may not be False, we must make adjustments here.\n # The dim of b that holds the multiple equations.\n a_domain_sz_ = a.shape[-2 if adjoint_a or transpose_a else -1]\n b_eq_sz_ = b.shape[-2 if adjoint_b or transpose_b else -1]\n b_extra_sz_ = (\n np.prod(b.shape[:b_extra_ndims].as_list())\n if b.shape[:b_extra_ndims].is_fully_defined() else None)\n if (a_domain_sz_ is not None and b_eq_sz_ is not None and\n b_extra_sz_ is not None):\n if b_extra_sz_ < 2 or a_domain_sz_ <= b_eq_sz_:\n return a, b, identity, still_need_to_transpose\n\n # At this point, we're flipping for sure!\n # Any transposes/adjoints will happen here explicitly, rather than in calling\n # code. Why? To avoid having to write separate complex code for each case.\n if adjoint_a:\n a = _adjoint(a)\n elif transpose_a:\n a = np.transpose(a, axes=[-2, -1])\n if adjoint_b:\n b = _adjoint(b)\n elif transpose_b:\n b = np.transpose(b, axes=[-2, -1])\n still_need_to_transpose = False\n\n # Recompute shapes, since the transpose/adjoint may have changed them.\n b_extra_sh = b.shape[:b_extra_ndims]\n b_main_sh = b.shape[b_extra_ndims:]\n\n # Permutation to put the extra dims at the end.\n perm = (\n np.concatenate(\n (np.arange(b_extra_ndims, b.shape.ndims),\n np.arange(0, b_extra_ndims)), 0))\n b_extra_on_end = np.transpose(b, axes=perm)\n\n # Now squash this end into one long dim.\n b_squashed_end = np.reshape(\n b_extra_on_end, np.concatenate((b_main_sh[:-1], [-1]), 0))\n\n def reshape_inv(y):\n # Expand the extra dims hanging off the end, \"b_extra_sh\".\n # Note we use y_sh[:-1] + [b_main_sh[-1]] rather than b_main_sh, because y\n # Could have different batch dims than a and b, because of broadcasting.\n y_extra_shape = np.concatenate(\n (y.shape[:-1], [b_main_sh[-1]], b_extra_sh), 0)\n y_extra_on_end = np.reshape(y, y_extra_shape)\n inverse_perm = np.argsort(perm)\n return np.transpose(y_extra_on_end, axes=inverse_perm)\n\n return a, b_squashed_end, reshape_inv, still_need_to_transpose\n\n\nclass LinearOperator(object):\n \"\"\"Reimplementation of tf.linalg.LinearOperator.\"\"\"\n\n def __init__(self,\n dtype,\n graph_parents=None,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=None):\n r\"\"\"Initialize the `LinearOperator`.\n\n **This is a private method for subclass use.**\n **Subclasses should copy-paste this `__init__` documentation.**\n\n Args:\n dtype: The type of the this `LinearOperator`. Arguments to `matmul` and\n `solve` will have to be this type.\n graph_parents: Ignored.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. If `dtype` is real, this is equivalent to being symmetric.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: Ignored.\n\n Raises:\n ValueError if flags are inconsistent.\n \"\"\"\n # Check and auto-set flags.\n if is_positive_definite:\n if is_non_singular is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"A positive definite matrix is always non-singular.\")\n is_non_singular = True\n\n if is_non_singular is True: # pylint: disable=g-bool-id-comparison\n if is_square is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"A non-singular matrix is always square.\")\n is_square = True\n\n if is_self_adjoint:\n if is_square is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"A self-adjoint matrix is always square.\")\n is_square = True\n\n self._is_square_set_or_implied_by_hints = is_square\n\n self._dtype = dtype\n self._is_non_singular = is_non_singular\n self._is_self_adjoint = is_self_adjoint\n self._is_positive_definite = is_positive_definite\n self._graph_parents = graph_parents\n self._name = name + \"/\"\n\n @property\n def dtype(self):\n \"\"\"The `DType` of `Tensor`s handled by this `LinearOperator`.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Name prepended to all ops created by this `LinearOperator`.\"\"\"\n return self._name\n\n @property\n def graph_parents(self):\n \"\"\"List of graph dependencies of this `LinearOperator`.\"\"\"\n return self._graph_parents\n\n @property\n def is_non_singular(self):\n return self._is_non_singular\n\n @property\n def is_self_adjoint(self):\n return self._is_self_adjoint\n\n @property\n def is_positive_definite(self):\n return self._is_positive_definite\n\n @property\n def is_square(self):\n \"\"\"Return `True/False` depending on if this operator is square.\"\"\"\n # Static checks done after __init__. Why? 
Because domain/range dimension\n # sometimes requires lots of work done in the derived class after init.\n auto_square_check = self.domain_dimension == self.range_dimension\n if self._is_square_set_or_implied_by_hints is False and auto_square_check: # pylint: disable=g-bool-id-comparison\n raise ValueError(\n \"User set is_square hint to False, but the operator was square.\")\n if self._is_square_set_or_implied_by_hints is None:\n return auto_square_check\n\n return self._is_square_set_or_implied_by_hints\n\n @abc.abstractmethod\n def _shape(self):\n # Write this in derived class to enable all static shape methods.\n raise NotImplementedError(\"_shape is not implemented.\")\n\n @property\n def shape(self):\n \"\"\"`TensorShape` of this `LinearOperator`.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns\n `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.\n\n Returns:\n `TensorShape`, statically determined, may be undefined.\n \"\"\"\n return self._shape()\n\n def shape_tensor(self, name=\"shape_tensor\"): # pylint: disable=unused-argument\n \"\"\"Shape of this `LinearOperator`, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n return self.shape\n\n @property\n def batch_shape(self):\n \"\"\"`TensorShape` of batch dimensions of this `LinearOperator`.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns\n `TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`\n\n Returns:\n `TensorShape`, statically determined, may be undefined.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n return self.shape[:-2]\n\n def batch_shape_tensor(self, name=\"batch_shape_tensor\"): # pylint: disable=unused-argument\n \"\"\"Shape of batch dimensions of this operator, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb]`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n return self.batch_shape\n\n @property\n def tensor_rank(self, name=\"tensor_rank\"): # pylint: disable=unused-argument\n \"\"\"Rank (in the sense of tensors) of matrix corresponding to this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Python integer, or None if the tensor rank is undefined.\n \"\"\"\n return len(self.shape) or None\n\n def tensor_rank_tensor(self, name=\"tensor_rank_tensor\"): # pylint: disable=unused-argument\n \"\"\"Rank (in the sense of tensors) of matrix corresponding to this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`, determined at runtime.\n \"\"\"\n return self.tensor_rank(name)\n\n @property\n def domain_dimension(self):\n \"\"\"Dimension (in the sense of vector spaces) of the domain of this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.\n\n Returns:\n int or None.\n \"\"\"\n if len(self.shape): # pylint: 
disable=g-explicit-length-test\n return self.shape[-1]\n return None\n\n def domain_dimension_tensor(self, name=\"domain_dimension_tensor\"): # pylint: disable=unused-argument\n \"\"\"Dimension (in the sense of vector spaces) of the domain of this operator.\n\n Determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n return self.domain_dimension\n\n @property\n def range_dimension(self):\n \"\"\"Dimension (in the sense of vector spaces) of the range of this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\n Returns:\n `Dimension` object.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n if len(self.shape) > 1:\n return self.shape[-2]\n else:\n return None\n\n def range_dimension_tensor(self, name=\"range_dimension_tensor\"): # pylint: disable=unused-argument\n \"\"\"Dimension (in the sense of vector spaces) of the range of this operator.\n\n Determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n return self.range_dimension\n\n def assert_non_singular(self, name=\"assert_non_singular\"): # pylint: disable=unused-argument\n \"\"\"Returns an `Op` that asserts this operator is non singular.\n\n This operator is considered non-singular if\n\n ```\n ConditionNumber < max{100, range_dimension, domain_dimension} * eps,\n eps := np.finfo(self.dtype.as_numpy_dtype).eps\n ```\n\n Args:\n name: A string name to prepend to created ops.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is singular.\n \"\"\"\n return None\n\n def assert_positive_definite(self, name=\"assert_positive_definite\"): # pylint: disable=unused-argument\n \"\"\"Returns an `Op` that asserts this operator is positive definite.\n\n Here, positive definite means that the quadratic form `x^H A x` has positive\n real part for all nonzero `x`. Note that we do not require the operator to\n be self-adjoint to be positive definite.\n\n Args:\n name: A name to give this `Op`.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not positive definite.\n \"\"\"\n return None\n\n def assert_self_adjoint(self, name=\"assert_self_adjoint\"): # pylint: disable=unused-argument\n \"\"\"Returns an `Op` that asserts this operator is self-adjoint.\n\n Here we check that this operator is *exactly* equal to its hermitian\n transpose.\n\n Args:\n name: A string name to prepend to created ops.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not self-adjoint.\n \"\"\"\n return None\n\n @abc.abstractmethod\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n raise NotImplementedError(\"_matmul is not implemented.\")\n\n def matmul(self, x, adjoint=False, adjoint_arg=False, name=\"matmul\"): # pylint: disable=unused-argument\n \"\"\"Transform [batch] matrix `x` with left multiplication: `x --> Ax`.\n\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n X = ... 
# shape [..., N, R], batch matrix, R > 0.\n\n Y = operator.matmul(X)\n Y.shape\n ==> [..., M, R]\n\n Y[..., :, r] = sum_j A[..., :, j] X[j, r]\n ```\n\n Args:\n x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as\n `self`. See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.\n adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is\n the hermitian transpose (transposition and complex conjugation).\n name: A name for this `Op`.\n\n Returns:\n A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`\n as `self`.\n \"\"\"\n if isinstance(x, LinearOperator):\n if adjoint or adjoint_arg:\n raise ValueError(\".matmul not supported with adjoints.\")\n if (x.range_dimension is not None and\n self.domain_dimension is not None and\n x.range_dimension != self.domain_dimension):\n raise ValueError(\n \"Operators are incompatible. Expected `x` to have dimension\"\n \" {} but got {}.\".format(self.domain_dimension, x.range_dimension))\n return np.matmul(self.to_dense(), x.to_dense())\n\n return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def _matvec(self, x, adjoint=False):\n x_mat = x[..., None]\n y_mat = self.matmul(x_mat, adjoint=adjoint)\n return np.squeeze(y_mat, axis=-1)\n\n def matvec(self, x, adjoint=False, name=\"matvec\"): # pylint: disable=unused-argument\n \"\"\"Transform [batch] vector `x` with left multiplication: `x --> Ax`.\n\n ```python\n # Make an operator acting like batch matric A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n\n X = ... # shape [..., N], batch vector\n\n Y = operator.matvec(X)\n Y.shape\n ==> [..., M]\n\n Y[..., :] = sum_j A[..., :, j] X[..., j]\n ```\n\n Args:\n x: `Tensor` with compatible shape and same `dtype` as `self`.\n `x` is treated as a [batch] vector meaning for every set of leading\n dimensions, the last dimension defines a vector.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. 
If `True`, left multiply by the adjoint: `A^H x`.\n name: A name for this `Op`.\n\n Returns:\n A `Tensor` with shape `[..., M]` and same `dtype` as `self`.\n \"\"\"\n x = np.array(x)\n return self._matvec(x, adjoint=adjoint)\n\n def _determinant(self):\n logging.warn(\n \"Using (possibly slow) default implementation of determinant.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n return np.exp(self.log_abs_determinant())\n return np.linalg.det(self.to_dense())\n\n def determinant(self, name=\"det\"): # pylint: disable=unused-argument\n \"\"\"Determinant for every batch member.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\n Raises:\n NotImplementedError: If `self.is_square` is `False`.\n \"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Determinant not implemented for an operator that is expected to \"\n \"not be square.\")\n return self._determinant()\n\n def _log_abs_determinant(self):\n logging.warn(\n \"Using (possibly slow) default implementation of determinant.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n diag = np.diagonal(\n np.linalg.cholesky(self.to_dense()), axis1=-2, axis2=-1)\n return 2 * np.sum(np.log(diag), axis=-1)\n _, log_abs_det = np.linalg.slogdet(self.to_dense())\n return log_abs_det\n\n def log_abs_determinant(self, name=\"log_abs_det\"): # pylint: disable=unused-argument\n \"\"\"Log absolute value of determinant for every batch member.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\n Raises:\n NotImplementedError: If `self.is_square` is `False`.\n \"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Determinant not implemented for an operator that is expected to \"\n \"not be square.\")\n return self._log_abs_determinant()\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n \"\"\"Default implementation of _solve.\"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Solve is not yet implemented for non-square operators.\")\n logging.warn(\n \"Using (possibly slow) default implementation of solve.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n rhs = _adjoint(rhs) if adjoint_arg else rhs\n # if self._can_use_cholesky():\n # return _matrix_solve_with_broadcast( # TODO(iansf): Use cholesky_solve.\n # np.linalg.cholesky(self.to_dense()), rhs)\n return _matrix_solve_with_broadcast(\n self.to_dense(), rhs, adjoint=adjoint)\n\n def solve(self, rhs, adjoint=False, adjoint_arg=False, name=\"solve\"): # pylint: disable=unused-argument\n \"\"\"Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. See class docstring for details.\n\n Examples:\n\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n # Solve R > 0 linear systems for every member of the batch.\n RHS = ... 
# shape [..., M, R]\n\n X = operator.solve(RHS)\n # X[..., :, r] is the solution to the r'th linear system\n # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]\n\n operator.matmul(X)\n ==> RHS\n ```\n\n Args:\n rhs: `Tensor` with same `dtype` as this operator and compatible shape.\n `rhs` is treated like a [batch] matrix meaning for every set of leading\n dimensions, the last two dimensions defines a matrix.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, solve the system involving the adjoint\n of this `LinearOperator`: `A^H X = rhs`.\n adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`\n is the hermitian transpose (transposition and complex conjugation).\n name: A name scope to use for ops added by this method.\n\n Returns:\n `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.\n\n Raises:\n NotImplementedError: If `self.is_non_singular` or `is_square` is False.\n \"\"\"\n if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Exact solve not implemented for an operator that is expected to \"\n \"be singular.\")\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Exact solve not implemented for an operator that is expected to \"\n \"not be square.\")\n rhs = np.array(rhs)\n\n return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def _solvevec(self, rhs, adjoint=False):\n \"\"\"Default implementation of _solvevec.\"\"\"\n rhs_mat = rhs[..., None]\n solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n return np.squeeze(solution_mat, axis=-1)\n\n def solvevec(self, rhs, adjoint=False, name=\"solve\"): # pylint: disable=unused-argument\n \"\"\"Solve single equation with best effort: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. See class docstring for details.\n\n Examples:\n\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n # Solve one linear system for every member of the batch.\n RHS = ... # shape [..., M]\n\n X = operator.solvevec(RHS)\n # X is the solution to the linear system\n # sum_j A[..., :, j] X[..., j] = RHS[..., :]\n\n operator.matvec(X)\n ==> RHS\n ```\n\n Args:\n rhs: `Tensor` with same `dtype` as this operator.\n `rhs` is treated like a [batch] vector meaning for every set of leading\n dimensions, the last dimension defines a vector. See class docstring\n for definition of compatibility regarding batch dimensions.\n adjoint: Python `bool`. 
If `True`, solve the system involving the adjoint\n        of this `LinearOperator`: `A^H X = rhs`.\n      name: A name scope to use for ops added by this method.\n\n    Returns:\n      `Tensor` with shape `[..., N]` and same `dtype` as `rhs`.\n\n    Raises:\n      NotImplementedError: If `self.is_non_singular` or `is_square` is False.\n    \"\"\"\n    rhs = np.array(rhs)\n\n    return self._solvevec(rhs, adjoint=adjoint)\n\n  def adjoint(self, name=\"adjoint\"):  # pylint: disable=unused-argument\n    \"\"\"Returns the adjoint of the current `LinearOperator`.\n\n    Given `A` representing this `LinearOperator`, return `A*`.\n    Note that calling `self.adjoint()` and `self.H` are equivalent.\n\n    Args:\n      name: A name for this `Op`.\n\n    Returns:\n      `LinearOperator` which represents the adjoint of this `LinearOperator`.\n    \"\"\"\n    if self.is_self_adjoint is True:  # pylint: disable=g-bool-id-comparison\n      return self\n    return _adjoint(self)\n\n  # self.H is equivalent to self.adjoint().\n  H = property(adjoint, None)\n\n  def inverse(self, name=\"inverse\"):  # pylint: disable=unused-argument\n    \"\"\"Returns the inverse of this `LinearOperator`.\n\n    Given `A` representing this `LinearOperator`, return `A^-1`. Note that in\n    this numpy backend the result is a dense `ndarray`, not a\n    `LinearOperator`.\n\n    Args:\n      name: A name scope to use for ops added by this method.\n\n    Returns:\n      Dense `ndarray` representing the inverse of this operator.\n\n    Raises:\n      ValueError: When the `LinearOperator` is not hinted to be `non_singular`\n        or is expected to not be square.\n    \"\"\"\n    if self.is_square is False:  # pylint: disable=g-bool-id-comparison\n      raise ValueError(\"Cannot take the inverse: This operator represents \"\n                       \"a non-square matrix.\")\n    if self.is_non_singular is False:  # pylint: disable=g-bool-id-comparison\n      raise ValueError(\"Cannot take the inverse: This operator represents \"\n                       \"a singular matrix.\")\n\n    return np.linalg.inv(self.to_dense())\n\n  def cholesky(self, name=\"cholesky\"):  # pylint: disable=unused-argument\n    \"\"\"Returns a Cholesky factor as a dense `ndarray`.\n\n    Given `A` representing this `LinearOperator`, if `A` is positive definite\n    self-adjoint, return `L`, where `A = L L^T`, i.e. the Cholesky\n    decomposition. Note that in this numpy backend the factor is returned as\n    a dense `ndarray`, not wrapped back into a `LinearOperator`.\n\n    Args:\n      name: A name for this `Op`.\n\n    Returns:\n      Lower triangular `ndarray` from the Cholesky decomposition.\n\n    Raises:\n      ValueError: When the `LinearOperator` is not hinted to be positive\n        definite and self adjoint.\n    \"\"\"\n\n    if not self._can_use_cholesky():\n      raise ValueError(\"Cannot take the Cholesky decomposition: \"\n                       \"Not a positive definite self adjoint matrix.\")\n    return np.linalg.cholesky(self.to_dense())\n\n  def _to_dense(self):\n    \"\"\"Generic and often inefficient implementation. Override often.\"\"\"\n    logging.warn(\"Using (possibly slow) default implementation of to_dense.\"\n                 \" Converts by self.matmul(identity).\")\n    batch_shape = self.batch_shape\n\n    n = self.domain_dimension\n\n    eye = np.eye(n, dtype=self.dtype)\n    # Broadcast against batch_shape + [1, 1] (not batch_shape + [1]) so the\n    # identity expands to shape batch_shape + [n, n].\n    eye = eye * np.ones(batch_shape, dtype=self.dtype)[..., None, None]\n\n    return self.matmul(eye)\n\n  def to_dense(self, name=\"to_dense\"):  # pylint: disable=unused-argument\n    \"\"\"Return a dense (batch) matrix representing this operator.\"\"\"\n    return self._to_dense()\n\n  def _diag_part(self):\n    \"\"\"Generic and often inefficient implementation. 
Override often.\"\"\"\n return np.diagonal(self.to_dense(), axis1=-2, axis2=-1)\n\n def diag_part(self, name=\"diag_part\"): # pylint: disable=unused-argument\n \"\"\"Efficiently get the [batch] diagonal part of this operator.\n\n If this operator has shape `[B1,...,Bb, M, N]`, this returns a\n `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where\n `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.\n\n ```\n my_operator = LinearOperatorDiag([1., 2.])\n\n # Efficiently get the diagonal\n my_operator.diag_part()\n ==> [1., 2.]\n\n # Equivalent, but inefficient method\n tf.matrix_diag_part(my_operator.to_dense())\n ==> [1., 2.]\n ```\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n diag_part: A `Tensor` of same `dtype` as self.\n \"\"\"\n return self._diag_part()\n\n def _trace(self):\n return np.sum(self.diag_part(), axis=-1)\n\n def trace(self, name=\"trace\"): # pylint: disable=unused-argument\n \"\"\"Trace of the linear operator, equal to sum of `self.diag_part()`.\n\n If the operator is square, this is also the sum of the eigenvalues.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n return self._trace()\n\n def _add_to_tensor(self, x):\n # Override if a more efficient implementation is available.\n return self.to_dense() + x\n\n def add_to_tensor(self, x, name=\"add_to_tensor\"): # pylint: disable=unused-argument\n \"\"\"Add matrix represented by this operator to `x`. Equivalent to `A + x`.\n\n Args:\n x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.\n name: A name to give this `Op`.\n\n Returns:\n A `Tensor` with broadcast shape and same `dtype` as `self`.\n \"\"\"\n return self._add_to_tensor(x)\n\n def _can_use_cholesky(self):\n return self.is_self_adjoint and self.is_positive_definite\n\n\nclass LinearOperatorFullMatrix(LinearOperator):\n \"\"\"LinearOperatorFullMatrix numpy implementation.\"\"\"\n\n def __init__(self,\n matrix,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=\"LinearOperatorFullMatrix\"):\n r\"\"\"Initialize a `LinearOperatorFullMatrix`.\n\n Args:\n matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.\n Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,\n `complex128`.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n TypeError: If `diag.dtype` is not an allowed type.\n \"\"\"\n self._matrix = _to_ndarray(matrix)\n\n super(LinearOperatorFullMatrix, self).__init__(\n dtype=self._matrix.dtype,\n graph_parents=[self._matrix],\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n def _shape(self):\n return self._matrix.shape\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n return _matmul_with_broadcast(\n self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)\n\n def _to_dense(self):\n return self._matrix\n\n\nclass LinearOperatorIdentity(LinearOperatorFullMatrix):\n \"\"\"LinearOperatorIdentity numpy implementation.\"\"\"\n\n def __init__(self,\n num_rows,\n batch_shape=None,\n dtype=None,\n is_non_singular=True,\n is_self_adjoint=True,\n is_positive_definite=True,\n is_square=True,\n assert_proper_shapes=False, # pylint: disable=unused-argument\n name=\"LinearOperatorIdentity\"):\n r\"\"\"Initialize a `LinearOperatorIdentity`.\n\n The `LinearOperatorIdentity` is initialized with arguments defining `dtype`\n and shape.\n\n This operator is able to broadcast the leading (batch) dimensions, which\n sometimes requires copying data. If `batch_shape` is `None`, the operator\n can take arguments of any batch shape without copying. See examples.\n\n Args:\n num_rows: Scalar non-negative integer `Tensor`. Number of rows in the\n corresponding identity matrix.\n batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading\n dimensions. If `None`, this operator has no leading dimensions.\n dtype: Data type of the matrix that this operator represents.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n assert_proper_shapes: Python `bool`. 
Ignored.\n      name: A name for this `LinearOperator`.\n\n    Raises:\n      ValueError: If `num_rows` is determined statically to be non-scalar, or\n        negative.\n      ValueError: If `batch_shape` is determined statically to not be 1-D, or\n        negative.\n      ValueError: If any of the following is not `True`:\n        `{is_self_adjoint, is_non_singular, is_positive_definite}`.\n    \"\"\"\n    dtype = dtype or np.float32\n    if not is_self_adjoint:\n      raise ValueError(\"An identity operator is always self adjoint.\")\n    if not is_non_singular:\n      raise ValueError(\"An identity operator is always non-singular.\")\n    if not is_positive_definite:\n      raise ValueError(\"An identity operator is always positive-definite.\")\n    if not is_square:\n      raise ValueError(\"An identity operator is always square.\")\n\n    matrix = np.eye(num_rows, dtype=dtype)\n    if batch_shape is not None:\n      # Broadcast out of place to batch_shape + [num_rows, num_rows]; an\n      # in-place `*=` cannot grow the [N, N] identity to a batched shape.\n      matrix = matrix * np.ones(batch_shape, dtype=dtype)[..., None, None]\n\n    super(LinearOperatorIdentity, self).__init__(\n        matrix=matrix,\n        is_non_singular=is_non_singular,\n        is_self_adjoint=is_self_adjoint,\n        is_positive_definite=is_positive_definite,\n        is_square=is_square,\n        name=name)\n\n\nclass LinearOperatorDiag(LinearOperatorFullMatrix):\n  \"\"\"LinearOperatorDiag numpy implementation.\"\"\"\n\n  def __new__(cls,\n              diag,\n              is_non_singular=None,\n              is_self_adjoint=None,\n              is_positive_definite=None,\n              is_square=None,\n              name=\"LinearOperatorDiag\"):\n    r\"\"\"Initialize a `LinearOperatorDiag`.\n\n    Args:\n      diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0`, `N >= 0`.\n        The diagonal of the operator. Allowed dtypes: `float16`, `float32`,\n        `float64`, `complex64`, `complex128`.\n      is_non_singular: Expect that this operator is non-singular.\n      is_self_adjoint: Expect that this operator is equal to its hermitian\n        transpose. If `diag.dtype` is real, this is auto-set to `True`.\n      is_positive_definite: Expect that this operator is positive definite,\n        meaning the quadratic form `x^H A x` has positive real part for all\n        nonzero `x`. Note that we do not require the operator to be\n        self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n TypeError: If `diag.dtype` is not an allowed type.\n ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.\n\n Returns:\n LinearOperatorFullMatrix.\n \"\"\"\n diag = _to_ndarray(diag)\n\n # Check and auto-set hints.\n if not utils.is_complex(diag.dtype):\n if is_self_adjoint is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"A real diagonal operator is always self adjoint.\")\n else:\n is_self_adjoint = True\n\n if is_square is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"Only square diagonal operators currently supported.\")\n is_square = True\n\n if not np.array(diag).shape:\n raise ValueError(\"Diagonal must have at least 1 dimension\")\n\n matrix = np.tile(\n np.eye(diag.shape[-1]), [1] * len(diag.shape) + [1]) * diag[..., None]\n return LinearOperatorFullMatrix(\n matrix=matrix,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n\nclass LinearOperatorScaledIdentity(LinearOperatorFullMatrix):\n \"\"\"LinearOperatorScaledIdentity numpy implementation.\"\"\"\n\n def __new__(cls,\n num_rows,\n multiplier,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=True,\n assert_proper_shapes=False, # pylint: disable=unused-argument\n name=\"LinearOperatorScaledIdentity\"):\n r\"\"\"Initialize a `LinearOperatorScaledIdentity`.\n\n The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which\n determines the size of each identity matrix, and a `multiplier`,\n which defines `dtype`, batch shape, and scale of each matrix.\n\n This operator is able to broadcast the leading (batch) dimensions.\n\n Args:\n num_rows: Scalar non-negative integer `Tensor`. Number of rows in the\n corresponding identity matrix.\n multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n assert_proper_shapes: Python `bool`. 
If `False`, only perform static\n checks that initialization and method arguments have proper shape.\n If `True`, and static checks are inconclusive, add asserts to the graph.\n name: A name for this `LinearOperator`\n\n Raises:\n ValueError: If `num_rows` is determined statically to be non-scalar, or\n negative.\n\n Returns:\n LinearOperatorFullMatrix.\n \"\"\"\n multiplier = _to_ndarray(multiplier)\n\n # Check and auto-set hints.\n if not utils.is_complex(multiplier.dtype):\n if is_self_adjoint is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"A real diagonal operator is always self adjoint.\")\n else:\n is_self_adjoint = True\n\n if not is_square:\n raise ValueError(\"A ScaledIdentity operator is always square.\")\n\n matrix = np.eye(num_rows, dtype=multiplier.dtype)\n matrix = multiplier[..., None, None] * matrix\n\n return LinearOperatorFullMatrix(\n matrix=matrix,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n\nclass LinearOperatorLowerTriangular(LinearOperatorFullMatrix):\n \"\"\"LinearOperatorLowerTriangular numpy implementation.\"\"\"\n\n def __new__(cls,\n tril,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=\"LinearOperatorLowerTriangular\"):\n r\"\"\"Initialize a `LinearOperatorLowerTriangular`.\n\n Args:\n tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.\n The lower triangular part of `tril` defines this operator. The strictly\n upper triangle is ignored.\n is_non_singular: Expect that this operator is non-singular.\n This operator is non-singular if and only if its diagonal elements are\n all non-zero.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. This operator is self-adjoint only if it is diagonal with\n real-valued diagonal entries. In this case it is advised to use\n `LinearOperatorDiag`.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n ValueError: If `is_square` is `False`.\n\n Returns:\n LinearOperatorFullMatrix.\n \"\"\"\n if is_square is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\n \"Only square lower triangular operators supported at this time.\")\n is_square = True\n\n return LinearOperatorFullMatrix(\n matrix=tril,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n\n################################################################################\n# LinearOperators that definitely don't work are below.\n################################################################################\nclass LinearOperatorBlockDiag(LinearOperatorFullMatrix):\n\n def __new__(cls, *args, **kwargs):\n raise NotImplementedError\n\n\nclass LinearOperatorLowRankUpdate(LinearOperatorFullMatrix):\n\n def __new__(cls, *args, **kwargs):\n raise NotImplementedError\n\n","sub_path":"tensorflow_probability/python/internal/backend/numpy/linear_operator.py","file_name":"linear_operator.py","file_ext":"py","file_size_in_byte":45852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"6792747","text":"'''\nThis is the main function of our correction model\ninclude the process of:\nini data,\ntraining noisy channel and language model,\nevaluate the accuracy and output the result text\n\n\nBy Hanze Dong, Oct, 2017\n\n'''\nfrom lm import *\nfrom inidata import *\nfrom choose_corpus import *\nfrom nc import *\nimport time\nimport nltk\nimport numpy as np\nfrom pre_trained_nc import *\nimport os\n\nt1 = time.clock()\n# Ini data and set parameters\ntext,num_of_err,param = inidata()\nparam['smoothing_method']='kn'\nparam['dict']=set(np.loadtxt('vocab.txt',dtype=bytes).astype(str))\nparam['n']=3\nparam['choose_corpus']=False\nparam['candi_vocab'] = pre_train_nc()\nn_ans,ans=np.loadtxt('ans.txt',dtype=bytes,delimiter='\\t',unpack=True).astype('str')\nif param['choose_corpus']==True:\n\ttr_dataset =choose_corpus(text)\nelse:\n\ttr_dataset = np.array(nltk.corpus.reuters.words()[:100000])\n# Train language model\ntrain_model = n_gram(param)\ntrain_model.train(tr_dataset)\n# Train noisy channel\nstart = time.clock()\nnc = noisy_channel(param['dict'],param['dict'],param)\nnc.confusion_matrix()\nend = time.clock()\nprint('Time of noisy channel:',end-start)\n# Corrction\ntext_edit = ['' for i in range(len(text))]\n\nfor i in range(len(text_edit)):\n\tstart = time.clock()\n\ttext_edit[i]=check_sent(text[i], num_of_err[i], train_model, nc, param)\n\tend = time.clock()\n\tprint(text_edit[i])\n\tprint('Time of sentence %d:'%(i+1), end - start)\n# Evaluation\nk = np.zeros([1000])\nfor i in range(len(ans)):\n\tif ans[i].replace(' ','') == text_edit[i].replace(' ',''):k[i]=1\nprint('ACCURACY =',sum(k)/1000)\nprint(np.where(k==0))\nt2 = time.clock()\nprint('Total Time=',t2-t1)\n# Output\nresult = open('result.txt','w')\nfor i in range(len(text_edit)):\n\tresult.write('%d\\t'%(i+1))\n\tresult.write(text_edit[i])\n\tresult.write('\\n')\n\n\n\n\n\n\n\n\n\n\n\t\t\n","sub_path":"program/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"75799294","text":"# Local Storage Class\n\nfrom custom_print import print_dbg,print_err\nimport time\nimport os\n\nclass StorageUnit:\n def __init__(self,time,temp,rain, rainday,hmdty,pres,aws,mws,wdir,dewpt):\n self.time = time\n self.temperature = temp\n self.rainfall = rain\n self.rainfall_day = rainday\n self.humidity = hmdty\n self.pressure = pres\n self.avgwspd = aws\n self.maxwspd = mws\n self.wdir = wdir\n self.dewpoint = dewpt\n\n def str(self):\n return \"{},{},{},{},{},{},{},{},{},{}\".format(self.time,self.temperature,self.rainfall, self.rainfall_day,self.humidity,self.pressure,self.avgwspd,self.maxwspd,self.wdir,self.dewpoint)\n\nclass LocalStorage:\n def __init__(self, location):\n self.location = location\n self.str_store = []\n self.store = []\n\n # If the file data.csv exists, read all data from it as a string,\n # making each line an array of the elements within that line, \n # converting values to integers where appropriate.\n # Data is stored in the following order:\n # time, temp, rain(past whole hour), rain(past day), humidity, pressure, avg wind spd, max wind spd, wind dir, dewpoint\n if os.path.isfile(location):\n with open(location,'r') as f:\n self.str_store = f.read().splitlines()\n\n for line in self.str_store:\n tmp = [item.strip() for item in line.split(',')]\n if len(tmp) < 10:\n print_dbg(\"LocalStorage-init\",\"Corrupt or incomplete line in storage: \"+line)\n continue\n for i in range(len(tmp)):\n if tmp[i] != '':\n try:\n tmp[i] = int(tmp[i])\n except ValueError:\n try:\n tmp[i] = float(tmp[i])\n except ValueError:\n pass\n self.store.append(StorageUnit(tmp[0],tmp[1],tmp[2],tmp[3],tmp[4],tmp[5],tmp[6],tmp[7],tmp[8],tmp[9]))\n\n else:\n with open(location,'w') as f:\n pass\n\n print_dbg(\"LocalStorage-store\",self.store)\n\n\n def get_last_four_hours(self):\n ret = {}\n for storage_unit in self.store:\n if storage_unit.time >= time.time()-4*60*60:\n if ret == {}:\n ret['WindSp'] = storage_unit.avgwspd\n ret['WindMax'] = storage_unit.maxwspd\n ret['Temperature'] = storage_unit.temperature\n ret['MaxTemperature'] = storage_unit.temperature\n ret['MinTemperature'] = storage_unit.temperature\n ret['Pressure'] = storage_unit.pressure\n ret['Humidity'] = storage_unit.humidity\n ret['DewPoint'] = storage_unit.dewpoint\n ret['Rain24Hours'] = storage_unit.rainfall_day\n continue\n\n if storage_unit.maxwspd > ret['WindMax']:\n ret['WindMax'] = storage_unit.maxwspd\n\n if storage_unit.temperature > ret['MaxTemperature']:\n ret['MaxTemperature'] = storage_unit.temperature\n\n if storage_unit.temperature < ret['MinTemperature']:\n ret['MinTemperature'] = storage_unit.temperature\n\n ret['WindSp'] = (ret['WindSp']+storage_unit.avgwspd)/2.0\n ret['Temperature'] = (ret['Temperature']+storage_unit.temperature)/2.0\n ret['Humidity'] = (ret['Humidity']+storage_unit.humidity)/2.0\n ret['Pressure'] = (ret['Pressure']+storage_unit.pressure)/2.0\n ret['DewPoint'] = (ret['DewPoint']+storage_unit.dewpoint)/2.0\n\n ret['Rain24Hours'] = storage_unit.rainfall_day\n\n return ret\n\n\n def get_last_day(self):\n ret = {}\n for storage_unit in self.store:\n if storage_unit.time >= time.time()-24*60*60:\n if ret == {}:\n ret['WindSp'] = storage_unit.avgwspd\n ret['WindMax'] = storage_unit.maxwspd\n ret['Temperature'] = storage_unit.temperature\n ret['MaxTemperature'] = storage_unit.temperature\n ret['MinTemperature'] = storage_unit.temperature\n ret['Pressure'] = storage_unit.pressure\n ret['Humidity'] = storage_unit.humidity\n ret['DewPoint'] = storage_unit.dewpoint\n 
ret['Rain24Hours'] = storage_unit.rainfall_day\n                    continue\n\n                if storage_unit.maxwspd > ret['WindMax']:\n                    ret['WindMax'] = storage_unit.maxwspd\n\n                if storage_unit.temperature > ret['MaxTemperature']:\n                    ret['MaxTemperature'] = storage_unit.temperature\n\n                if storage_unit.temperature < ret['MinTemperature']:\n                    ret['MinTemperature'] = storage_unit.temperature\n\n                # NOTE: these pairwise updates form a recency-weighted running\n                # mean, not a true arithmetic mean over the whole window\n                ret['WindSp'] = (ret['WindSp']+storage_unit.avgwspd)/2.0\n                ret['Temperature'] = (ret['Temperature']+storage_unit.temperature)/2.0\n                ret['Humidity'] = (ret['Humidity']+storage_unit.humidity)/2.0\n                ret['Pressure'] = (ret['Pressure']+storage_unit.pressure)/2.0\n                ret['DewPoint'] = (ret['DewPoint']+storage_unit.dewpoint)/2.0\n\n                ret['Rain24Hours'] = storage_unit.rainfall_day\n\n        return ret\n\n\n    def get_last_week(self):\n        ret = {}\n\n        last_day_rain = 0\n        for storage_unit in self.store:\n            if storage_unit.time >= time.time()-24*60*60*7:\n                if ret == {}:\n                    ret['WindSp'] = storage_unit.avgwspd\n                    ret['WindMax'] = storage_unit.maxwspd\n                    ret['Temperature'] = storage_unit.temperature\n                    ret['MaxTemperature'] = storage_unit.temperature\n                    ret['MinTemperature'] = storage_unit.temperature\n                    ret['Pressure'] = storage_unit.pressure\n                    ret['Humidity'] = storage_unit.humidity\n                    ret['DewPoint'] = storage_unit.dewpoint\n                    ret['RainWeek'] = storage_unit.rainfall_day\n                    continue\n\n                if storage_unit.maxwspd > ret['WindMax']:\n                    ret['WindMax'] = storage_unit.maxwspd\n\n                if storage_unit.temperature > ret['MaxTemperature']:\n                    ret['MaxTemperature'] = storage_unit.temperature\n\n                if storage_unit.temperature < ret['MinTemperature']:\n                    ret['MinTemperature'] = storage_unit.temperature\n\n                ret['WindSp'] = (ret['WindSp']+storage_unit.avgwspd)/2.0\n                ret['Temperature'] = (ret['Temperature']+storage_unit.temperature)/2.0\n                ret['Humidity'] = (ret['Humidity']+storage_unit.humidity)/2.0\n                ret['Pressure'] = (ret['Pressure']+storage_unit.pressure)/2.0\n                ret['DewPoint'] = (ret['DewPoint']+storage_unit.dewpoint)/2.0\n\n                if storage_unit.rainfall_day == 0 or last_day_rain != storage_unit.rainfall_day:\n                    ret['RainWeek'] += storage_unit.rainfall_day\n                    last_day_rain = storage_unit.rainfall_day\n\n        return ret\n\n    def append(self, storage_unit):\n        if isinstance(storage_unit,StorageUnit):\n            self.store.append(storage_unit)\n            print_dbg(\"LocalStorage\",\"Added StorageUnit to store:\"+storage_unit.str())\n        else:\n            print_err(\"LocalStorage\",\"Tried to add non-StorageUnit item to store:\"+str(storage_unit))\n\n    def current(self):\n        return self.store[-1]\n\n    def current_dict(self):\n        cur = self.current()\n        return {'WindSp' : cur.avgwspd, 'WindMax' : cur.maxwspd, 'Temperature' : cur.temperature, 'Pressure' : cur.pressure, 'Humidity' : cur.humidity, 'DewPoint' : cur.dewpoint, 'RainThisHour' : cur.rainfall, 'Rain24Hours' : cur.rainfall_day, 'WindDir' : cur.wdir}\n\n    def __enter__(self):\n        # support use as a context manager so the store is flushed on exit\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        # __exit__ must accept the exception triple when used in a `with` block\n        with open(self.location,'w') as f:\n            for storage_unit in self.store:\n                f.write(storage_unit.str()+\"\\n\")\n","sub_path":"LocalStorage.py","file_name":"LocalStorage.py","file_ext":"py","file_size_in_byte":8242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"410115665","text":"# -*- coding: utf-8 -*-\nimport json\nimport requests\nfrom emergency_enrich import settings\n\n\ndef handle_dispatch(data):\n ''' Handles provided data'''\n # some data pre parsing to simplifiy the following functions\n lat = data['apparatus'][0]['unit_status']['arrived']['latitude']\n lon = data['apparatus'][0]['unit_status']['arrived']['longitude']\n day, hour = data['apparatus'][0]['unit_status']['arrived']['timestamp'].split('T')\n # parse down to just the hour of the day, assuming 24 template\n hour = hour.split(':')[0]\n weather_data = get_weather_data(lat, lon, day)\n data['weather'] = parse_weather(weather_data, hour)\n\n # Get Parcel data\n data['parcel_data'] = get_parcel_data(str(lat), str(lon))\n # convert to geojson\n return data_to_geojson(data, lat, lon)\n\ndef get_weather_data(lat, lon, date):\n ''' Get weather api data'''\n url = settings.WEATHER_API + 'key=' \\\n + settings.WEATHER_KEY + '&q=' + \\\n str(lat) + ','+ str(lon) +'&format=json&date=' + date\n res = requests.get(url)\n return json.loads(res.text)\n\ndef parse_weather(data, hour):\n ''' Get hour data'''\n hours = data['data']['weather'][0]['hourly']\n return closest_hour(hours, hour)\n\ndef closest_hour(list, hour):\n ''' Gets the closest by the hour entry from the weather api'''\n best_entry = None\n best_dist = None\n # loop through and grab the lowest absolute value \n for entry in list:\n dist = float(hour + '00') - float(entry['time'])\n if abs(dist) < best_dist or best_dist is None:\n best_entry = entry\n best_dist = abs(dist)\n # return the closest hourly weather data recorded for the day\n return best_entry\n\ndef get_parcel_data(lat, lon):\n ''' Get data from the richmond arc service'''\n coords = '{},{}'.format(lon, lat)\n url = settings.PARCEL_ARC_REST.format(coords)\n res = json.loads(requests.get(url).text)\n if len(res['features']) == 0:\n # return simple message so at least weather data is returned when no parcel data is found\n return {'Parcel': 'No Parcel data found'}\n else:\n return get_specific_parcel(str(res['features'][0]['attributes']['OBJECTID']))\n\ndef get_specific_parcel(obj_id):\n ''' Parse individual obj data'''\n url = settings.PARCEL_URL.format(obj_id)\n res = json.loads(requests.get(url).text)\n if res.has_key('feature') is True:\n res['feature']['properties'] = res['feature'].pop('attributes')\n geom = res['feature']['geometry']\n geom['coordinates'] = geom.pop('rings')\n geom['type'] = 'Polygon'\n return res\n else:\n return {'Parcel': 'No Parcel data found'}\n\ndef data_to_geojson(data, lat, lon):\n ''' convert to geojson'''\n return {\"type\": \"FeatureCollection\",\n \"features\": [{\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat]\n },\n \"properties\": {\n \"parcel_data\": data['parcel_data'],\n \"address\": data['address'],\n \"description\": data['description'],\n \"weather\": data['weather']\n }\n }\n ]\n }\n","sub_path":"enrich/parse_weather.py","file_name":"parse_weather.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"457818470","text":"import requests\nimport re\n\n\ndef find_title(url):\n resp = requests.get(url)\n title = re.findall(\"[a-zA-Z0-9.]+\", resp.text)\n title = title[0]\n return title[7:-8]\n\n\ndef find_emails(url):\n resp = requests.get(url)\n email_list = re.findall(\"[a-zA-Z0-9.]+@[a-zA-Z]+.[a-zA-Z]+\", resp.text)\n return email_list\n\n\ndef find_phone_numbers(url):\n resp = requests.get(url)\n phone_number_list = re.findall(\"'+'\", resp.text)\n return phone_number_list\n\ndef jetBrains():\n for i in range(500, 5000):\n resp = requests.get('https://jb.gg/' + str(i))\n print('https://jb.gg/' + str(i))\n print(resp.text)\n\n\ndef main():\n while True:\n url = input(\"Please enter the url or address of web page:\")\n action = input(\"m > emails || t > title || n > phone numbers\\nChoose an option : \")\n if 'http' not in url and 'https' not in url:\n url = 'http://' + url\n if action == 'm':\n print(find_emails(url))\n elif action == 't':\n print(find_title(url))\n elif action == 'n':\n print(find_phone_numbers(url))\n# def main():\n# # jetBrains()\n# resp = requests.get('https://jb.gg/' + '501')\n# print('https://jb.gg/' + '500')\n# print(len(resp.text))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"find-in-webpages.py","file_name":"find-in-webpages.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"192126542","text":"#\n# Implementation of the Forsythia key exchange protocol\n# (c) 2020 Sergey Grebnev, s.v.grebnev@yandex.ru\n#\n\n#from ecver.globals import p, initialize\nfrom gfp2 import GFp2element, getp, initialize as modulo_initialize\nfrom montgomery import MontgomeryCurve\nfrom montgomery import isogen2, isogen3, isoex2, isoex3\nfrom parameters import params\nimport time\nfrom random import randint\n\ne2 = 0\ne3 = 0\nf = 0\nxp2 = None\nxq2 = None\nxr2 = None\nxp3 = None\nxq3 = None\nxr3 = None\nA = None\nC = None\nE0 = None\n\n\ndef ParseParameters(params):\n global p, e2, e3, f, xp2, xq2, xr2, xp3, xq3, xr3, A, C, E0\n e2 = params['eA']\n e3 = params['eB']\n f = params['f']\n# Very first step -- initialization of the modulus\n modulo_initialize((2 ** e2) * (3 ** e3) * f - 1)\n A = GFp2element(params['A'][0], params['A'][1])\n E0 = MontgomeryCurve(A, GFp2element(1))\n xp2 = GFp2element(params['xp2'][0], params['xp2'][1])\n xq2 = GFp2element(params['xq2'][0], params['xq2'][1])\n xr2 = GFp2element(params['xr2'][0], params['xr2'][1])\n xp3 = GFp2element(params['xp3'][0], params['xp3'][1])\n xq3 = GFp2element(params['xq3'][0], params['xq3'][1])\n xr3 = GFp2element(params['xr3'][0], params['xr3'][1])\n\nParseParameters(params['forsythia128'])\nprint(\"p =\", getp())\nprint('E0:', E0, ';\\nj(E0) =', E0.jinv())\n\nstart = time.time()\nsk2 = randint(0, 2 ** e2)\nprint('skAlice =', sk2)\npkAlice = isogen2(E0, sk2, e2, xp2, xq2, xr2, xp3, xq3, xr3)\nprint('pkAlice = ', pkAlice)\n\n#c = MontgomeryCurve(1, 1)\n#c.seta(pkAlice[0], pkAlice[1], pkAlice[2])\n#print('Alice image curve', c)\n\nsk3 = randint(0, 3 ** e3)\nprint('skBob =', sk3)\npkBob = isogen3(E0, sk3, e3, xp2, xq2, xr2, xp3, xq3, xr3)\nprint('pkBob = ', pkBob)\n\n#c.seta(pkBob[0], pkBob[1], pkBob[2])\n#print('Bob image curve', c)\n\nj1 = isoex2(sk2, e2, pkBob)\nj2 = isoex3(sk3, e3, pkAlice)\n\nprint('jAlice = ', j1)\nprint('jBob = ', j2)\n\nend = time.time()\nprint('Time elapsed:', end - start, 's')\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"405037947","text":"from django.conf import settings\n\nfrom web.models import *\n\nimport logging, subprocess, sys, os\nfrom datetime import datetime\n\nlogger = logging.getLogger(\"site\")\n\ndef compress_video(video, src, dst, \n fps=settings.VIDEO_FPS, \n video_bitrate=settings.VIDEO_BITRATE, \n width=settings.VIDEO_WIDTH, \n height=settings.VIDEO_HEIGHT, \n codec=settings.VIDEO_CODEC,\n audio_encoder=settings.AUDIO_ENCODER,\n audio_bitrate=settings.AUDIO_BITRATE):\n \"\"\" Compresses a src video to dst using handbrake. All handbrake arguments\n must be strings\n\n HandBrakeCLI documentation: https://handbrake.fr/docs/en/latest/cli/cli-guide.html\n \"\"\"\n\n full_dst_path = os.path.join(settings.MEDIA_ROOT, dst)\n earlier = datetime.now()\n \n logger.info(\"Processing video...\")\n\n proc = subprocess.Popen([\"HandBrakeCLI\", \n \"-i\", src, # input video\n \"-o\", full_dst_path, # output video\n \"-e\", codec, # video codec\n \"-b\", video_bitrate, # video bitrate\n \"-r\", fps, # output fps\n \"-w\", width, # output video width\n \"-l\", height, # output video height\n \"-E\", audio_encoder, # audio encoder\n \"-B\", audio_bitrate], # audio bitrate \n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n\n (std_output, err_output) = proc.communicate()\n result = proc.wait()\n\n logger.info(\"Video processing took %.2f seconds.\" % (datetime.now() - earlier).total_seconds())\n\n if result == 0:\n src_size = os.path.getsize(src) / (1024 * 1024)\n dst_size = os.path.getsize(full_dst_path) / (1024 * 1024)\n\n logger.info(\"Compressed video (id %d) from %.1fMB to %.1fMB (%d%% of original)\" \n % (video.id, src_size, dst_size, (dst_size / src_size) * 100))\n\n # Delete the temp file since we didn't let Django do it for us\n os.remove(src)\n\n video.file.name = dst\n video.ready = True\n video.save()\n\n notif = Notification()\n notif.type = Notification.PROCESSING_SUCCESS\n notif.data = {\n \"video\": video.id\n }\n notif.owner = video.uploaded_by\n notif.clean()\n notif.save()\n else:\n logger.error((\"Processing error occured on video, error code %d\\n\" +\n \"*******************************************************\\n\" +\n \"** START OF ERROR DUMP **\\n\" +\n \"*******************************************************\\n\" +\n \"%s\\n\" +\n \"*******************************************************\\n\" +\n \"** END OF DUMP **\\n\" +\n \"*******************************************************\") \n % (result, std_output.rstrip(\"\\n\")))\n\n notif = Notification()\n notif.type = Notification.PROCESSING_FAILED\n notif.data = {\n \"video_title\": video.title\n }\n notif.owner = video.uploaded_by\n notif.clean()\n notif.save()\n\n video.delete()\n\n try:\n os.remove(src)\n except:\n pass\n\n # In case the file wasn't actually written to\n try:\n os.remove(full_dst_path)\n except:\n pass","sub_path":"lib/scripts/compress_video.py","file_name":"compress_video.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"293767608","text":"import torch\nimport argparse\n\nfrom cogdl import options\nfrom cogdl.tasks import build_task\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.utils import build_args_from_dict\n\n\ndef accuracy_check(x):\n for _, value in x.items():\n assert value > 0\n\n\ndef get_default_args():\n cuda_available = torch.cuda.is_available()\n default_dict = {'task': 'unsupervised_graph_classification',\n 'gamma': 0.5,\n 'device_id': [0 if cuda_available else 'cpu'],\n 'num_shuffle': 1,\n 'save_dir': '.',\n 'dropout': 0.5,\n 'patience': 1,\n 'epoch': 2,\n 'cpu': not cuda_available,\n 'lr': 0.001,\n 'weight_decay': 5e-4}\n return build_args_from_dict(default_dict)\n\ndef add_infograp_args(args):\n args.hidden_size = 64\n args.batch_size = 20\n args.target = 0\n args.train_num = 5000\n args.num_layers = 3\n args.unsup = True\n args.epoch = 3\n args.nn = True\n args.lr = 0.0001\n args.train_ratio = 0.7\n args.test_ratio = 0.1\n args.model = 'infograph'\n args.degree_feature = False\n return args\n\n\ndef add_graph2vec_args(args):\n args.hidden_size = 128\n args.window_size = 0\n args.min_count = 5\n args.dm = 0\n args.sampling = 0.0001\n args.iteration = 2\n args.epoch = 4\n args.nn = False\n args.lr = 0.001\n args.model = 'graph2vec'\n args.degree_feature = False\n return args\n\n\ndef add_dgk_args(args):\n args.hidden_size = 128\n args.window_size = 2\n args.min_count = 5\n args.sampling = 0.0001\n args.iteration = 2\n args.epoch = 4\n args.nn = False\n args.alpha = 0.01\n args.model = 'dgk'\n args.degree_feature = False\n return args \n \n\ndef test_infograph_proteins():\n args = get_default_args()\n args = add_infograp_args(args)\n args.dataset = 'proteins'\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n\n\n# def test_infograph_collab():\n# args = get_default_args()\n# args = add_infograp_args(args)\n# args.dataset = 'collab'\n# task = build_task(args)\n# ret = task.train()\n# accuracy_check(ret)\n\n\ndef test_infograph_imdb_binary():\n args = get_default_args()\n args = add_infograp_args(args)\n args.dataset = 'imdb-b'\n args.degree_feature = True\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n\n\ndef test_infograph_mutag():\n args = get_default_args()\n args = add_infograp_args(args)\n args.dataset = 'mutag'\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n\n\ndef test_graph2vec_mutag():\n args = get_default_args()\n args = add_graph2vec_args(args)\n args.dataset = 'mutag'\n print(args)\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n\n\ndef test_graph2vec_proteins():\n args = get_default_args()\n args = add_graph2vec_args(args)\n args.dataset = 'proteins'\n print(args)\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n\n\ndef test_dgk_mutag():\n args = get_default_args()\n args = add_dgk_args(args)\n args.dataset = 'mutag'\n print(args)\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n\n\ndef test_dgk_proteins():\n args = get_default_args()\n args = add_dgk_args(args)\n args.dataset = 'proteins'\n print(args)\n task = build_task(args)\n ret = task.train()\n accuracy_check(ret)\n \n\nif __name__ == \"__main__\": \n test_graph2vec_mutag()\n test_graph2vec_proteins()\n\n test_infograph_mutag()\n test_infograph_imdb_binary()\n test_infograph_proteins()\n \n test_dgk_mutag()\n test_dgk_proteins()\n 
\n\n","sub_path":"tests/tasks/test_unsupervised_graph_classification.py","file_name":"test_unsupervised_graph_classification.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"359441560","text":"\nimport matplotlib.pyplot as plt\nimport os\nimport shutil\n\n\ndef _getHeaderContent():\n output = []\n output.append(\"\\r\")\n output.append(\"\\r\")\n output.append(\" \\r\")\n return \"\".join(output)\n\ndef _getFooterContent():\n output = []\n output.append(\" \\r\")\n output.append(\"\\r\")\n return \"\".join(output)\n \ndef _getOperationContent(operation):\n output = []\n output.append(\" \\r\")\n output.append(\" %d\\r\"%operation.point)\n output.append(\" %d\\r\"%operation.arrival)\n output.append(\" %s\\r\"%operation.quantity)\n output.append(\" \\r\")\n return \"\".join(output)\n \ndef _getShiftContent(shift, index):\n output = []\n output.append(\" \\r\")\n output.append(\" %d\\r\"%index)\n output.append(\" %d\\r\"%shift.driver)\n output.append(\" %d\\r\"%shift.trailer)\n output.append(\" %d\\r\"%shift.start)\n output.append(\" \\r\")\n \n for operation in shift.operations_list:\n output.append(_getOperationContent(operation))\n \n output.append(\" \\r\") \n output.append(\" \\r\")\n return \"\".join(output)\n\n \ndef writeGreedySolution(solution, file_path):\n\n print(\"test: \", file_path)\n f = open(file_path, \"w\")\n \n f.write(_getHeaderContent())\n \n shift_index = 0\n for shift in solution.shifts_list:\n f.write(_getShiftContent(shift, shift_index))\n shift_index+=1\n \n f.write(_getFooterContent())\n ","sub_path":"src/export/export_greedy_solution_xml.py","file_name":"export_greedy_solution_xml.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"216874880","text":"#!/usr/bin/env python3\r\n\"\"\"Initialize Bayesian Optimization\"\"\"\r\n\r\n\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nGP = __import__('2-gp').GaussianProcess\r\n\r\n\r\nclass BayesianOptimization:\r\n \"\"\"Initialize Bayesian Optimization\"\"\"\r\n def __init__(self, f, X_init, Y_init, bounds, ac_samples,\r\n l=1, sigma_f=1, xsi=0.01, minimize=True):\r\n self.f = f\r\n self.X_s = np.linspace(bounds[0], bounds[1])[:, None]\r\n self.xsi = xsi\r\n self.minimize = minimize\r\n self.gp = GP(X_init, Y_init, l=l, sigma_f=sigma_f)\r\n self.Y_init = Y_init\r\n\r\n def acquisition(self):\r\n \"\"\"Calculate next best sample location\"\"\"\r\n fs, _ = self.gp.predict(self.gp.X)\r\n next_fs, vars = self.gp.predict(self.X_s)\r\n opt = np.min(fs)\r\n improves = opt - next_fs - self.xsi\r\n if not self.minimize:\r\n improve = -improves\r\n Z = improves / vars\r\n eis = improves * norm.cdf(Z) + vars * norm.pdf(Z)\r\n return self.X_s[np.argmax(eis)], eis\r\n","sub_path":"unsupervised_learning/0x03-hyperparameter_tuning/4-bayes_opt.py","file_name":"4-bayes_opt.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"384175398","text":"# takes seconds\n\n# -*- coding: utf-8 -*-\n\n\ndef numberInWords(number, spaces):\n\n col1 = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n col2 = [\"\", \"ten\", \"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n col3 = [\"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\"] \n \n numStr = \"\"\n andFlag = False\n \n if 10000 > number >= 1000:\n numStr += col1[int(number / 1000)]\n if spaces:\n numStr += \" thousand \"\n else:\n numStr += \"thousand\"\n number = number % 1000\n \n if 1000 > number >= 100:\n numStr += col1[int(number / 100)]\n if spaces:\n numStr += \" hundred \"\n else:\n numStr += \"hundred\"\n number = number % 100\n \n if number == 0:\n andFlag = False\n else:\n andFlag = True\n \n if andFlag:\n if spaces:\n numStr += \"and \"\n else:\n numStr += \"and\"\n \n if 100 > number > 19:\n numStr += col2[int(number / 10)]\n number = number % 10\n if spaces:\n numStr += \" \"\n \n if 20 > number >= 10:\n numStr += col3[number - 10]\n number = number % 10\n \n elif 10 > number > 0:\n numStr += col1[number]\n \n return numStr\n \n\n\ntotalLen = 0\n\nfor i in range(1000):\n totalLen += len(numberInWords(i + 1, False))\n print(i, totalLen)\n \nprint(totalLen)\n","sub_path":"0017-number-letter-counts.py","file_name":"0017-number-letter-counts.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"353608368","text":"\"\"\"Peak extraction.\"\"\"\n\nimport multiprocessing as mp\nimport numpy as np\nfrom peakfinder.util import compute_intervals, apply_async\nfrom peakfinder.mean_shift import mean_shift\n\n__author__ = \"Gianluca Corrado\"\n__copyright__ = \"Copyright 2016, Gianluca Corrado\"\n__license__ = \"MIT\"\n__maintainer__ = \"Gianluca Corrado\"\n__email__ = \"gianluca.corrado@unitn.it\"\n__status__ = \"Production\"\n\n\ndef _find_blocks(signal, window):\n \"\"\"Combine local maxima and minima to define blocks in a signal.\"\"\"\n minima = mean_shift(signal, window, mode='min')\n maxima = mean_shift(signal, window, mode='max')\n\n blocks = set()\n for i in range(minima.shape[0] - 1):\n if np.any(np.logical_and(maxima > minima[i], maxima < minima[i + 1])):\n start = minima[i]\n end = minima[i + 1]\n blocks.add((start, end, max(signal[start:end])))\n return np.array(sorted(list(blocks)))\n\n\ndef serial_find_blocks(profiles, window):\n \"\"\"Find blocks in profiles (serial version).\"\"\"\n blocks = {k: _find_blocks(profiles[k], window) for k in profiles}\n return blocks\n\n\ndef multiprocess_find_blocks(profiles, window, n_blocks=None,\n block_size=None, n_jobs=-1):\n \"\"\"Find blocks in profiles (parallel version).\"\"\"\n intervals = compute_intervals(\n size=len(profiles), n_blocks=n_blocks, block_size=block_size)\n if n_jobs == -1:\n pool = mp.Pool()\n else:\n pool = mp.Pool(n_jobs)\n results = [apply_async(pool, serial_find_blocks,\n args=(dict(profiles.items()[start:end]),\n window))\n for start, end in intervals]\n dicts = [p.get() for p in results]\n pool.close()\n pool.join()\n blocks = {k: v for d in dicts for k, v in d.items()}\n return blocks\n\n\ndef find_blocks(profiles, window, sorted_by_signal=False,\n n_blocks=None, block_size=None, n_jobs=-1):\n \"\"\"Find blocks for each transcript in profiles.\"\"\"\n if n_jobs == 1:\n blocks = serial_find_blocks(profiles, window)\n else:\n blocks = multiprocess_find_blocks(\n profiles, window, block_size=20, n_jobs=n_jobs)\n\n ret = list()\n for name in blocks.keys():\n for (start, end, high_val) in blocks[name]:\n ret.append((name, start, end, high_val))\n\n if not sorted_by_signal:\n # sort by transcript name and position\n ret = sorted(ret)\n else:\n ret = sorted(ret, key=lambda x: (x[3], x[0], x[1], x[2]))\n\n return ret\n","sub_path":"peakfinder/peak.py","file_name":"peak.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"358264328","text":"import pycountry\nfrom geopy import Nominatim\n\nfrom JsonParser import get_json\nfrom search.models import research as R\n\n\nclass countryToALpha3:\n def __init__(self, research_id):\n self.locations = []\n self.total = 0\n self.validLocationsRatio = 0\n self.invalidLocationsRatio = 0\n self.unknownLocationsRatio = 0\n research = R.getResearchById(research_id)\n self.totalTweets = research.numberOfTweets\n for tweet in get_json(research):\n if \"user\" in tweet:\n if str(tweet[\"user\"][\"location\"]) != \"None\":\n self.locations.append(tweet[\"user\"][\"location\"])\n self.countries = []\n self.topCountries = {}\n self.alpha2Countries = []\n self.alpha3Countries = []\n self.countryCounter = {}\n\n self.file = open('static/collected_coutries', 'w')\n\n def getAlpha2Countries(self):\n geolocator = Nominatim(timeout=60)\n for location in self.locations:\n geo = geolocator.geocode(location)\n if geo is not None:\n loc = geolocator.reverse(\"{}, {}\".format(geo.latitude, geo.longitude))\n self.alpha2Countries.append(loc.raw['address']['country_code'].upper())\n\n def getAlpha3Countries(self):\n for alpha2country in self.alpha2Countries:\n c = pycountry.countries.get(alpha_2=alpha2country)\n if c is not None:\n self.alpha3Countries.append(c.alpha_3)\n\n def getCountryCount(self):\n for alpha3Country in self.alpha3Countries:\n self.total += 1\n if alpha3Country in self.countryCounter:\n self.countryCounter[alpha3Country][\"numberOfThings\"] += 1\n else:\n self.countryCounter[alpha3Country] = {\"fillKey\": None, \"numberOfThings\": 1}\n\n def updateFillKeys(self):\n print(self.countryCounter)\n for country in self.countryCounter.values():\n if country[\"numberOfThings\"] / self.total >= 0.20:\n country[\"fillKey\"] = \"HIGH\"\n if (country[\"numberOfThings\"] / self.total < 0.20) \\\n and (country[\"numberOfThings\"] / self.total > 0.10):\n country[\"fillKey\"] = \"MEDIUM\"\n if country[\"numberOfThings\"] / self.total < 0.10:\n country[\"fillKey\"] = \"LOW\"\n\n def updateValidLocationsRatio(self):\n self.validLocationsRatio = len(self.locations)\n\n def updateInvalidLocationsRatio(self):\n self.invalidLocationsRatio = len(self.locations) - self.total\n\n def updateUnknownLocationsRatio(self):\n self.unknownLocationsRatio = self.totalTweets - len(self.locations)\n\n def getFullCountry(self, alpha3):\n c = pycountry.countries.get(alpha_3=alpha3)\n if c is not None:\n return c.name","sub_path":"static/scripts/mapping/countryToAlpha3.py","file_name":"countryToAlpha3.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"136436979","text":"import sqlite3\n\ndbase = sqlite3.connect('Our_data.db') #Open a database File\nprint('database opened')\n\ndbase.execute(''' CREATE TABLE IF NOT EXISTS employee_records(\n ID INT PRIMARY KEY NOT NULL,\n NAME TEXT NOT NULL,\n DIVISION TEXT NOT NULL,\n STARS INT NOT NULL\n)''')\nprint('table created')\n\ndef insert_record(ID,NAME,DIVISION, STARS):\n dbase.execute('''INSERT INTO employee_records (ID, NAME, DIVISION, STARS) \n VALUES(?, ?, ?, ?)''',(ID, NAME, DIVISION,STARS))\n dbase.commit()\n print('Records inserted')\n\ninsert_record(4, 'BOB', 'TEACHER', 1)\n\ndef read_Data():\n data = dbase.execute('''SELECT * FROM employee_records ORDER BY NAME''')\n for record in data:\n print('ID: '+str(record[0]))\n print('NAME: '+str(record[1]))\n print('DIVISION: '+str(record[2]))\n print('STARS: '+str(record[3]))\n\ndef update_record():\n dbase.execute('''UPDATE employee_records SET STARS=5 WHERE ID=2''')\n dbase.commit()\n print('UPDATED')\n\nupdate_record()\nprint('-------------------------------------')\nread_Data()\n\ndbase.close()\nprint('database closed')\n","sub_path":"Database/sqlite/31_update.py","file_name":"31_update.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"505922007","text":"import sys\nfrom PyQt5.QtWidgets import QDialog, QSystemTrayIcon, QMenu, QAction, QApplication, QLabel, QWidget\nfrom PyQt5.QtGui import QIcon, QPixmap, QColor\n\n\ndef requests_get():\n \"\"\"Not Empty\"\"\"\n url = \"https://beta.thejoyrun.com/bathroom/bathrooms?key=0fc37aac2993ed1894b1dfde9ef686b8\"\n\n import requests\n\n res = requests.get(url, verify=False)\n\n if res.status_code == 200:\n print(res.text)\n\n\nclass Main(QDialog):\n def __init__(self):\n super().__init__()\n self.loadMenu()\n self.initUI()\n\n def loadMenu(self):\n menuItems = [] # 菜单列表\n menuItems.append({\n \"text\": \"启动\",\n \"icon\": \"./icons/set.png\",\n \"event\": self.show,\n \"hot\": \"D\"\n })\n menuItems.append({\n \"text\": \"退出\",\n \"icon\": \"./icons/close.png\",\n \"event\": self.close,\n \"hot\": \"Q\"\n })\n self.trayIconMenu = QMenu(self) # 创建菜单\n #遍历绑定 显示的文字、图标、热键和点击事件\n #热键可能是无效的 我这里只是为了显示效果而已\n for i in menuItems:\n tmp = QAction(\n QIcon(i[\"icon\"]), i[\"text\"], self, triggered=i[\"event\"])\n tmp.setShortcut(self.tr(i[\"hot\"]))\n self.trayIconMenu.addAction(tmp)\n\n def initUI(self):\n self.trayIcon = QSystemTrayIcon(self) # <===创建通知栏托盘图标\n self.trayIcon.setIcon(QIcon(\"./joyrun/request/pic.ico\")) #<===设置托盘图标\n\n self.trayIcon.setContextMenu(self.trayIconMenu) #<===创建右键连接菜单\n self.trayIcon.show() #<====显示托盘\n\n self.setWindowIcon(QIcon(\"./joyrun/request/pic.ico\")) #<===设置窗体图标\n self.setGeometry(300, 300, 180, 300) # <===设置窗体打开位置与宽高\n self.setWindowTitle('窗体标题')\n\n self.show() #<====显示窗体\n # self.hide()#<====隐藏窗体\n # 默认不显示窗体\n\n # 重写窗体关闭事件,让其点击关闭时隐藏\n def closeEvent(self, event):\n if self.trayIcon.isVisible():\n self.trayIcon.hide()\n\n\nclass DlgMain(QDialog):\n def addSystemTray(self):\n minimizeAction = QAction(\"Mi&nimize\", self, triggered=self.hide)\n maximizeAction = QAction(\n \"Ma&ximize\", self, triggered=self.showMaximized)\n restoreAction = QAction(\"&Restore\", self, triggered=self.showNormal)\n quitAction = QAction(\"&Quit\", self, triggered=self.close)\n self.trayIconMenu = QMenu(self)\n self.trayIconMenu.addAction(minimizeAction)\n self.trayIconMenu.addAction(maximizeAction)\n self.trayIconMenu.addAction(restoreAction)\n self.trayIconMenu.addSeparator()\n self.trayIconMenu.addAction(quitAction)\n self.trayIcon = QSystemTrayIcon(self)\n self.trayIcon.setIcon(QIcon(\"skin/icons/logo.png\"))\n self.setWindowIcon(QIcon(\"skin/icons/logo.png\"))\n self.trayIcon.setContextMenu(self.trayIconMenu)\n self.trayIcon.show()\n sys.exit(self.exec_())\n\n def closeEvent(self, event):\n if self.trayIcon.isVisible():\n self.trayIcon.hide()\n\n\nfrom PyQt5.QtGui import QPixmap, QColor\n\n\nclass Tray(QWidget):\n def __init__(self):\n super().__init__()\n # super(self.__class__, self).__init__()\n\n self.winIconPix = QPixmap(16, 16)\n self.winIconPix.fill(QColor(0, 0, 100))\n self.setWindowIcon(QIcon(self.winIconPix))\n\n self.tray = QSystemTrayIcon(self)\n self.trayIconPix = QPixmap(16, 16)\n self.trayIconPix.fill(QColor(100, 0, 0))\n self.tray.setIcon(QIcon(self.trayIconPix))\n # self.tray.setIcon(QIcon(\"C:\\\\Users\\\\ShadowMimosa\\\\Documents\\\\GitRepository\\\\Top\\\\Top\\\\Joyrun\\\\request\\\\icons\\\\menu2.png\"))\n\n minimizeAction = QAction(\"Mi&nimize\", self, triggered=self.hide)\n maximizeAction = QAction(\n \"Ma&ximize\", self, triggered=self.showMaximized)\n restoreAction = QAction(\"&Restore\", self, triggered=self.showNormal)\n quitAction = QAction(\n \"&Quit\", self, triggered=QApplication.instance().quit) # 退出APP\n self.trayMenu = 
QMenu(self)\n self.trayMenu.addAction(minimizeAction)\n self.trayMenu.addAction(maximizeAction)\n self.trayMenu.addAction(restoreAction)\n self.trayMenu.addSeparator()\n self.trayMenu.addAction(quitAction)\n self.tray.setContextMenu(self.trayMenu)\n\n self.tray.show()\n self.show()\n\n def closeEvent(self, event):\n event.ignore() # 忽略关闭事件\n self.hide() # 隐藏窗体\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n # ex = Main()\n ex=Tray()\n\n # sys.exit(app.exec_())\n # import icon\n # tray = QSystemTrayIcon()\n # tray_icon = QIcon(\":/img/pic.ico\")\n # open('./joyrun/request/pic.ico', 'rb')\n # tray_icon = QIcon(\"./joyrun/request/pic.ico\")\n # icon = QIcon()\n # icon.addPixmap()\n # icon.addPixmap(QPixmap(\"pic.ico\"), QIcon.Normal, QIcon.Off)\n # tray.setIcon(tray_icon)\n # icon_pix = QPixmap(16, 16)\n # icon_pix.fill(QColor(100, 0, 0))\n # tray.setIcon(QIcon(icon_pix))\n # system=QWidget()\n # system.winIconPix=QPixmap(30,10)\n # system.winIconPix.fill(QColor(120,9,22))\n # system.setWindowIcon(QIcon(':/img/pic.ico'))\n # system.show()\n # tray=QSystemTrayIcon(QWidget())\n # tray.show()\n app.exec_()\n\n # print(a)\n\n\n# import sys\n# from PyQt5.QtWidgets import QWidget, QMenu, QAction, QSystemTrayIcon, QApplication, QVBoxLayout, QListWidget, QListWidgetItem\n# from PyQt5.QtGui import QIcon\n# from config import menu_items\n\n# class Tray(QWidget):\n# \"\"\"构建主要窗体\"\"\"\n\n# def __init__(self):\n# \"\"\"初始化\"\"\"\n# super().__init__()\n\n# self.load_menu()\n# self.init_ui()\n\n# def load_menu(self):\n# \"\"\"加载菜单\"\"\"\n\n# menu_items.append({\n# \"text\": \"启动\",\n# \"icon\": \"./icons/set.png\",\n# \"event\": self.show,\n# \"hot\": \"D\"\n# })\n# menu_items.append({\n# \"text\": \"退出\",\n# \"icon\": \"./icons/close.png\",\n# \"event\": self.close,\n# \"hot\": \"Q\"\n# })\n\n# self.tray_menu = QMenu(self)\n\n# for value in menu_items:\n# tmp = QAction(\n# QIcon(value[\"icon\"]),\n# value[\"text\"],\n# self,\n# triggered=value[\"event\"])\n# tmp.setShortcut(self.tr(value[\"hot\"]))\n\n# self.tray_menu.addAction(tmp)\n\n# def load_list(self):\n# \"\"\"\"\"\"\n# lv = QListWidget()\n# for index in range(len(menu_items)):\n# value = menu_items[index]\n# if not 'icon' in value.keys():\n# value[\"icon\"] = None\n# if not 'event' in value.keys():\n# value[\"event\"] = self.show\n# if not 'hot' in value.keys():\n# value[\"hot\"] = 'None'\n# qlv = QListWidgetItem(\n# QIcon(value[\"icon\"]),\n# self.tr(value[\"text\"]) + \" (\" + value[\"hot\"] + \")\")\n# qlv.event = value[\"event\"]\n# lv.insertItem(index + 1, qlv)\n# lv.itemDoubleClicked.connect(self.dbclickItem)\n# self.layout.addWidget(lv)\n\n# def dbclickItem(self, item):\n# \"\"\"还不知道是干啥的\"\"\"\n\n# item.event()\n\n# def init_ui(self):\n# \"\"\"初始化 UI\"\"\"\n# self.tray_icon = QSystemTrayIcon(self)\n# self.tray_icon.setIcon(QIcon(\"./icons/menu2.png\"))\n\n# self.tray_icon.setContextMenu(self.tray_menu)\n# self.tray_icon.show()\n\n# self.layout = QVBoxLayout()\n# self.load_list()\n# self.setLayout(self.layout)\n\n# self.setWindowIcon(QIcon(\"./icons/menu2.png\"))\n# self.setGeometry(300, 300, 220, 300)\n# self.setWindowTitle(\"joyrun\")\n\n# self.show()\n# import time\n# while True:\n# time.sleep(3)\n# self.load_list()\n\n# def closeEvent(self, event):\n# if self.tray_icon.isVisible():\n# self.tray_icon.hide()\n\n# if __name__ == \"__main__\":\n # import os\n # os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n# app = QApplication(sys.argv)\n# tray = Tray()\n\n# 
app.exec_()","sub_path":"Python/try_test/PyQt5/bathroom.py","file_name":"bathroom.py","file_ext":"py","file_size_in_byte":8404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"29985865","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport ephem\nimport pandas as pd\nimport numpy as np\n\n\n\n# ---------------------------------------------------------------------------\n# will run loops over those values\nplaces = ['Paris',\n 'Berlin',\n 'Prague',\n 'Warsaw',\n ]\n# places = list(cities._city_data.keys())\n\ndates = ['2017/01/15', '2017/02/15', '2017/03/15', '2017/04/15', '2017/05/15',\n '2017/06/15', '2017/07/15', '2017/08/15', '2017/09/15', '2017/10/15',\n '2017/11/15', '2017/12/15',]\n\n# ---------------------------------------------------------------------------\n# Observer example - test\nobserver = ephem.city('Warsaw')\ndate = ephem.Date('2017/05/22')\nobserver.date = date\nsun = ephem.Sun()\nx = ephem.localtime(observer.previous_rising(sun))\ny = ephem.localtime(observer.next_setting(sun))\n\n# ---------------------------------------------------------------------------\n# Run the loop to collect data about rising and setting of Sun\n# on a given day, in a given city.\n# Keep it in DataFrame with Cities as columns and dates as index.\n\nrises = pd.DataFrame(index=places, columns=dates)\nsettings = pd.DataFrame(index=places, columns=dates)\ntransits = pd.DataFrame(index=places, columns=dates)\n\n\n\nfor place in places:\n for date in dates:\n observer = ephem.city(place)\n ob_data = ephem.Date(date)\n observer.date = ob_data\n sun = ephem.Sun()\n\n a = ephem.localtime(observer.previous_rising(sun)).time()\n b = ephem.localtime(observer.previous_transit(sun)).time()\n c = ephem.localtime(observer.next_setting(sun)).time()\n\n rises.loc[place,date] = a\n transits.loc[place,date] = b\n settings.loc[place,date] = c\n\n\n# ---------------------------------------------------------------------------\n# Bokeh\n\nfrom bokeh.layouts import widgetbox, column\nfrom bokeh.models import ColumnDataSource, CustomJS\nfrom bokeh.models.widgets import Button, CheckboxButtonGroup, Select, Slider\nfrom bokeh.plotting import figure, output_file, show\n\n\n# output file name\noutput_file(\"visuals.html\")\n\n\n# data for plot - classic\nx = places\ny1 = rises.iloc[:,0]\ny2 = transits.iloc[:,0]\ny3 = settings.iloc[:,0]\n\n# create some widgets\n\n\n# select = Select(title=\"Option:\", value=\"Paris\", options=places)\n\n# with ColumnDataSource\nrises['display'] = pd.Series(rises.loc[:,'2017/01/15'], index=rises.index)\ntransits['display'] = pd.Series(transits.loc[:,'2017/01/15'], index=transits.index)\nsettings['display'] = pd.Series(settings.loc[:,'2017/01/15'], index=settings.index)\n\nsource1 = ColumnDataSource(rises)\nsource2 = ColumnDataSource(transits)\nsource3 = ColumnDataSource(settings)\n\nq = figure(plot_width=600, plot_height=500, x_range=places, y_axis_type='datetime')\n\nq.circle(x='index', y='display', size=15, color='navy', legend='Rise', source=source1)\nq.line(x='index', y='display', line_width=2, legend=\"Rise\", source=source1)\n\nq.square(x='index', y='display', size=15, color='orangered', legend='Transit', source=source2)\nq.line(x='index', y='display', line_width=2, color='orangered', legend='Transit', source=source2)\n\nq.circle(x='index', y='display', size=15, color='peru', legend='Setting', source=source3)\nq.line(x='index', y='display', line_width=2, color='peru', legend='Setting', source=source3)\n\nq.legend.location = \"bottom_left\"\nq.legend.click_policy=\"hide\"\n\n\ncallback_slider = CustomJS(args=dict(source1=source1, source2=source2, source3=source3), code=\n \"\"\"\n var data1 = 
source1.data\n var data2 = source2.data\n var data3 = source3.data\n var f = cb_obj.value\n var x = data1['index']\n var y1 = data1['display']\n var y2 = data2['display']\n var y3 = data3['display']\n if (f < 10 ) {\n date = '2017/0' + f + '/15'\n } else {\n date = '2017/' + f + '/15'\n }\n change1 = data1[date]\n change2 = data2[date]\n change3 = data3[date]\n for(var i = 0; i < x.length; i++) {\n y1[i] = change1[i]\n y2[i] = change2[i]\n y3[i] = change3[i]\n }\n source1.change.emit()\n source2.change.emit()\n source3.change.emit()\n \"\"\"\n )\nslider = Slider(start=1, end=12, value=1, step=1, title=\"Month Slider\")\nslider.js_on_change('value', callback_slider)\n\n#button_group = CheckboxButtonGroup(labels=[\"Rise\", \"Transition\", \"Set\"], active=[0,1,2])\n#button_group.js_on_change('active', callback_button)\n\nshow(column(q, widgetbox(slider, width=600)))\n","sub_path":"ephem_tries.py","file_name":"ephem_tries.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"423289175","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n# \n#\nfrom __future__ import print_function\n\nimport pickle, sys\nfrom optparse import OptionParser\nfrom ConfigParser import ConfigParser\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import precision_recall_fscore_support\nimport preprocessing\nimport corpus.flood\nfrom print_logging import Logger\n\n# @author: Khwunchai J.\n# @date: Fri Nov 28 19:56:15 2014\n\n\n# Configurations\nconfig = ConfigParser()\nconfig.read(\"config.ini\")\n\nn_jobs = config.getint('GENERAL', 'n_jobs')\nn_samples = config.getint('GRID_SEARCH', 'n_samples')\nn_folds = config.getint('GRID_SEARCH', 'n_folds')\nverbose = config.getint('GRID_SEARCH', 'verbose')\nngram_range = eval(config.get('GRID_SEARCH', 'ngram_range'))\nmax_df = config.getfloat('GRID_SEARCH', 'max_df')\nmin_df = config.getfloat('GRID_SEARCH', 'min_df')\n\nlogger = Logger(\"parameter_estimation.log\")\n################################################################################\n\n\ndef save_classifier(grid, clf_name=None):\n if clf_name is None:\n clf_name = str(grid.best_estimator_).split('(')[0].lower()\n \n fname = \"models/classifiers/\" + clf_name\n logger.debug(r'Saving classifier at \"{}\"'.format(fname))\n with open(fname, 'w') as f:\n pickle.dump(grid.best_estimator_, f)\n logger.debug(\"Classifier saved\")\n \n fname = \"models/grids/grid_\" + clf_name\n logger.debug(r'Saving grid search object at \"{}\"'.format(fname))\n with open(fname, 'w') as f:\n pickle.dump(grid, f)\n logger.debug(\"Grid search object saved\")\n\n\ndef grid_search(classifier, X_train, y_train, X_test, y_test, \n parameters, target_names, cv=3, \n verbose=False, n_jobs=n_jobs , save_clf=True): \n logger.info(\"Tuning parameters\")\n grid = GridSearchCV(classifier, param_grid=parameters, cv=cv, \n verbose=verbose, n_jobs=n_jobs)\n \n console = sys.stdout\n sys.stdout = logger\n grid.fit(X_train, y_train)\n sys.stdout = console\n \n logger.info(\"\"\"\n Best parameter set found in training set:\\n\n {}\\n\n with accuracy: {}\n \"\"\".format(grid.best_estimator_,grid.best_score_))\n \n classifier = grid.best_estimator_\n predicted = classifier.predict(X_test)\n \n report = classification_report(y_test, predicted, target_names=target_names)\n cm = confusion_matrix(y_test, predicted)\n \n logger.info(\"Detailed classification report: \\n{}\".format(report.encode('utf-8')))\n logger.info('Confusion matrix:\\n{}'.format(cm))\n \n if save_clf:\n if isinstance(save_clf, basestring):\n save_classifier(grid, save_clf)\n else:\n save_classifier(grid)\n \n return grid\n \n\ndef grid_search_knn(k_range=[1, 2, 3, 4, 5]):\n logger.info(\"k-NN Classifier parameters optimization\")\n parameters = [{'n_neighbors': k_range}]\n logger.info(\"Loading corpus\")\n n_samples = 4000\n data_train = corpus.flood.fetch_data(subset='train', n_samples=n_samples)\n X_train, y_train = data_train.data, data_train.target\n data_test = corpus.flood.fetch_data(subset='test', n_samples=n_samples)\n X_test, y_true = data_test.data, data_test.target\n target_names = data_train.target_names\n preprocessor = preprocessing.get_vectorizer(subset='train')\n logger.info(\"Data loaded\")\n logger.debug(\"Number of training samples: 
{}\".format(len(X_train)))\n logger.debug(\"Number of test samples: {}\".format(len(X_test)))\n\n logger.info(\"Preprocessing dataset\")\n X_train = preprocessor.transform(X_train)\n X_test = preprocessor.transform(X_test)\n logger.info(\"Features extracted\")\n logger.debug(\"Number of features: {}\".format(X_train.shape[1]))\n\n \n p_scores = []\n r_scores = []\n f_scores = []\n for k in k_range:\n classifier = KNeighborsClassifier(n_neighbors=k)\n classifier.fit(X_train, y_train)\n y_pred = []\n for chunk in X_test:\n y_pred.append(classifier.predict(chunk.toarray())[0])\n labels = unique_labels(y_true, y_pred)\n p, r, f1, _ = precision_recall_fscore_support(y_true, y_pred,\n labels=labels)\n p_scores.append(p)\n r_scores.append(r)\n f_scores.append(f1)\n \n pickle.dump(p_scores, open('p_scores', 'w'))\n pickle.dump(r_scores, open('r_scores', 'w'))\n pickle.dump(f_scores, open('f_scores', 'w'))\n \n plt.figure()\n plt.title(\"k-NN Grid-search\")\n plt.xlabel(\"k\")\n plt.ylabel(\"Performance\")\n\n r_scores = np.array(r_scores).reshape(-1, 3)\n p_scores = np.array(p_scores).reshape(-1, 3)\n f_scores = np.array(f_scores).reshape(-1, 3)\n\n plt.plot(k_range, f_scores[:,0], 'r', label=\"Situation\")\n plt.plot(k_range, f_scores[:,1], 'g', label=\"Help request\")\n plt.plot(k_range, f_scores[:,2], 'c', label=\"Other\")\n \n plt.legend(loc=\"best\")\n plt.axis('tight')\n plt.ylim((0, 1.01))\n plt.savefig('../docs/reports/grid_search_knn.png', bbox_inches='tight')\n plt.show()\n\n\ndef grid_search_svc(kernel='linear', param_grid=None):\n if param_grid is None:\n C_range = 2. ** np.arange(-5, 16, 4)\n gamma_range = 2. ** np.arange(-15, 6, 4)\n degree_range = np.arange(1, 5)\n if kernel == 'linear':\n param_grid = [{'C': C_range}] \n elif kernel == 'rbf':\n param_grid = [{'C': C_range,'gamma': gamma_range}]\n elif kernel == 'poly':\n param_grid = [{'C': C_range, 'degree': degree_range}]\n elif kernel == 'sigmoid':\n param_grid = [{'C': C_range,'gamma': gamma_range}]\n\n logger.info(\"Optimizing parameters for {} kernel SVC\".format(kernel))\n logger.info(\"Loading flood data\")\n data_train = corpus.flood.fetch_data(subset='train', n_samples=n_samples)\n X_train, y_train = data_train.data, data_train.target\n data_test = corpus.flood.fetch_data(subset='test', n_samples=n_samples)\n X_test, y_test = data_test.data, data_test.target\n target_names = data_train.target_names\n preprocessor = preprocessing.get_vectorizer(subset='train')\n logger.info(\"Data loaded\")\n logger.debug(\"Number of training samples: {}\".format(len(X_train)))\n logger.debug(\"Number of test samples: {}\".format(len(X_test)))\n \n logger.info(\"Preprocessing dataset\")\n X_train = preprocessor.transform(X_train)\n X_test = preprocessor.transform(X_test)\n logger.info(\"Features extracted\")\n logger.debug(\"Number of features: {}\".format(X_train.shape[1]))\n\n logger.info(\"Start optimizing parameters\")\n logger.debug(\"The parameters are: \" + str(param_grid))\n clf_name = 'svc_' + kernel\n grid = grid_search(SVC(kernel=kernel), X_train, y_train, X_test, y_test, \n target_names=target_names, parameters=param_grid, \n save_clf=clf_name, verbose=verbose)\n logger.debug(\"Optimization completed\")\n return grid\n \n \nif __name__ == \"__main__\":\n #gamma_range = 2. ** np.arange(-5, 15)\n #C_range = 2. 
** np.arange(-15, 3)\n #grid_search_svc(kernel='rbf', param_grid=[{'C': C_range, 'gamma': gamma_range}])\n k_range = range(1, 40, 4)\n grid_search_knn(k_range)\n'''\n op = OptionParser()\n op.add_option('--estimator',\n action='store', dest='estimator',\n help='Estimator to optimize parameters')\n op.add_option('--kernel',\n action='store', dest='kernel', default=None,\n help='Kernel for Support Vector Machines classifier')\n op.add_option('--n_neighbors', type=int, default=5,\n action='store', dest='n_neighbors',\n help='Number of neighbors in k-nearest neighbors classifier')\n op.add_option('--n_samples', type=int, default=-1,\n action='store', dest='n_samples',\n help='Number of samples')\n \n (opts, args) = op.parse_args()\n \n if opts.n_samples > 0:\n n_samples = opts.n_samples\n\n if opts.estimator == 'svc':\n if opts.kernel is None:\n grid_search_svc()\n else:\n grid_search_svc(kernel=opts.kernel)\n elif opts.estimator == 'knn':\n if opts.kernel is None:\n grid_search_knn()\n else:\n grid_search_knn(k_range=[opts.n_neighbors])\n else:\n grid_search_svc()\n'''\n","sub_path":"parameter_estimation.py","file_name":"parameter_estimation.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"587715552","text":"#Faça um programa em Pyhton que leia um número digitado\r\n#pelo usuário. Se este for maior que 8, calcular a divisão por 2\r\n#do valor, senão (se for menor ou igual que oito) calcular o\r\n#cubo do valor. Visualize o resultado em cada caso.\r\n\r\nnum = int(input(\" Digite um numero inteiro: \"))\r\n\r\nif num > 8:\r\n print(num,\"/2 = \",num/2)\r\n \r\nelse:\r\n print(num,\" **3 = \", num**3)\r\n","sub_path":"aula 09/exe5_4.py","file_name":"exe5_4.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"424352100","text":"import numpy as np\nimport h5py\nimport datetime\nimport sys\nimport os\nimport matplotlib.pyplot as plt \n\ndef val2ind(val, an_array):\n return np.argmin(abs(an_array-val), axis=0)\n\nclass sample_info(object):\n \"\"\"\n A simple class that contains sample info.\n\n Arguments:\n ----------\n\n sample_id : A string that identifies the sample, spaces will be\n substituted for underscores.\n Specifying this is mandatory.\n \n sample_meta_data : More verbose description of the data. Having this \n structured isn't a bad idea, but not enforced at this \n level. Not requiered but highly encouraged.\n\n sample_date : The date at which the data was taken. Default is \n today.\n\n Attributes:\n -----------\n\n show(out) : prints the contents of id, meta data and date to out.\n If out is None, it defaults to sys.stdout \n\n Examples:\n ---------\n\n si = sample_info(sample_id = 'C_elegans', \n sample_date='2004_04_18', \n sample_meta_data='Some details.')\n si.show() \n\n \"\"\"\n def __init__(self, sample_id=\"Unknown\", sample_meta_data=\"None\", sample_date=None):\n assert ' ' not in sample_id\n self.sample_id = sample_id\n\n self.sample_meta_data = sample_meta_data\n if sample_date is None:\n sample_date = str(datetime.date.today().year)+ \\\n '_'+str(datetime.date.today().month)+ \\\n '_'+str(datetime.date.today().day)\n\n self.sample_date = sample_date\n\n def show(self, out=None):\n if out is None:\n out = sys.stdout\n print(\"Sample_id : %s \"%self.sample_id, file=out)\n print(\"Sample date : %s \"%self.sample_date, file=out )\n print(\"Sample description:\\n \", self.sample_meta_data, file=out )\n\n\nclass ir_map(object):\n \"\"\" A simple data object that contains IR data.\n\n Arguments:\n ----------\n wavenumbers : An array of wavenumbers\n\n sample_info : A sample_info object\n \n filename : The hdf5 filename where data will be read from. Required for \n the 'hdf5' mode.\n\n data_type : Either 'transmission', 'reflection', or 'absorbance'(default)\n\n _mode : Choice between 'memory' or 'hdf5'\n If 'memory', the dataset currenly resides in memory\n If 'hdf5', the dataset currenly resides in an hdf5 file \n\n _N_obs : The number of rows in the 2D spectral matrix. \n\n Attributes:\n -----------\n As above plus\n\n add_data(self, spectrum, xy) : add data, provide a numpy array of spectra\n and xy positions \n\n _allocate_space(self) : this is a function that allocates space for\n an hdf5 file. 
no need to call it yourself.\n\n write_as_hdf5(self, filename) : Writes in-memory data as an hdf5 file.\n\n \"\"\"\n\n def __init__(self, \n wavenumbers = [],\n sample_info = sample_info(), \n filename = None, \n data_type = 'absorbance',\n with_image_cube = False, \n with_factorization = False):\n self.wavenumbers = wavenumbers\n self.N_w = len(wavenumbers)\n self.sample_info = sample_info\n self._h5_filename = filename\n self.xy = np.empty( (0,2) )\n self.data = np.empty( (0,self.N_w) )\n self._N_obs = 0\n self._with_image_cube = with_image_cube\n self._with_factorization = with_factorization\n \n assert data_type in ['transmission', 'reflection', 'absorbance']\n self.data_type = data_type\n \n if self._h5_filename is not None: # hdf5 mode, load data from hdf5 file\n self._mode = 'hdf5'\n \n self._h5= h5py.File(self._h5_filename,'r')\n with self._h5:\n self._root = list(self._h5.keys())[0] #get sample root group name\n self.sample_info.sample_id = self._h5[self._root+'/info/sample_id'][()]\n self.sample_info.sample_meta_data = self._h5[self._root+'/info/sample_meta_data'][()]\n self.sample_info.sample_date = self._h5[self._root+'/info/sample_date'][()]\n else: # memory mode, data is in memory\n self._mode = 'memory'\n self._h5 = None # this stays None until we allocate space \n \n# assert self.sample_info.sample_id != \"Unknown\", \"Missing 'sample_info' keyword parameter\" \n self._root = str(self.sample_info.sample_id) + '_' + str(self.sample_info.sample_date)\n\n def add_data(self, spectrum=None, xy=None, ind=[]):\n \"\"\"When working in memory mode, append the data in memory into the ir_map object.\n \n When working in hdf5 mode, load the data from the hdf5 file. \n \n Arguments:\n \n ----------\n spectrum : The 2D spectral data matrix \n \n xy : An array of xy positions\n \n ind : An array of indices for selecting specific rows in the 2D spectra matrix. \n If it is not given(default), full spectra matrix with all data points are loaded \n into the ir_map object, 1D int array\n \n \"\"\" \n if self._mode == 'memory':\n assert spectrum is not None, \"please provide a spectrum matrix\"\n assert xy is not None, \"please provide a xy position array\"\n \n if len(ind) == 0:\n self.xy = np.append(self.xy, xy, axis = 0)\n self.data = np.append(self.data, spectrum, axis = 0)\n else:\n self.xy = np.append(self.xy, xy[ind,:], axis = 0)\n self.data = np.append(self.data, spectrum[ind,:], axis = 0)\n \n if self._mode == 'hdf5':\n self._h5= h5py.File(self._h5_filename,'r')\n with self._h5:\n self.wavenumbers = self._h5[self._root+'/data/wavenumbers'][:]\n if len(ind) == 0:\n self.data = self._h5[self._root+'/data/spectra'][:,:]\n self.xy = self._h5[self._root+'/data/xy'][:,:]\n else:\n self.data = self._h5[self._root+'/data/spectra'][ind,:]\n self.xy = self._h5[self._root+'/data/xy'][ind,:] \n\n def add_image_cube(self, imageCube=None, imageMask=None, image_grid_param=None, ind=[]):\n \"\"\"When working in memory mode, load the image cube data in memory into the ir_map object\n and flatten the image cube to 2d spectrum matrix.\n \n When working in hdf5 mode, load the data from the hdf5 file and flatten the image cube \n to 2d spectrum matrix. \n \n Arguments:\n \n ----------\n imageCube : The spectral image cube, 3D float array \n\n imageMask : An image mask where non-blank pixels = True, 2D bool array\n \n image_grid_param : [x0, y0, dx, dy], 1D float list or array\n \n ind : An array of indices for selecting wavenumber range. 
If it is not given (default), the\n            full spectrum is loaded into the ir_map object, 1D int array\n        \"\"\" \n        self._with_image_cube = True\n        \n        if self._mode == 'memory':\n            assert imageCube is not None, \"please provide an image cube\"\n            assert imageMask is not None, \"please provide an image mask matrix\"\n            assert image_grid_param is not None, \"please provide image grid parameters : [x0, y0, dx, dy]\"\n            \n            self.imageMask = imageMask\n            self.image_grid_param = image_grid_param\n            self.N_y, self.N_x = imageMask.shape[0], imageMask.shape[1]\n            \n            if len(ind) == 0:  # read in the full spectra\n                self.imageCube = imageCube\n            else:  # read in partial spectra\n                self.imageCube = imageCube[:,:,ind]\n                assert len(ind) <= len(self.wavenumbers), \"The selected wavenumber index list is longer than the full wavenumber range\"\n                self.wavenumbers = self.wavenumbers[ind]\n                self.N_w = len(self.wavenumbers)\n            # convert the (possibly wavenumber-sliced) image cube to a 2d data matrix\n            # and load the data into self.data, self.xy\n            self.flatten_image_cube(self.imageCube, imageMask, image_grid_param)\n        \n        if self._mode == 'hdf5':\n            self._h5= h5py.File(self._h5_filename,'r')\n            with self._h5:\n                self.imageMask = self._h5[self._root+'/data/image/image_mask'][:,:]\n                self.image_grid_param = self._h5[self._root+'/data/image/image_grid_param'][:]\n                self.wavenumbers = self._h5[self._root+'/data/wavenumbers'][:]\n                if len(ind) == 0:  # read in the full spectra\n                    self.imageCube = self._h5[self._root+'/data/image/image_cube'][:,:,:]\n                else:  # read in partial spectra\n                    self.imageCube = self._h5[self._root+'/data/image/image_cube'][:,:,ind] \n                    assert len(ind) <= len(self.wavenumbers), \"The selected wavenumber index list is longer than the full wavenumber range\"\n                    self.wavenumbers = self.wavenumbers[ind]\n                    self.N_w = len(self.wavenumbers)\n            # convert image cube to 2d data matrix and load the data into self.data, self.xy \n            self.flatten_image_cube(self.imageCube, self.imageMask, self.image_grid_param)\n        \n    def add_factorization(self, component=None, component_coef=None, prefix='PCA', ind=[]):\n        \"\"\"When working in memory mode, load the factorized components data in memory into the ir_map object.\n        \n        When working in hdf5 mode, load the data from the hdf5 file. \n        \n        Arguments:\n        \n        ----------\n        component : The factorized spectral components, 2D float array \n                    (N_component x N_w)\n\n        component_coef : The coefficients of each component for every data point, \n                         2D float array (N_obs x N_component)\n        \n        prefix : Name of the factorization method, e.g. PCA, MCR\n        \n        ind : An array of indices for selecting specific rows in the 2D spectra matrix. 
\n If it is not given(default), full spectra matrix with all data points are loaded \n into the ir_map object, 1D int array\n \"\"\" \n self._with_factorization = True\n self._factor_prefix = prefix + '_'\n \n if self._mode == 'memory':\n assert component is not None, \"please provide a component matrix\"\n assert component_coef is not None, \"please provide a component_coef matrix\"\n \n self.component = component\n self.N_component = component.shape[0]\n if len(ind) == 0:# read in all data\n self.component_coef = component_coef\n else:# read in partial data points\n self.component_coef = component_coef[ind,:]\n \n if self._mode == 'hdf5':\n self._h5= h5py.File(self._h5_filename,'r')\n with self._h5:\n self.component = self._h5[self._root + '/data/factorization/' + self._factor_prefix + 'component'][:,:]\n self.N_component = self.component.shape[0]\n if len(ind) == 0: # read in all data\n self.component_coef = self._h5[self._root + '/data/factorization/' + self._factor_prefix + 'component_coef'][:,:]\n else: # read in partial data points\n self.component_coef = self._h5[self._root + '/data/factorization/' + self._factor_prefix + 'component_coef'][ind,:]\n # check the component dimensions match self.data dimensions\n assert self.component.shape[1] == self.data.shape[1], \"number of wavenumbers in component does not match that of spectra matrix\"\n assert self.component_coef.shape[0] == self.data.shape[0], \"number of rows in component_coef does not match that of spectra matrix\"\n \n def _allocate_space(self):\n \n self._h5= h5py.File(self._h5_filename,'w')\n data_group = self._h5.create_group( self._root )\n data_group.create_dataset( 'data/xy', \n (self._N_obs, 2), \n dtype='float32') # we just allocate space\n data_group.create_dataset('data/wavenumbers', \n data = self.wavenumbers, \n dtype='float32') # this we can keep in memory\n data_group.create_dataset('data/spectra', \n (self._N_obs,self.N_w), \n dtype='float32') # we just allocate space\n\n dt = h5py.special_dtype(vlen=str)\n data_group.create_dataset('info/sample_id',\n data = self.sample_info.sample_id,\n dtype= dt )\n\n data_group.create_dataset('info/sample_meta_data',\n data = self.sample_info.sample_meta_data,\n dtype= dt )\n\n data_group.create_dataset('info/sample_date',\n data = self.sample_info.sample_date,\n dtype= dt ) \n if self._with_image_cube:\n data_group.create_dataset('data/image/image_cube', \n (self.N_y, self.N_x, self.N_w), \n dtype='float32') # we just allocate space\n data_group.create_dataset('data/image/image_mask', \n (self.N_y, self.N_x), \n dtype='bool') # we just allocate space\n data_group.create_dataset('data/image/ind_rc_map', \n (self._N_obs, 3), \n dtype='int') # we just allocate space\n data_group.create_dataset('data/image/image_grid_param', \n data = self.image_grid_param,\n dtype='float32') # this we keep in memory\n \n if self._with_factorization:\n data_group.create_dataset('data/factorization/' + self._factor_prefix + 'component', \n (self.N_component, self.N_w), \n dtype='float32') # we just allocate space\n data_group.create_dataset('data/factorization/' + self._factor_prefix + 'component_coef', \n (self._N_obs, self.N_component), \n dtype='float32') # we just allocate space\n\n def write_as_hdf5(self, filename):\n \"\"\"Save the object out as an hdf5 file. \n \n Arguments:\n \n ----------\n filename : The hdf5 filename where data will be written to. 
\n \n \"\"\"\n # prevent overwriting the existing hdf5 files \n assert self._h5_filename != filename, \\\n \"The given hdf5 filename already exists. Please provide a different filename\"\n \n self._h5_filename = filename\n self._N_obs = self.data.shape[0]\n self._allocate_space( )\n \n with self._h5:\n self._h5[self._root + '/data/xy'][:,:] = self.xy\n self._h5[self._root + '/data/spectra'][:,:] = self.data\n \n if self._with_image_cube: #save image cube\n self._h5[self._root + '/data/image/image_cube'][:,:,:] = self.imageCube\n self._h5[self._root + '/data/image/image_mask'][:,:] = self.imageMask\n self._h5[self._root + '/data/image/ind_rc_map'][:,:] = self.ind_rc_map\n self._h5[self._root + '/data/image/image_grid_param'][:] = self.image_grid_param\n \n if self._with_factorization: #save factorization\n self._h5[self._root + '/data/factorization/' + self._factor_prefix +'component'][:,:] = self.component\n self._h5[self._root + '/data/factorization/' + self._factor_prefix +'component_coef'][:,:] = self.component_coef\n \n print(f'Data is saved as an HDF5 file. Filename : {filename}')\n \n def to_image_cube(self, N_x=64, N_y=64, x0=0, y0=0, dx=1, dy=1):\n \"\"\"Transform the spectra matrix to 3D image cube. \n \n The third dimension of the image cube is the spectra data. \n \n Arguments:\n ----------\n (x0, y0) : The starting location of the ir image. \n \n (N_x, N_y) : The size of the image.\n \n (dx, dy) : The step size of the image.\n \n Returns:\n --------\n imageCube : The spectral image cube, 3D float array \n\n imageMask : An image mask where non-blank pixels = True, 2D bool array\n \n pointCounts : A mask that shows measurement counts in each pixel, 2D int array\n \"\"\"\n \n self._with_image_cube = True\n \n self.N_x = N_x\n self.N_y = N_y\n self.image_grid_param = np.array([x0, y0, dx, dy], dtype='float32')\n \n self.imageCube = np.zeros((self.N_y, self.N_x, self.N_w), dtype='float32')\n self.pointCounts = np.zeros((self.N_y, self.N_x), dtype='int')\n ind_rc_map = np.zeros((self.xy.shape[0], 3), dtype='int') # ind to row-col mapping [i, row, col]\n \n x = np.arange(self.N_x)*dx + x0\n y = np.arange(self.N_y)*dy + y0\n \n for i in range(self.xy.shape[0]):\n ind_rc_map[i, 0] = i\n ind_rc_map[i, 1] = val2ind(self.xy[i, 1], y) # align y coordinate to get row\n ind_rc_map[i, 2] = val2ind(self.xy[i, 0], x) # align x coordinate to get col\n self.pointCounts[ind_rc_map[i, 1], ind_rc_map[i, 2]] += 1 # count how many measurements fall in a pixel \n self.imageCube[ind_rc_map[i, 1], ind_rc_map[i, 2], :] += self.data[i,:]# add all spectra that fall in a pixel\n \n self.imageCube /= np.where(self.pointCounts != 0, self.pointCounts,1)[:,:,np.newaxis]# get average spectra per pixel\n self.imageMask = self.pointCounts.astype('bool') # convert to boolean matrix\n self.ind_rc_map = ind_rc_map\n \n return self.imageCube, self.imageMask, self.pointCounts\n \n def flatten_image_cube(self, imageCube, imageMask, image_grid_param):\n \"\"\"Transform a 3D image cube into a spectra matrix using imageMask to filter out blank data points, \n and load the matrix into self.data, load the xy positions of the data points into self.xy\n \n Arguments:\n ----------\n imageCube : The spectral image cube, 3D float array \n\n imageMask : An image mask where non-blank pixels = True, 2D bool array\n \n image_grid_param : [x0, y0, dx, dy], 1D float list or array\n \"\"\"\n self.N_x = N_x = imageCube.shape[1]\n self.N_y = N_y = imageCube.shape[0]\n x0 = image_grid_param[0]\n y0 = image_grid_param[1]\n dx = 
image_grid_param[2]\n        dy = image_grid_param[3]\n        # set up xy grid and use imageMask to pull out non-blank pixel xy-coordinates\n        x = np.linspace(x0, x0+dx*(N_x-1), N_x)\n        y = np.linspace(y0, y0+dy*(N_y-1), N_y)\n        xv, yv = np.meshgrid(x, y)\n        xy_grid = np.zeros((N_y, N_x, 2))\n        xy_grid[:,:,0] = xv\n        xy_grid[:,:,1] = yv\n        \n        # set up image grid and use imageMask to pull out non-blank pixel row, col positions\n        x = np.arange(N_x)\n        y = np.arange(N_y)\n        X, Y = np.meshgrid(x,y)\n        ind_rc_map = np.zeros((len(X[imageMask]), 3), dtype='int') # ind to row-col mapping [i, row, col]\n        for i, (r,c) in enumerate(zip(Y[imageMask], X[imageMask])):\n            ind_rc_map[i,:] = [i, r, c]\n        \n        self.xy = xy_grid[imageMask,:]\n        self.data = imageCube[imageMask,:]\n        self.ind_rc_map = ind_rc_map\n\nif __name__ == \"__main__\":\n    si = sample_info(sample_id = 'C_elegans')\n    si.show() \n    # prepare sample data\n    np.random.seed(3)\n    N_wav = 100\n    N_obs = 100\n    waves = np.linspace(500,4000,N_wav)\n    data = np.random.uniform(0,1, (N_obs, N_wav) ) \n    xy = np.random.uniform(-5,5, (N_obs, 2) )\n    imageCube = data.reshape(-1, 10, N_wav)\n    imageMask = np.random.random((N_obs//10, 10)) > 0.5\n    x0, y0, dx, dy = 0, 0, 1, 1\n    image_grid_param = [x0, y0, dx, dy]\n    component = np.random.random((3, N_wav))\n    component_coef = np.random.random((imageMask.sum(), 3))\n    # test loading a 2d spectra matrix and writing it into an hdf5 file\n    ir_data = ir_map( waves, si)\n    ir_data.add_data( data, xy )\n    ir_data.write_as_hdf5('tst_file.h5')\n    \n    # test loading a 2d spectra matrix from an hdf5 file\n    ir_data2 = ir_map(filename ='tst_file.h5' )\n    ir_data2.add_data() # load the full data matrix\n    print(ir_data2.data.shape)\n    ir_data2.add_data(ind=np.arange(20)) # load a partial data matrix\n    print(ir_data2.data.shape)\n    os.remove('tst_file.h5')\n    \n    # test loading image cube data \n    ir_data3 = ir_map( waves, si) \n    ir_data3.add_image_cube(imageCube, imageMask, image_grid_param)\n    assert ir_data3.data.shape[0] == imageMask.sum(), \"number of rows in ir_data3.data doesn't match non-blank pixels in imageMask\"\n    r, c = np.where(imageMask)\n    assert np.all(ir_data3.xy[:,0] == c), \"x coordinates in ir_data3.xy don't match those of non-blank pixels in imageMask\"\n    assert np.all(ir_data3.xy[:,1] == r), \"y coordinates in ir_data3.xy don't match those of non-blank pixels in imageMask\"\n    \n    # loading factorization components\n    ir_data3.add_factorization(component, component_coef)\n    \n    # writing into an hdf5 file\n    ir_data3.write_as_hdf5('tst_file2.h5')\n    # show the hdf5 file dataset structure\n    lst=[]\n    with h5py.File('tst_file2.h5','r') as f:\n        root_name = list(f.keys())[0]\n        h = f[root_name]\n        f.visit(lst.append)\n        ind_rc_map = f[root_name + '/data/image/ind_rc_map'][:,:]\n    print(*lst, sep='\\n')\n    print(ind_rc_map[:20, :])\n    plt.imshow(imageMask)\n    \n    # test loading an image cube and factorization components from an hdf5 file\n    ir_data4 = ir_map(filename='tst_file2.h5')\n    ir_data4.add_image_cube()\n    ir_data4.add_factorization()\n    print(ir_data4.data.shape)\n    print(ir_data4.component.shape)\n    print(ir_data4.component_coef.shape)\n    \n    os.remove('tst_file2.h5')\n    \n    print('OK')\n","sub_path":"lbl_ir/lbl_ir/data_objects/ir_map.py","file_name":"ir_map.py","file_ext":"py","file_size_in_byte":22675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"338137627","text":"import ast\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.nets import vgg\nimport numpy as np\nimport os\nimport time\nimport cv2\n\n\nslim = tf.contrib.slim\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\nwith open(\"./data/flower.txt\", \"r\") as f:\n data = f.read()\n images_dicts = ast.literal_eval(data)\n\n\ndef get_image(filename):\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n image = cv2.imread(filename)\n image = np.array(cv2.resize(image, (224, 224))).reshape([1, 224, 224, 3]).astype(np.float)\n image[:, :, 0] -= _R_MEAN\n image[:, :, 1] -= _G_MEAN\n image[:, :, 2] -= _B_MEAN\n return image\n\n\nwith tf.Graph().as_default():\n x = tf.placeholder(\"float32\", [None, 224, 224, 3])\n with slim.arg_scope(vgg.vgg_arg_scope()):\n logits, end_points = vgg.vgg_16(\n x, num_classes=5, is_training=False)\n print(end_points)\n pred = tf.argmax(end_points[\"vgg_16/fc8\"], 1)\n print(pred.shape)\n saver = tf.train.Saver()\n load_fn = slim.assign_from_checkpoint_fn(\"./ckpt/model.ckpt\", slim.get_model_variables())\n with tf.Session() as sess:\n load_fn(sess)\n image = get_image(\"./data/flower_photos/sunflowers/3.jpg\")\n img = cv2.imread(\"./data/flower_photos/sunflowers/3.jpg\")\n t1 = time.time()\n prob = sess.run(pred, feed_dict={x: image})\n print(images_dicts[prob[0]])\n cv2.putText(img, images_dicts[prob[0]], (0, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0, (100, 200, 200), 2)\n print(time.time()-t1)\n cv2.imshow(\"hh\", img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n","sub_path":"code/finetune/image_demo.py","file_name":"image_demo.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"78777267","text":"import scipy.stats as stats\nimport pandas as pd \nimport numpy as np\nfrom statsmodels.formula.api import ols\nfrom statsmodels.stats.anova import anova_lm\nimport matplotlib.pyplot as plt \nimport urllib.request\n\n\nurl = 'https://raw.githubusercontent.com/pykwon/python/master/testdata_utf8/group3.txt'\ndata = np.genfromtxt(urllib.request.urlopen(url), delimiter=',')\nprint(data)\n\n#그룹별(3개) 네과목 시험점수 차이 검정\n# 귀무 : 그룹별 네과목 시험점수는 차이가 없다 \n# 대립 : 그룹별 네과목 시험점수의 차이는 있다.\n\n#data2 = pd.read_csv(urllib.request.urlopen(url))\n#print(data2)\n\ngr1 = data[data[:,1] == 1, 0]\n#print(grl)\ngr2 = data[data[:,1] == 2, 0]\ngr3 = data[data[:,1] == 3, 0]\n\nprint(stats.shapiro(gr1)[1]) #0.3336853086948395 > 0.05 정규성을띔\nprint(stats.shapiro(gr2)[1]) #0.6561065912246704 > 0.05 정규성을띔\nprint(stats.shapiro(gr3)[1]) #0.832481324672699 > 0.05 정규성을띔\n\n# 그룹 간 데이터 들의 분포를 시각화 \n#plot_data = [gr1,gr2,gr3]\n#plt.boxplot(plot_data)\n#plt.show()\n\nf_statistic, p_val = stats.f_oneway(gr1,gr2,gr3)\nprint('일원분산분석 결과 : f_statistic:%f , p_val:%f'%(f_statistic,p_val))\n# 일원분산분석 결과 : f_statistic:3.711336 , p_val:0.043589 <0.05 이므로 귀무기각\n \n# 그룹별 (3개) 시험점수는 차이가 있다 라는 의견이 통계적으로 유의하다\n\n#일원분산분석 방법2 - Linear Model 을 속성으로 사용 \ndf = pd.DataFrame(data, columns = ['value','group'])\n#print(df)\nlmodel = ols('value ~ C(group)', df).fit() # C(그룹칼럼..) : 범주형임을 명시적으로 표시 PR(>F)=p-value 0.043589\nprint(anova_lm(lmodel))\n\n\n#이원분산분석 : 집단 구분 요인2\nurl = 'https://raw.githubusercontent.com/pykwon/python/master/testdata_utf8/group3_2.txt'\ndata = pd.read_csv(url)\nprint(data.head(3))\nprint(data.tail(3))\n\n#귀무 : 관측자와 태아수 그룹에 따라 태아의 머리둘레에 차이가 없다. \n#대립 : 관측자와 태아수 그룹에 따라 태아의 머리둘레에 차이가 있다. \n\n# 시각화 \nplt.rc('font', family = 'malgun gothic')\ndata.boxplot(column = '머리둘레' , by='태아수' , grid = True)\n#plt.show() # 태아의 머리둘레는 차이가 있어 보임 . 관측자와 상호 작용이 있는지 분산분석으로 검정\nformula = '머리둘레 ~ C(태아수) + C(관측자수) + C(태아수):C(관측자수)'\nlm = ols(formula = formula, data = data).fit() #학습한 결과를가지고 객체를 만들어냄\nprint(anova_lm(lm)) # 아노바\n\n# C(태아수) 해석 : 1.051039e-27 < 0.05 이므로 머리둘레에 차이가 있다.\n\n# C(관측자수) 해석 : 6.497055e-03 < 0.05 이므로 머리둘레에 차이가 있다. \n\n# C(태아수):C(관측자수) 해석 : 3.295509e-01 > 0.05 이므로 머리둘레에 차이가 없다.\n \n# 결과 : 관측자수와 태아수는 머리둘레에 영향을 미치나 , 관측자수와 태아수에 상호 작용에 의한 영향은 없다.\n\nprint()\nformula2 = '머리둘레 ~ C(태아수) + C(관측자수)' \nlm2 = ols(formula = formula2, data = data).fit() # 상호작용 X\n\nprint(anova_lm(lm2)) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"py_hypo/pack1/hyp12anova.py","file_name":"hyp12anova.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"613882385","text":"from .models import Alarm\n\n\ndef check_alarms():\n \"\"\"\n Esta funcion se encarga de checkear el estado de todas las alarmas\n configuradas en el sistema.\n \"\"\"\n alarms = Alarm.objects.all()\n for alarm in alarms:\n alarm.check_alarm()\n alarm.save()\n","sub_path":"back/investSimulator/alarms/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"108579151","text":"'''\r\nCreated on Oct 13, 2018\r\n\r\n@author: pure\r\n'''\r\nimport tensorflow as tf\r\n\r\ndef convolution_2d(in_data_4d_tensor, weight_4d_tensor, bias_int, conv_strides_4arr, padding_type_str, relu_bool, name_str):\r\n new_name = name_str + '/conv'\r\n conv = tf.nn.conv2d(in_data_4d_tensor, weight_4d_tensor, strides=conv_strides_4arr, padding=padding_type_str, name=new_name)\r\n \r\n new_name = new_name + '/bias'\r\n conv_add_bias = tf.nn.bias_add(conv, bias_int, name=new_name)\r\n \r\n if relu_bool == True:\r\n new_name = new_name + '/relu'\r\n return tf.nn.relu(conv_add_bias, name=new_name)\r\n else:\r\n return conv_add_bias","sub_path":"TensorFlow/Semantic-Segmentation/FCN_Basic/ubuntu/function_wrapper.py","file_name":"function_wrapper.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"433618917","text":"\"\"\"\n@brief test log(time=20s)\n\nskip this test for regular run\n\"\"\"\n\nimport sys\nimport os\nimport unittest\n\n\ntry:\n import src\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\")))\n if path not in sys.path:\n sys.path.append(path)\n import src\n\ntry:\n import pyquickhelper as skip_\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"pyquickhelper\",\n \"src\")))\n if path not in sys.path:\n sys.path.append(path)\n if \"PYQUICKHELPER\" in os.environ and len(os.environ[\"PYQUICKHELPER\"]) > 0:\n sys.path.append(os.environ[\"PYQUICKHELPER\"])\n import pyquickhelper as skip_\n\n\nfrom src.pymyinstall.packaged import find_module_install\nfrom pyquickhelper.loghelper import fLOG\nfrom pyquickhelper.pycode import get_temp_folder\n\n\nclass TestDownloadPyOpenGL(unittest.TestCase):\n\n def test_install_pyopengl(self):\n fLOG(\n __file__,\n self._testMethodName,\n OutputPrint=__name__ == \"__main__\")\n\n if sys.platform.startswith(\"win\"):\n temp = get_temp_folder(__file__, \"temp_download_pyopengl\")\n m = find_module_install(\"OpenGL\")\n exe = m.download(\n temp_folder=temp,\n file_save=os.path.join(\n temp,\n \"out_page.html\"), source=\"2\")\n self.assertTrue(os.path.exists(exe))\n if \"accelerate\" in m.name:\n raise Exception(m.name)\n down = os.listdir(temp)\n if len(down) != 1:\n raise Exception(down)\n if \"accelerate\" in down[0]:\n raise Exception(down[0])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_install/test_download_pyopengl.py","file_name":"test_download_pyopengl.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"454300456","text":"from sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn import metrics\n\nfrom sklearn.svm import SVC\n\nclf = SVC(kernel='linear')\ntwenty_train = fetch_20newsgroups(subset='train', shuffle=True)\ntwenty_test = fetch_20newsgroups(subset='test', shuffle=True)\n\n\n# General\ntfidf_Vect = TfidfVectorizer()\nX_train_tfidf = tfidf_Vect.fit_transform(twenty_train.data)\nclf.fit(X_train_tfidf, twenty_train.target)\nX_test_tfidf = tfidf_Vect.transform(twenty_test.data)\npredicted = clf.predict(X_test_tfidf)\nscore = metrics.accuracy_score(twenty_test.target, predicted)\nprint(\"Accuracy score of general tdfidf vector \")\nprint(score)\n\n\n# Bigram\ntfidf_Vect1 = TfidfVectorizer(ngram_range=(1, 2))\nX_train_tfidf1 = tfidf_Vect1.fit_transform(twenty_train.data)\nclf.fit(X_train_tfidf1, twenty_train.target)\nX_test_tfidf1 = tfidf_Vect1.transform(twenty_test.data)\npredicted1 = clf.predict(X_test_tfidf1)\nscore = metrics.accuracy_score(twenty_test.target, predicted1)\nprint(\"Accuracy score bigram condition tdfidf vector \")\nprint(score)\n\n# stopwords\ntfidf_Vect2 = TfidfVectorizer(stop_words='english')\nX_train_tfidf2 = tfidf_Vect2.fit_transform(twenty_train.data)\nclf.fit(X_train_tfidf2, twenty_train.target)\nX_test_tfidf2 = tfidf_Vect2.transform(twenty_test.data)\npredicted2 = clf.predict(X_test_tfidf2)\nscore = metrics.accuracy_score(twenty_test.target, predicted2)\nprint(\"Accuracy score with stopwords condition tdfidf vector \")\nprint(score)","sub_path":"ICp 7/Source/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"149851814","text":"from rsf.proj import *\nimport os\n\n####################### EDIT TO CHOOSE BETWEEN ########################\n################ MADAGASCAR AND STANDALONE INSTALLS ###################\n#asg = 'asg'\n#stdmdl='standardmodel'\n################################# OR ##################################\nCWPROOT = os.getenv('CWPROOT')\nsunull = os.path.join(CWPROOT,'bin/sunull')\nsushw = os.path.join(CWPROOT,'bin/sushw')\nsuchw = os.path.join(CWPROOT,'bin/suchw')\nsugain = os.path.join(CWPROOT,'bin/sugain')\nsuwind = os.path.join(CWPROOT,'bin/suwind')\nsuwaveform = os.path.join(CWPROOT,'bin/suwaveform')\nsupsimage = os.path.join(CWPROOT,'bin/supsimage')\nsuspike = os.path.join(CWPROOT,'bin/suspike')\nsufilter = os.path.join(CWPROOT,'bin/sufilter')\nsuconv = os.path.join(CWPROOT,'bin/suconv')\nsucddecon = os.path.join(CWPROOT,'bin/sucddecon')\nsufrac = os.path.join(CWPROOT,'bin/sufrac')\nsustack = os.path.join(CWPROOT,'bin/sustack')\nsuop = os.path.join(CWPROOT,'bin/suop')\nsugain = os.path.join(CWPROOT,'bin/sugain')\nsumax = os.path.join(CWPROOT,'bin/sumax')\nsuop2 = os.path.join(CWPROOT,'bin/suop2')\nsuflip = os.path.join(CWPROOT,'bin/suflip')\nsutxtaper = os.path.join(CWPROOT,'bin/sutxtaper')\nsudiff = os.path.join(CWPROOT,'bin/sudiff')\na2b = os.path.join(CWPROOT,'bin/a2b')\nsuresamp = os.path.join(CWPROOT,'bin/suresamp')\nMYAPPS = os.getenv('MYAPPS')\nasg = os.path.join(MYAPPS,'trip/iwave/asg/main/sim.x')\nacd = os.path.join(MYAPPS,'trip/iwave/acd/main/sim.x')\npoisson = os.path.join(MYAPPS,'trip/iwave/asg/main/ex2D.x')\nasg_ex3D = os.path.join(MYAPPS,'trip/iwave/asg/main/ex3D.x')\nstdmdl = os.path.join(MYAPPS,'trip/iwave/grid/main/standardmodel.x')\n#######################################################################\n\nBULK = [4.0,6.0,8.0]\nBUOY = [1.0,0.66667,0.5]\n\nfor i in range(0,3):\n Flow('bmh' + str(i), None, \n '''\n makevel n1=401 n2=801 \n d1=10.0 d2=10.0 v000=%g | \n sfput dim=2 gdim=2 id1=0 id2=1\n label1=Depth label2=Distance\n unit1=m unit2=m\n label=Bulk_modulus unit=GPa''' % BULK[i],\n stdin=0) \nfor i in range(0,3):\n Flow('byh'+str(i), None, \n '''\n makevel n1=401 n2=801 \n d1=10.0 d2=10.0 v000=%g | \n sfput dim=2 gdim=2 id1=0 id2=1\n label1=Depth label2=Distance\n unit1=m unit2=m\n label=Buoyancy unit=cm^3/g\n ''' % BUOY[i],\n stdin=0)\n\nFlow('fbmh', None, \n '''\n makevel n1=801 n2=1601 \n d1=5.0 d2=5.0 v000=%g | \n sfput dim=2 gdim=2 id1=0 id2=1\n label1=Depth label2=Distance\nO unit1=m unit2=m\n label=Bulk_modulus unit=GPa''' % BULK[0],\n stdin=0) \n\nFlow('fbyh', None, \n '''\n makevel n1=801 n2=1601 \n d1=5.0 d2=5.0 v000=%g | \n sfput dim=2 gdim=2 id1=0 id2=1\n label1=Depth label2=Distance\n unit1=m unit2=m\n label=Buoyancy unit=cm^3/g\n ''' % BUOY[0],\n stdin=0)\n\nFlow('pthdr1.su', None,\n sunull + ' nt=1001 ntr=201 dt=0.004 | ' + \n sushw + ' key=gx a=2000 b=20 j=201 | ' + \n sushw + ' key=gelev a=-1000',\n stdin=0)\n\n### single trace source dt=0.004\nFlow('srctrc004.su', None, suspike +\n ' nt=1001 ntr=1 offset=0 ix1=1 nspk=1 it1=250 dt=0.004 | ' +\n sugain + ' scale=250.0 | ' +\n sufilter + ' f=2,5,15,25 | ' +\n sushw + ' key=gelev a=-3000 | ' +\n sushw + ' key=gx a=3000', stdin=0)\n\n### single trace source dt=0.002\nFlow('srctrc002.su', None, suspike +\n ' nt=2001 ntr=1 offset=0 ix1=1 nspk=1 it1=500 dt=0.002 | ' +\n sugain + ' scale=500.0 | ' +\t \n sufilter + ' f=2,5,15,25 | ' +\n sushw + ' key=gelev a=-3000 | ' +\n sushw + ' key=gx a=3000', stdin=0)\n\n### single trace source dt=0.001\nFlow('srctrc001.su', None, suspike +\n 
' nt=4001 ntr=1 offset=0 ix1=1 nspk=1 it1=1000 dt=0.001 | ' +\n sufilter + ' f=2,5,15,25 | ' +\n sugain + ' scale=1000.0 | ' +\n sushw + ' key=gelev a=-3000 | ' +\n sushw + ' key=gx a=3000', stdin=0)\n\n### single trace source dt=0.0005\nFlow('srctrc0005.su', None, suspike +\n ' nt=8001 ntr=1 offset=0 ix1=1 nspk=1 it1=2000 dt=0.0005 | ' +\n sufilter + ' f=2,5,15,25 | ' +\n sugain + ' scale=2000.0 | ' + \n sushw + ' key=gelev a=-3000 | ' +\n sushw + ' key=gx a=3000', stdin=0)\n\n### single trace source dt=0.00025\nFlow('srctrc00025.su', None, suspike +\n ' nt=16001 ntr=1 offset=0 ix1=1 nspk=1 it1=4000 dt=0.00025 | ' +\n sufilter + ' f=2,5,15,25 | ' +\n sugain + ' scale=4000.0 | ' + \n sushw + ' key=gelev a=-3000 | ' +\n sushw + ' key=gx a=3000', stdin=0)\n \nFlow('extrc004.su','srctrc004.su',\n poisson + '''\n bulk=%g buoy=%g distance=2828.4 \n source=${SOURCES[0]} pressure=${TARGETS[0]}\n ''' % (BULK[0],BUOY[0]) ,\n stdin=0, stdout=-1)\nFlow('extrc002.su','srctrc002.su',\n poisson + '''\n bulk=%g buoy=%g distance=2828.4 \n source=${SOURCES[0]} pressure=${TARGETS[0]}\n ''' % (BULK[0],BUOY[0]) ,\n stdin=0, stdout=-1)\nFlow('extrc001.su','srctrc001.su',\n poisson + '''\n bulk=%g buoy=%g distance=2828.4 \n source=${SOURCES[0]} pressure=${TARGETS[0]}\n ''' % (BULK[0],BUOY[0]) ,\n stdin=0, stdout=-1)\nFlow('extrc0005.su','srctrc0005.su',\n poisson + '''\n bulk=%g buoy=%g distance=2828.4 \n source=${SOURCES[0]} pressure=${TARGETS[0]}\n ''' % (BULK[0],BUOY[0]) ,\n stdin=0, stdout=-1)\nFlow('extrc00025.su','srctrc00025.su',\n poisson + '''\n bulk=%g buoy=%g distance=2828.4 \n source=${SOURCES[0]} pressure=${TARGETS[0]}\n ''' % (BULK[0],BUOY[0]) ,\n stdin=0, stdout=-1)\n\nfor i in range(0,3):\n Flow(['ptp'+str(i)+'.su', 'ptvz'+str(i)+'.su'],\n ['bmh'+str(i), 'byh'+str(i), 'srctrc002.su', 'pthdr1.su'],\n '/bin/cp ${SOURCES[3]} ${TARGETS[0]} && ' +\n\t '/bin/cp ${SOURCES[3]} ${TARGETS[1]} && ' +\n\t 'export OMP_NUM_THREADS=2 && ' + \n asg + \n '''\n bulkmod=${SOURCES[0]} buoyancy=${SOURCES[1]} \n source_p=${SOURCES[2]} data_p=${TARGETS[0]} data_v0=${TARGETS[1]} \n deriv=0 adjoint=0 order=4 cfl=0.5 cmin=1.0 cmax=3.0 num_threads=1\n dmin=0.8 dmax=3.0 nl1=250 nr1=250 nl2=250 nr2=250 pmlampl=1.0\n\t printact=1\n ''', stdin=0, stdout=-1)\n\nFlow('dsrctrc004.su','srctrc004.su',sufrac + ' power=1 | ' + sugain + ' scale=0.001')\nFlow('dsrctrc002.su','srctrc002.su',sufrac + ' power=1 | ' + sugain + ' scale=0.001')\n\nFlow('cdptp0.su',['bmh0', 'pthdr1.su', 'dsrctrc004.su'],\n '/bin/cp ${SOURCES[1]} ${TARGETS[0]} && ' +\n 'export OMP_NUM_THREADS=1 && ' + \n acd + \n '''\n csq=${SOURCES[0]} source=${SOURCES[2]} data=${TARGETS[0]} \n deriv=0 adjoint=0 order=2 cfl=0.5 cmin=1.0 cmax=3.0 sampord=1 num_threads=2\n ''', stdin=0, stdout=-1)\n\nFlow('cdptp02.su',['bmh0', 'pthdr1.su', 'dsrctrc002.su'],\n '/bin/cp ${SOURCES[1]} ${TARGETS[0]} && ' +\n acd + \n '''\n csq=${SOURCES[0]} source=${SOURCES[2]} data=${TARGETS[0]} \n deriv=0 adjoint=0 order=2 cfl=0.5 cmin=1.0 cmax=3.0 sampord=1\n ''', stdin=0, stdout=0)\n\nFlow(['fptp.su', 'fptvz.su'],\n ['fbmh', 'fbyh', 'srctrc004.su', 'pthdr1.su'],\n '/bin/cp ${SOURCES[3]} ${TARGETS[0]}; ' +\n '/bin/cp ${SOURCES[3]} ${TARGETS[1]}; ' +\n asg + \n '''\n bulkmod=${SOURCES[0]} buoyancy=${SOURCES[1]} \n source_p=${SOURCES[2]} data_p=${TARGETS[0]} data_v0=${TARGETS[1]} \n deriv=0 adjoint=0 order=4 cfl=0.5 cmin=1.0 cmax=3.0 \n dmin=0.8 dmax=3.0 nl1=250 nr1=250 nl2=250 nr2=250 pmlampl=1.0\n ''', stdin=0, stdout=0)\n\nFlow('cdfptp.su',['fbmh', 'pthdr1.su', 'dsrctrc004.su'],\n '/bin/cp 
${SOURCES[1]} ${TARGETS[0]} && ' +\n acd + \n '''\n csq=${SOURCES[0]} source=${SOURCES[2]} data=${TARGETS[0]} \n deriv=0 adjoint=0 order=4 cfl=0.5 cmin=1.0 cmax=3.0 sampord=1\n ''', stdin=0, stdout=0)\n\nfor i in range(0,3):\n Flow('ptr'+str(i)+'.su', 'ptp'+str(i)+'.su',\n suwind + ' key=tracl min=151 max=151 tmin=2 tmax=3')\n Flow('ptr'+str(i),'ptr'+str(i)+'.su',\n 'suread read=data endian=0')\n\nFlow('fptr.su', 'fptp.su',\n suwind + ' key=tracl min=151 max=151 tmin=2 tmax=3')\n\nFlow('cdptr0.su', 'cdptp0.su',\n suwind + ' key=tracl min=151 max=151 tmin=2 tmax=3')\n\nFlow('cdfptr.su', 'cdfptp.su',\n suwind + ' key=tracl min=151 max=151 tmin=2 tmax=3')\n \nFlow('fptr','fptr.su',\n 'suread read=data endian=0')\n\nFlow('extrc0005_r.su', 'extrc0005.su',\n suresamp + ' nt=1001 dt=0.004 | ' +\n suwind + ' tmin=2 tmax=3')\n\nFlow('extrc0005_r','extrc0005_r.su',\n 'suread read=data endian=0')\n\nFlow('extrc00025_r.su', 'extrc00025.su',\n suresamp + ' nt=1001 dt=0.004 | ' +\n suwind + ' tmin=2 tmax=3')\n\nFlow('extrc00025_r','extrc00025_r.su',\n 'suread read=data endian=0')\n\nfor i in range(0,3):\n Result('comp'+str(i),['ptr'+str(i), 'extrc00025_r'],\n '''\n cat axis=2 ${SOURCES[1:2]} |\n graph plotcol=4,2 wanttitle=n label2=Pressure unit2=GPa\n ''')\n\nResult('fcomp',['fptr', 'extrc00025_r'],\n '''\n cat axis=2 ${SOURCES[1:2]} |\n graph plotcol=4,2 wanttitle=n label2=Pressure unit2=GPa\n ''')\n\n\n#######################################################################\n######################### 3D CASE #####################################\n#######################################################################\n\nfor i in range(0,3):\n Flow('bm'+str(i)+'_3D', None, \n '''\n makevel \n n1=101 d1=10.0\n n2=101 d2=10.0\n n3=11 d3=10.0 \n v000=%g | \n sfput dim=3 gdim=3 \n id1=0 label1=Depth unit1=m \n id2=1 label2=x-axis unit2=m\n id3=2 label3=y-axis unit3=m\n label=Bulk_modulus unit=GPa\n ''' % BULK[i],\n stdin=0)\n \nfor i in range(0,3):\n Flow('by'+str(i)+'_3D', None, \n '''\n makevel \n n1=101 d1=10.0\n n2=101 d2=10.0\n n3=11 d3=10.0 \n v000=%g | \n sfput dim=3 gdim=3 \n id1=0 label1=Depth unit1=m \n id2=1 label2=x-axis unit2=m\n id3=2 label3=y-axis unit3=m\n label=Buoyancy unit=cm^3/g\n ''' % BUOY[i],\n stdin=0)\n\nFlow('hdr_3D.su', None,\n sunull + ' nt=201 ntr=51 dt=0.004 | ' + \n sushw + ' key=gx a=0 b=20 j=51 | ' + \n sushw + ' key=gy a=40 | ' +\n sushw + ' key=gelev a=-500 | '+\n sushw + ' key=sx a=500 | '+\n sushw + ' key=sy a=50 | '+\n sushw + ' key=selev a=-500 ',\n stdin=0)\n\n### single trace source dt=0.004\nFlow('src_3D.su', None, suspike +\n ' nt=201 ntr=1 offset=0 ix1=1 nspk=1 it1=100 dt=0.004 | ' +\n sugain + ' scale=250.0 | ' +\n sufilter + ' f=2,5,15,25 | ' +\n sushw + ' key=gelev a=-500 | ' +\n sushw + ' key=gx a=500 | ' +\n sushw + ' key=gy a=50', stdin=0)\n\nfor i in range(0,3):\n Flow(['data_p'+str(i)+'_3D.su', 'data_vz'+str(i)+'_3D.su'],\n ['bm'+str(i)+'_3D', 'by'+str(i)+'_3D', 'src_3D.su', 'hdr_3D.su'],\n '/bin/cp ${SOURCES[3]} ${TARGETS[0]} && ' +\n\t '/bin/cp ${SOURCES[3]} ${TARGETS[1]} && ' +\n asg + \n '''\n bulkmod=${SOURCES[0]} buoyancy=${SOURCES[1]} \n source_p=${SOURCES[2]} data_p=${TARGETS[0]} data_v0=${TARGETS[1]} \n deriv=0 adjoint=0 order=4 cfl=0.5 \n cmin=1.0 cmax=3.0 dmin=0.8 dmax=3.0 \n nl1=250 nr1=250 nl2=250 nr2=250 nl3=250 nr3=250 pmlampl=1.0\n ''', stdin=0, stdout=0)\n\nFlow(['dirdata_p0_3D.su'],\n ['bm0_3D', 'by0_3D', 'src_3D.su', 'hdr_3D.su'],\n '/bin/cp ${SOURCES[3]} ${TARGETS[0]} && ' +\n asg + \n '''\n bulkmod=${SOURCES[0]} buoyancy=${SOURCES[1]} 
\n source_p=${SOURCES[2]} data_p=${TARGETS[0]}\n deriv=0 adjoint=0 order=4 cfl=0.5 \n cmin=1.0 cmax=3.0 dmin=0.8 dmax=3.0 \n nl1=0 nr1=0 nl2=0 nr2=0 nl3=0 nr3=0 pmlampl=1.0\n ''', stdin=0, stdout=0)\n\nFlow('ddtsrc_3D.su','src_3D.su', sufrac + ' power=1 | ' + sugain + ' scale=0.001')\nFlow(['cddata0_3D.su'],\n ['bm0_3D','ddtsrc_3D.su', 'hdr_3D.su'],\n '/bin/cp ${SOURCES[2]} ${TARGETS[0]} && ' +\n acd + \n '''\n csq=${SOURCES[0]} source=${SOURCES[1]} data=${TARGETS[0]} \n deriv=0 adjoint=0 order=2 cfl=0.5 \n cmin=1.0 cmax=3.0 dmin=0.8 dmax=3.0 \n ''', stdin=0, stdout=0)\n\nFlow('dataEx_3D.su',\n ['src_3D.su','hdr_3D.su'],\n '/bin/cp ${SOURCES[1]} ./${TARGETS[0]} && '+\n asg_ex3D +\n '''\n bulk=%g buoy=%g \n source=${SOURCES[0]} \n pressure=${TARGETS[0]}\n ''' % (BULK[0],BUOY[0]) ,\n stdin=0, stdout=0 )\n\n\n# far trace of exact solution and difference\nFlow('trcEx_far.su','dataEx_3D.su',\n suwind + ' key=tracl min=2 max=2')\nFlow('trcEx_far','trcEx_far.su',\n 'suread read=data endian=0')\n\nFlow('trcEx_mid.su','dataEx_3D.su',\n suwind + ' key=tracl min=12 max=12')\nFlow('trcEx_mid','trcEx_mid.su',\n 'suread read=data endian=0')\n\nFlow('dirtrc0_far.su','dirdata_p0_3D.su',\n suwind +' key=tracl min=2 max=2')\nFlow('cdtrc0_far.su','cddata0_3D.su',\n suwind +' key=tracl min=2 max=2')\n\t \nfor i in range(0,3):\n Flow('trc'+str(i)+'_far.su','data_p'+str(i)+'_3D.su',\n suwind +' key=tracl min=2 max=2')\n Flow('trc'+str(i)+'_far','trc'+str(i)+'_far.su',\n 'suread read=data endian=0')\n\n Result('comp'+str(i)+'_far',\n ['trcEx_far','trc'+str(i)+'_far'],\n '''\n cat axis=2 ${SOURCES[1:2]} |\n graph plotcol=4,2 wanttitle=n label2=Pressure unit2=GPa\n ''')\n\n Flow('trc'+str(i)+'_mid.su','data_p'+str(i)+'_3D.su',\n suwind +' key=tracl min=12 max=12')\n Flow('trc'+str(i)+'_mid','trc'+str(i)+'_mid.su',\n 'suread read=data endian=0')\n\n Result('comp'+str(i)+'_mid',\n ['trcEx_mid','trc'+str(i)+'_mid'],\n '''\n cat axis=2 ${SOURCES[1:2]} |\n graph plotcol=4,2 wanttitle=n label2=Pressure unit2=GPa\n ''')\n\n\n\nEnd()\n","sub_path":"repro/papers/iwavecal/project/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":13869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"215978759","text":"'''Module to calculate the distance (in cm) of an object from the centre of\r\nthe Earth, given it's period (in seconds) and a required precision.'''\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\n\r\nclass PhysError(Exception):\r\n '''Exception class for the physically impossible.'''\r\n def __init__(self, value):\r\n super(PhysError, self).__init__(value)\r\n self.value = value\r\n def __str__(self):\r\n return repr(self.value)\r\n\r\nT_ERROR = \"Period ({0}s) is less than minimal orbital period for Earth (5060.9s).\"\r\n\r\ndef func(radius, T):\r\n '''Function which describes our non-linear equation.'''\r\n return (T * (np.sqrt(6.6738e-11 * 5.9726e24) / (2 * np.pi * np.sqrt(radius)))) - radius\r\n\r\nSTART = (6371000.0, 6371000.1)\r\n\r\ndef solveOrbit(T, precision):\r\n '''Function which calculates the distance of an object from the centre of\r\nthe Earth, given it's period (in seconds) and a required precision.'''\r\n if T < 5060.9:\r\n raise PhysError(T_ERROR.format(T))\r\n r1, r2 = START[0], START[1]\r\n while np.sign(func(r1, T)) == np.sign(func(r2, T)):\r\n r1 = r2\r\n r2 += 10000\r\n r3 = r2 - func(r2, T)*((r2 - r1) / (func(r2, T) - func(r1, T)))\r\n while np.fabs(r3 - r2) > precision:\r\n r1, r2 = r2, r3\r\n r3 = r2 - func(r2, T)*((r2 - r1) / (func(r2, T) - func(r1, T)))\r\n\r\n return r3\r\n","sub_path":"Computational Physics/q10u1306340.py","file_name":"q10u1306340.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"209004049","text":"from sqlalchemy import Column, String, Integer\nfrom database.base import Base, sessionFactory, modelFactory\n\n\nclass GudangORM(Base):\n __tablename__='Gudang'\n\n id_barang = Column(Integer, primary_key=True)\n nama_produk = Column(String)\n jumlah_barang = Column(String)\n lokasi = Column(String)\n tanggal_masuk = Column(String)\n harga_barang = Column(String)\n\n def __init__(self, nama_produk, tanggal_masuk, lokasi, harga_barang, jumlah_barang):\n session = sessionFactory()\n self.nama_produk = nama_produk\n self.tanggal_masuk = tanggal_masuk\n self.lokasi = lokasi\n self.harga_barang = harga_barang\n self.jumlah_barang = jumlah_barang\n session.add(self)\n session.commit()\n session.close()\n\n def dataGudang():\n session = sessionFactory()\n return session.query(GudangORM).all()\n session.close()\n\n def delGudang(id):\n session = sessionFactory()\n session.query(GudangORM).filter_by(id_barang=id).delete()\n session.commit()\n session.close()\n\n\n def updateGudang(ID,newNama_produk, newTanggal_masuk, newLokasi, newHarga_barang, newJumlah_barang):\n session = sessionFactory()\n session.query(GudangORM).filter_by(id_barang=ID).update({\n GudangORM.nama_produk: newNama_produk,\n GudangORM.tanggal_masuk: newTanggal_masuk,\n GudangORM.lokasi: newLokasi,\n GudangORM.harga_barang: newHarga_barang,\n GudangORM.jumlah_barang: newJumlah_barang,\n }, synchronize_session=False)\n session.commit()\n session.close()\n pass\n\nmodelFactory()\n\n\n","sub_path":"database/GudangORM.py","file_name":"GudangORM.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"138090081","text":"import numpy as numpy \nimport csv\nimport matplotlib.pyplot as plt\nList=numpy.random.normal(loc=0,scale=1,size=1000)\nplot=plt.hist(List,10,density=True)\nplt.grid(axis='x',alpha=1)\nplt.grid(axis='y',alpha=1)\nwith open('points.csv','w') as csvfile:\n\tfieldnames=['points']\n\twriter = csv.DictWriter(csvfile,fieldnames=fieldnames)\n\twriter.writeheader()\n\tfor i in List:\n\t\twriter.writerow({'points': i})\nplt.show()","sub_path":"Random/Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"354964507","text":"# -*- coding: utf-8 -*-\n'''\nBasis Function Manipulation\n################################\nFunctions for managing and manipulating basis set data.\nMany of the ordering schemes used in computational codes can be\ngenerated programmatically with the right numerical function.\nThis is preferred to an explicit parsing and storage of a given\nbasis set ordering scheme.\n'''\nimport re\nimport sympy\nimport numpy as np\nfrom sympy import Add, Mul\nfrom exatomic._config import config\nfrom collections import OrderedDict\nfrom numba import jit, vectorize\n\nx, y, z = sympy.symbols('x y z')\n\nlorder = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'k', 'l', 'm']\nlmap = {'s': 0, 'p': 1, 'd': 2, 'f': 3, 'g': 4, 'h': 5, 'i': 6, 'k': 7, 'l': 8,\n 'm': 9, 'px': 1, 'py': 1, 'pz': 1}\nrlmap = {value: key for key, value in lmap.items() if len(key) == 1}\nspher_ml_count = {'s': 1, 'p': 3, 'd': 5, 'f': 7, 'g': 9, 'h': 11, 'i': 13, 'k': 15,\n 'l': 17, 'm': 19}\nspher_lml_count = {lorder.index(key): value for key, value in spher_ml_count.items()}\ncart_ml_count = {'s': 1, 'p': 3, 'd': 6, 'f': 10, 'g': 15, 'h': 21, 'i': 28}\ncart_lml_count = {lorder.index(key): value for key, value in cart_ml_count.items()}\n\nenum_cartesian = {0: [[0, 0, 0]],\n 1: [[1, 0, 0], [0, 1, 0], [0, 0, 1]],\n 2: [[2, 0, 0], [1, 1, 0], [1, 0, 1],\n [0, 2, 0], [0, 1, 1], [0, 0, 2]],\n 3: [[3, 0, 0], [2, 1, 0], [2, 0, 1],\n [1, 2, 0], [1, 1, 1], [1, 0, 2],\n [0, 3, 0], [0, 2, 1], [0, 1, 2], [0, 0, 3]],\n 4: [[4, 0, 0], [3, 1, 0], [3, 0, 1], [2, 2, 0], [2, 1, 1],\n [2, 0, 2], [1, 3, 0], [1, 2, 1], [1, 1, 2], [1, 0, 3],\n [0, 4, 0], [0, 3, 1], [0, 2, 2], [0, 1, 3], [0, 0, 4]],\n 5: [[5, 0, 0], [4, 1, 0], [4, 0, 1], [3, 2, 0], [3, 1, 1],\n [3, 0, 2], [2, 3, 0], [2, 2, 1], [2, 1, 2], [2, 0, 3],\n [1, 4, 0], [1, 3, 1], [1, 2, 2], [1, 1, 3], [1, 0, 4],\n [0, 5, 0], [0, 4, 1], [0, 3, 2], [0, 2, 3], [0, 1, 4],\n [0, 0, 5]]}\n\n\ndef solid_harmonics(l_max):\n\n def _top_sh(lcur, sp, sm):\n lpre = lcur - 1\n kr = 1 if lpre == 0 else 0\n return (np.sqrt(2 ** kr * (2 * lpre + 1) / (2 * lpre + 2)) *\n (x * sp - (1 - kr) * y * sm))\n\n def _mid_sh(lcur, m, sm, smm):\n lpre = lcur - 1\n return (((2 * lpre + 1) * z * sm - np.sqrt((lpre + m) *\n (lpre - m)) * (x*x + y*y + z*z) * smm) /\n (np.sqrt((lpre + m + 1) * (lpre - m + 1))))\n\n def _bot_sh(lcur, sp, sm):\n lpre = lcur - 1\n kr = 1 if lpre == 0 else 0\n return (np.sqrt(2 ** kr * (2 * lpre + 1) / (2 * lpre + 2)) *\n (y * sp + (1 - kr) * x * sm))\n\n sh = OrderedDict()\n sh[(0, 0)] = 1\n for l in range(1, l_max + 1):\n lpre = l - 1\n ml_all = list(range(-l, l + 1))\n sh[(l, ml_all[0])] = _bot_sh(l, sh[(lpre,lpre)], sh[(lpre,-(lpre))])\n for ml in ml_all[1:-1]:\n try:\n sh[(l, ml)] = _mid_sh(l, ml, sh[(lpre,ml)], sh[(lpre-1,ml)])\n except KeyError:\n sh[(l, ml)] = _mid_sh(l, ml, sh[(lpre,ml)], sh[(lpre,ml)])\n sh[(l, ml_all[-1])] = _top_sh(l, sh[(lpre,lpre)], sh[(lpre,-(lpre))])\n return sh\n\n\ndef clean_sh(sh):\n \"\"\"Turns symbolic solid harmonic functions into string representations\n to be using in generating basis functions.\n\n Args\n sh (OrderedDict): Output from exatomic.algorithms.basis.solid_harmonics\n\n Returns\n clean (OrderedDict): cleaned strings\n \"\"\"\n _replace = {'x': '{x}', 'y': '{y}', 'z': '{z}', ' - ': ' -'}\n _repatrn = re.compile('|'.join(_replace.keys()))\n clean = OrderedDict()\n for key, sym in sh.items():\n if isinstance(sym, (Mul, Add)):\n string = str(sym.expand()).replace(' + ', ' ')#.replace(' - ', ' -')\n string = _repatrn.sub(lambda x: 
_replace[x.group(0)], string)\n clean[key] = [pre + '*' for pre in string.split()]\n else: clean[key] = ['']\n return clean\n\n\ndef car2sph_transform_matrices(sh, l_tot):\n '''\n Generates cartesian to spherical transformation matrices as an ordered dict\n with key corresponding to l value.\n\n Args\n sh (OrderedDict): the result of solid_harmonics(l_tot)\n '''\n s = [1]\n p = [y, z, x]\n d = [x*x, x*y, x*z, y*y, y*z, z*z]\n f = [x*x*x, x*x*y, x*x*z, x*y*y, x*y*z, x*z*z, y*y*y, y*y*z, y*z*z, z*z*z]\n g = [x*x*x*x, x*x*x*y, x*x*x*z, x*x*y*y, x*x*y*z,\n x*x*z*z, x*y*y*y, x*y*y*z, x*y*z*z, x*z*z*z,\n y*y*y*y, y*y*y*z, y*y*z*z, y*z*z*z, z*z*z*z]\n h = [x*x*x*x*x, x*x*x*x*y, x*x*x*x*z, x*x*x*y*y, x*x*x*y*z, x*x*x*z*z, x*x*y*y*y,\n x*x*y*y*z, x*x*y*z*z, x*x*z*z*z, x*y*y*y*y, x*y*y*y*z, x*y*y*z*z, x*y*z*z*z,\n x*z*z*z*z, y*y*y*y*y, y*y*y*y*z, y*y*y*z*z, y*y*z*z*z, y*z*z*z*z, z*z*z*z*z]\n ltopow = {0: s, 1: p, 2: d, 3: f, 4: g, 5: h}\n transdims = {0: (1, 1), 1: (3, 3), 2: (5, 6),\n 3: (7, 10), 4: (9, 15), 5: (11, 21)}\n ndict = OrderedDict()\n for lcur in range(l_tot + 1):\n ndict[lcur] = np.zeros(transdims[lcur])\n for ml in range(-lcur, lcur + 1):\n moff = lcur + ml\n expr = sh[(lcur, ml)]\n powers = ltopow[lcur]\n try:\n nexpr = expr.as_coeff_Mul()\n except AttributeError:\n ndict[lcur][moff,0] = expr\n continue\n for i, power in enumerate(powers):\n if float(nexpr[0]).is_integer():\n ndict[lcur][moff, i] = sympy.expand(nexpr[1]).coeff(power, 1)\n else:\n if power == nexpr[1]:\n ndict[lcur][moff, powers.index(power)] = nexpr[0]\n return ndict\n\n@jit(nopython=True, cache=True)\ndef _fac(n,v): return _fac(n-1, n*v) if n else v\n\n@jit(nopython=True, cache=True)\ndef fac(n): return _fac(n, 1)\n\n@jit(nopython=True, cache=True)\ndef _fac2(n,v): return _fac2(n-2, n*v) if n > 0 else v\n\n@jit(nopython=True, cache=True)\ndef fac2(n):\n if n < -1: return 0\n if n < 2: return 1\n return _fac2(n, 1)\n\n@jit(nopython=True, cache=True)\ndef normalize(alpha, L):\n prefac = (2 / np.pi) ** (0.75)\n numer = 2 ** (L) * alpha ** ((L + 1.5) / 2)\n denom = (fac2(2 * L - 1)) ** (0.5)\n return prefac * numer / denom\n\n@jit(nopython=True, cache=True)\ndef sto_normalize(alpha, n):\n return (2 * alpha) ** n * ((2 * alpha) / fac(2 * n)) ** 0.5\n\n@vectorize(['int64(int64)'])\ndef _vec_fac(n):\n return fac(n)\n\n@vectorize(['int64(int64)'])\ndef _vec_fac2(n):\n return fac2(n)\n\n@vectorize(['float64(float64,int64)'])\ndef _vec_normalize(alpha, L):\n return normalize(alpha, L)\n\n@vectorize(['float64(float64,int64)'])\ndef _vec_sto_normalize(alpha, n):\n return sto_normalize(alpha, n)\n\n### Is this necessary?\n@jit(nopython=True)\ndef _ovl_indices(nbas, nel):\n chis = np.empty((nel, 2), dtype=np.int64)\n cnt = 0\n for i in range(nbas):\n for j in range(i + 1):\n chis[cnt, 0] = i\n chis[cnt, 1] = j\n cnt += 1\n return chis\n\n\n@vectorize(['float64(float64,float64,float64,float64,float64,float64,int64, \\\n int64,int64,int64,int64,int64,float64,float64,float64,float64)'])\ndef _overlap(x1, x2, y1, y2, z1, z2, l1, l2, m1, m2, n1, n2, N1, N2, alpha1, alpha2):\n '''\n Pardon the Fortran style that follows. 
This was translated from the snafu\n    electronic structure software package.\n    '''\n    s12 = 0.\n    tol = 1e-8\n    abx = x1 - x2\n    aby = y1 - y2\n    abz = z1 - z2\n    ab2 = abx * abx + aby * aby + abz * abz\n    if ab2 < tol:\n        ll = l1 + l2\n        mm = m1 + m2\n        nn = n1 + n2\n        if ll % 2 != 0 or mm % 2 != 0 or nn % 2 != 0:\n            return s12\n        ll2 = ll // 2\n        mm2 = mm // 2\n        nn2 = nn // 2\n        ltot = ll2 + mm2 + nn2\n        numer = np.pi ** (1.5) * fac2(ll - 1) * fac2(mm - 1) * fac2(nn - 1)\n        denom = (2 ** ltot) * (alpha1 + alpha2) ** (ltot + 1.5)\n        s12 = N1 * N2 * numer / denom\n        return s12\n    gamma = alpha1 + alpha2\n    xp = (alpha1 * x1 + alpha2 * x2) / gamma\n    yp = (alpha1 * y1 + alpha2 * y2) / gamma\n    zp = (alpha1 * z1 + alpha2 * z2) / gamma\n    px1 = xp - x1\n    py1 = yp - y1\n    pz1 = zp - z1\n    px2 = xp - x2\n    py2 = yp - y2\n    pz2 = zp - z2\n    pg12 = np.sqrt(np.pi / gamma)\n    xix = 0\n    yiy = 0\n    ziz = 0\n    ltot = l1 + l2\n    mtot = m1 + m2\n    ntot = n1 + n2\n    if ltot == 0:\n        xix = pg12\n    else:\n        iii = (ltot - 1) // 2 if ltot % 2 != 0 else ltot // 2\n        for i in range(iii):\n            k = 2 * i\n            prod = pg12 * fac2(k - 1) / ((2 * gamma) ** i)\n            qlow = max(-k, (k - 2 * l2))\n            qhigh = min(k, (2 * l1 - k))\n            fk = 0\n            for q in range(qlow, qhigh, 2):\n                j = (k + q) // 2\n                kq = (k - q) // 2  # use a fresh name: clobbering k here corrupts later q iterations\n                newt1 = fac(l1) / fac(j) / fac(l1 - j)\n                newt2 = fac(l2) / fac(kq) / fac(l2 - kq)\n                fk += newt1 * newt2 * (px1 ** (l1 - j)) * (px2 ** (l2 - kq))\n            xix += prod * fk\n    if mtot == 0:\n        yiy = pg12\n    else:\n        iii = (mtot - 1) // 2 if mtot % 2 != 0 else mtot // 2\n        for i in range(iii):\n            k = 2 * i\n            prod = pg12 * fac2(k - 1) / ((2 * gamma) ** i)\n            qlow = max(-k, (k - 2 * m2))\n            qhigh = min(k, (2 * m1 - k))\n            fk = 0\n            for q in range(qlow, qhigh, 2):\n                j = (k + q) // 2\n                kq = (k - q) // 2\n                newt1 = fac(m1) / fac(j) / fac(m1 - j)\n                newt2 = fac(m2) / fac(kq) / fac(m2 - kq)\n                fk += newt1 * newt2 * (py1 ** (m1 - j)) * (py2 ** (m2 - kq))\n            yiy += prod * fk\n    if ntot == 0:\n        ziz = pg12\n    else:\n        iii = (ntot - 1) // 2 if ntot % 2 != 0 else ntot // 2\n        for i in range(iii):\n            k = 2 * i\n            prod = pg12 * fac2(k - 1) / ((2 * gamma) ** i)\n            qlow = max(-k, (k - 2 * n2))\n            qhigh = min(k, (2 * n1 - k))\n            fk = 0\n            for q in range(qlow, qhigh, 2):\n                j = (k + q) // 2\n                kq = (k - q) // 2\n                newt1 = fac(n1) / fac(j) / fac(n1 - j)\n                newt2 = fac(n2) / fac(kq) / fac(n2 - kq)\n                fk += newt1 * newt2 * (pz1 ** (n1 - j)) * (pz2 ** (n2 - kq))\n            ziz += prod * fk\n    exponent = alpha1 * alpha2 * ab2 / gamma\n    s12 = N1 * N2 * np.exp(-exponent) * xix * yiy * ziz\n    return s12\n\n\n@jit(nopython=True)\ndef _wrap_overlap(x, y, z, l, m, n, N, alpha):\n    nprim = len(x)\n    arlen = nprim * (nprim + 1) // 2\n    f1x = np.empty(arlen, dtype=np.float64)\n    f1y = np.empty(arlen, dtype=np.float64)\n    f1z = np.empty(arlen, dtype=np.float64)\n    f1N = np.empty(arlen, dtype=np.float64)\n    f1a = np.empty(arlen, dtype=np.float64)\n    f1l = np.empty(arlen, dtype=np.int64)\n    f1m = np.empty(arlen, dtype=np.int64)\n    f1n = np.empty(arlen, dtype=np.int64)\n    f2x = np.empty(arlen, dtype=np.float64)\n    f2y = np.empty(arlen, dtype=np.float64)\n    f2z = np.empty(arlen, dtype=np.float64)\n    f2N = np.empty(arlen, dtype=np.float64)\n    f2a = np.empty(arlen, dtype=np.float64)\n    f2l = np.empty(arlen, dtype=np.int64)\n    f2m = np.empty(arlen, dtype=np.int64)\n    f2n = np.empty(arlen, dtype=np.int64)\n    chi1 = np.empty(arlen, dtype=np.int64)\n    chi2 = np.empty(arlen, dtype=np.int64)\n    cnt = 0\n    for i in range(nprim):\n        for j in range(i + 1):\n            f1x[cnt] = x[i]\n            f2x[cnt] = x[j]\n            f1y[cnt] = y[i]\n            f2y[cnt] = y[j]\n            f1z[cnt] = z[i]\n            f2z[cnt] = z[j]\n            f1N[cnt] = N[i]\n            f2N[cnt] = 
N[j]\n f1a[cnt] = alpha[i]\n f2a[cnt] = alpha[j]\n f1l[cnt] = l[i]\n f2l[cnt] = l[j]\n f1m[cnt] = m[i]\n f2m[cnt] = m[j]\n f1n[cnt] = n[i]\n f2n[cnt] = n[j]\n chi1[cnt] = i\n chi2[cnt] = j\n cnt += 1\n overlap = _overlap(f1x, f2x, f1y, f2y, f1z, f2z, f1l, f2l,\n f1m, f2m, f1n, f2n, f1N, f2N, f1a, f2a)\n return chi1, chi2, overlap\n","sub_path":"exatomic/algorithms/basis.py","file_name":"basis.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"142681501","text":"# Basic training configuration file\nfrom torch.optim import RMSprop\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torchvision.transforms import RandomHorizontalFlip, RandomVerticalFlip\nfrom torchvision.transforms import RandomResizedCrop, RandomChoice\nfrom torchvision.transforms import ColorJitter, ToTensor, Normalize\nfrom common.dataset import FilesFromCsvDataset\nfrom common.data_loaders import get_data_loader\nfrom models.resnet import FurnitureResNetV2_50_350\n\n\nSEED = 17\nDEBUG = True\nDEVICE = 'cuda'\n\nOUTPUT_PATH = \"output\"\n\n\nsize = 350\n\nTRAIN_TRANSFORMS = [\n RandomChoice(\n [\n RandomResizedCrop(size, scale=(0.4, 6.0), interpolation=3),\n RandomResizedCrop(size, scale=(0.6, 1.0), interpolation=3),\n ]\n ),\n RandomHorizontalFlip(p=0.5),\n RandomVerticalFlip(p=0.5),\n ColorJitter(hue=0.12, brightness=0.12),\n ToTensor(),\n Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n]\n\nVAL_TRANSFORMS = [\n RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),\n RandomHorizontalFlip(p=0.5),\n ToTensor(),\n Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n]\n\n\nBATCH_SIZE = 32\nNUM_WORKERS = 15\n\n\ndataset = FilesFromCsvDataset(\"output/filtered_train_dataset.csv\")\nTRAIN_LOADER = get_data_loader(dataset,\n data_transform=TRAIN_TRANSFORMS,\n batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS,\n pin_memory='cuda' in DEVICE)\n\n\nval_dataset = FilesFromCsvDataset(\"output/filtered_val_dataset.csv\")\nVAL_LOADER = get_data_loader(val_dataset,\n data_transform=VAL_TRANSFORMS,\n batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS,\n pin_memory='cuda' in DEVICE)\n\n\nMODEL = FurnitureResNetV2_50_350()\n\n\nN_EPOCHS = 100\n\n\nOPTIM = RMSprop(\n params=[\n {\"params\": MODEL.stem.parameters(), 'lr': 0.045},\n {\"params\": MODEL.features.parameters(), 'lr': 0.045},\n {\"params\": MODEL.classifier.parameters(), 'lr': 0.045},\n ],\n alpha=0.9,\n eps=1.0)\n\n\nLR_SCHEDULERS = [\n MultiStepLR(OPTIM, milestones=[2, 4, 6, 8, 10, 12, 14, 16, 18, 20], gamma=0.94)\n]\n\n\nEARLY_STOPPING_KWARGS = {\n 'patience': 15,\n}\n\n\nLOG_INTERVAL = 100\n","sub_path":"classification/imaterialist_challenge_furniture_2018/configs/train/train_resnetv2_350_random_resized_crop.py","file_name":"train_resnetv2_350_random_resized_crop.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"568618356","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport sys\nimport cv2\nimport caffe\nimport os\nimport pprint\nimport random\nimport string\nimport time\nimport glob\nimport shutil\nclass detector:\n def __init__(self,deploy,model,mean_file,size,scale,gray):\n caffe.set_mode_cpu()\n self.deploy = deploy\n self.model = model\n if type(mean_file)==type(''):\n self.mean_value = np.load(mean_file).mean(1).mean(1)\n elif type(mean_file) == type(1):\n self.mean_value = mean_file\n self.size = size\n self.scale = scale\n self.gray = gray\n\n def auto_mkdir(self,path):\n if os.path.isdir(path):\n pass\n else:\n os.mkdir(path)\n def auto_image_process(self,img):\n if self.gray==1:\n gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n else:\n gray_img = img\n resize_img = cv2.resize(gray_img,self.size)\n resize_img = np.array(resize_img,dtype=np.float32)\n resize_img = resize_img - self.mean_value\n resize_img = resize_img*self.scale\n return resize_img\n #use the images in return image\n def auto_return_single_result(self,img):\n self.net.blobs['data'].data[...] = self.auto_image_process(img)\n out = self.net.forward()\n return int(out['result'][0].argmax())\n #写一个关于caffenet的一个自动检测的程序,并且读取用的是自带的一些工具\n def use_caffenet_detect(self,src,result_len):\n result_list = [0]*result_len\n net = caffe.Classifier('caffe_net/deploy.prototxt','caffe_net/caffenet_train_iter_43000.caffemodel',\n mean=np.array([104,117,123]),\n raw_scale= 255,\n channel_swap=(2,1,0),\n image_dims=(227,227)\n )\n for image_file in glob.iglob(os.path.join(src, '*.*')):\n if 'png' in image_file or 'jpg' in image_file or 'JPG' in image_file or 'jpeg' in image_file or 'JPEG' in image_file:\n #利用cv来检测读取的图片是否为真的图片\n img_cv2 = cv2.imread(image_file)\n if type(img_cv2) == type(np.ndarray([])):\n img = caffe.io.load_image(image_file)\n predict = net.predict([img])\n result = int(predict[0].argmax())\n if result == 0 or result == 1 or result == 2 or result == 3:\n name_text = image_file.split(\".\")[-2].split('-')\n index = int(name_text[0].split(\"/\")[-1])\n result_list[index] = 1\n #use time as\n dst_dir = 'image_storage/'\n rand_str = ''\n rand_str = rand_str.join(random.sample(string.ascii_letters+string.digits, 8))\n now_time = str(time.time()).split('.')[0]\n dst_pre = now_time+rand_str\n shutil.copy(image_file,dst_dir+dst_pre+'.'+image_file.split('.')[-1])\n os.remove(image_file)\n return result_list\n def local_detect(self,src,dst):\n self.net = caffe.Net(self.deploy,self.model,caffe.TEST)\n for image_file in glob.iglob(os.path.join(src, '*.*')):\n if 'png' in image_file or 'jpg' in image_file or 'JPG' in image_file or 'jpeg' in image_file or 'JPEG' in image_file:\n img = cv2.imread(image_file)\n result = self.auto_return_single_result(img)\n dst_dir = dst+'/'+str(result)\n image_path_array = image_file.split('/')\n if os.path.isdir(dst_dir):\n shutil.copy(image_file,dst_dir+'/'+image_path_array[-1])\n else:\n self.auto_mkdir(dst_dir)\n shutil.copy(image_file,dst_dir+'/'+image_path_array[-1])\n def return_list(self,src,result_len):\n result_list = [0]*result_len\n for image_file in glob.iglob(os.path.join(src, '*.*')):\n if 'png' in image_file or 'jpg' in image_file or 'JPG' in image_file or 'jpeg' in image_file or 'JPEG' in image_file:\n img = cv2.imread(image_file)\n img = self.delete_water_mark(img,0.8,0.6)\n result = self.auto_return_single_result(img)\n if result==0 or result==1:\n name_text = image_file.split(\".\")[-2].split('-')\n index = int(name_text[0].split(\"/\")[-1])\n 
result_list[index]=1\n os.remove(image_file)\n return result_list\n def delete_water_mark(self,img,height_per,width_per):\n height,width,deep = img.shape\n for i in range(int(height*height_per),height):\n for j in range(int(width*width_per),width):\n img[i,j]=0\n return img\n #use the caffe load\n\n# logodetect = logo_detect('logo_detect.prototxt','logo_iter_98000.caffemodel','mean.npy',(128,128),0.0078125,1)\n# logodetect.local_detect('images_test','test')\n# caffe.set_mode_cpu()\n# net = caffe.Net('logo_detect.prototxt','logo_iter_98000.caffemodel',caffe.TEST)\n# #data process\n# mean_value = np.load('mean.npy')\n# total = 0\n# right = 0\n# root_image_dir = 'images_test'\n# for image_file in glob.iglob(os.path.join(root_image_dir,'*.*')):\n# if ('png' in image_file or 'jpg' in image_file or 'JPG' in image_file or 'jpeg' in image_file or 'JPEG' in image_file) and 'neg' in image_file:\n# img = cv2.imread(image_file)\n# img = cv2.resize(img,(128,128))\n# for i in range(110,128):\n# for j in range(90,128):\n# img[i,j]=0\n# # cv2.imshow(\"test\",img)\n# img = np.array(img,dtype=np.float32)\n# gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# resize_img = (gray_img -mean_value)*0.0078125\n#\n# # resize_img = resize_img*0.0078125\n# #change to float\n# net.blobs['data'].data[...] = resize_img\n# out = net.forward()\n# result = int(out['result'][0].argmax())\n# if result==2:\n# right+=1\n# total+=1\n# print str(right)+'/'+str(total)\n #print(\"the image is \"+image_file)\n # print(\"the result is #{}\".format(out['result'][0].argmax()))\n","sub_path":"logo_detect.py","file_name":"logo_detect.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"401266855","text":"import tables as tb\nimport numpy as np\nimport pandas as pd\nimport os,sys\nimport logging\nfrom time import time\nfrom . import params\n\nlog = logging.getLogger('brilws')\n\ndef _is_element_tuple(me):\n '''\n Check if element of me is tuple \n Input: collection\n '''\n return np.all([isinstance(x,tuple) for x in me])\n\ndef _is_subset(me,target):\n '''\n Check me collection is a subset of the target collection\n '''\n return np.in1d(me,target).all()\n\ndef _open_validfile(filename, tables):\n '''\n Check if the file has all the required table \n Input: f: filename, tables: required table names\n Output: filehandle if input is valid, None if input is invalid \n ''' \n\n f = None\n try:\n f = tb.open_file(filename)\n except tb.exceptions.HDF5ExtError:\n log.debug('failed to open %s, skip'%filename)\n return None\n nodes = list(f.root._v_children.keys())\n if not len(nodes):\n f.close()\n return None \n if not _is_subset(tables,nodes):\n f.close()\n return None\n tablelist = [f.get_node('/'+x) for x in tables]\n filledtables = [x for x in tablelist if x.nrows>0]\n if len(tablelist) != len(filledtables):\n return None\n return f\n\ndef open_validfiles(filenames,lumitype):\n '''\n Input: [filename], lumitype\n Output: [filehandle]\n ''' \n mytype = lumitype.upper()\n requiredtables = ['tcds','beam']\n if mytype in params._lumitypeToh5tablename.keys():\n requiredtables.append(params._lumitypeToh5tablename[mytype])\n fhs = [_open_validfile(f,requiredtables) for f in filenames]\n return [x for x in fhs if x is not None]\n\ndef _build_preselectcondition( runmin=None,runmax=None,fillmin=None,tssecmin=None,tssecmax=None,fillmax=None, runlsselect=None ):\n if runlsselect is not None: \n if runmin==runmax==fillmin==tssecmin==tssecmax==fillmax==None:\n runmin = min(runlsselect.index.tolist())\n runmax = max(runlsselect.index.tolist()) \n conditions = []\n if runmin and runmax and runmin==runmax:\n conditions.append('(runnum == %d)'%runmin) \n elif runmin: \n conditions.append('(runnum >= %d)'%runmin)\n elif runmax: \n conditions.append('(runnum <= %d)'%runmax)\n if fillmin and fillmax and fillmin==fillmax:\n conditions.append('(fillnum == %d)'%fillmin)\n elif fillmin:\n conditions.append('(fillnum >= %d)'%fillmin)\n elif fillmax: \n conditions.append('(fillnum <= %d)'%fillmax)\n if tssecmin and tssecmax and tssecmin==tssecmax:\n conditions.append('(tssec == %d)'%tssecmin)\n elif tssecmin: \n conditions.append('(tssec >= %d)'%tssecmin)\n elif tssecmax: \n conditions.append('(tssec <= %d)'%tssecmax)\n conditionStr = '&'.join(conditions) \n return conditionStr\n\nclass dataRangeIterator: \n def __init__(self,filehandles,tablenames,conditionStr):\n self._filehandles = filehandles\n self._tablenames = tablenames\n self._conditionStr = conditionStr\n\n def _get_range_in_file(self,filehandle):\n '''\n Selected range of all tables in file\n Output: {tablename:coordinates}\n '''\n result = {}\n if not self._conditionStr:\n return result\n for tablename in self._tablenames:\n n = filehandle.get_node('/'+tablename)\n coordinates = n.get_where_list(self._conditionStr,sort=False)\n if coordinates.size == 0: \n result[tablename] = None\n else:\n result[tablename] = coordinates.tolist()\n return result\n \n def next(self):\n '''\n Output: (filehandle, {tablename: coordinates})\n '''\n for f in self._filehandles: #loop over files\n log.debug('Processing file '+f.filename)\n alltables = list(f.root._v_children.keys())\n if not _is_subset(self._tablenames,alltables):\n log.debug('file %s 
does not contain all the tables, skip '%f.filename)\n continue\n results = self._get_range_in_file(f)\n if not results:\n continue\n yield (f,results) \n\n \ndef dataFilter(tablehandle,coordinates,field_dtypes,runlsRangeSelectSeries=None,runlsnbPointFilterSeries=None):\n '''\n Filter input table nodes on fields based on runls range selection and runlsnb point selection conditions\n Input: \n tablehandle: table handle\n coordinates: row coordinates\n fields: output fields dtype\n runlsRangeSelectSeries: run,ls range selection matrix. Note: if None, select all\n runlsnbPointSelectSeries: [run,(ls,nb)]. Note: if None, select all\n Output: \n numpy record array of type field_dtypes\n Note: this filter does not produce None. Empty result means reject all\n ''' \n myrows = tablehandle.read_coordinates(coordinates)\n thisrun = np.array(myrows['runnum'])[0]\n lsnums = np.array(myrows['lsnum'])\n masks = np.full(lsnums.shape, True, dtype=bool) #default to select all\n\n if runlsRangeSelectSeries is not None:\n selectedruns = runlsRangeSelectSeries.index\n if thisrun not in selectedruns: #run is not selected\n masks = np.full(lsnums.shape, False, dtype=bool) #mask out all ls\n else: \n thisselectedls = runlsRangeSelectSeries[thisrun]\n masks = np.logical_or.reduce([np.logical_and(lsnums>=xmin, lsnums<=xmax) for [xmin,xmax] in thisselectedls]) #find masks \n\n if runlsnbPointFilterSeries is not None:\n if thisrun not in runlsnbPointFilterSeries.index: #run is not selected\n masks = np.full(lsnums[masks].shape, False, dtype=bool) #mask out all the rest ls \n else:\n lsnbselect = runlsnbPointFilterSeries[thisrun].values.tolist() #this run values : [(lsnum,nbnum)] \n mylsnbs = [tuple(x) for x in np.column_stack((myrows['lsnum'][masks],myrows['nbnum'][masks]))]\n for i,lsnb in enumerate(mylsnbs):\n if lsnb not in lsnbselect:\n masks[i] = False\n all_fields = []\n for fieldname in field_dtypes.names:\n value = myrows[fieldname][masks]\n all_fields.append(value)\n nrows = masks[np.nonzero(masks)].size\n log.debug('selected rows %d'%nrows)\n result = np.core.records.fromarrays(all_fields,names=field_dtypes.names,dtype=field_dtypes) \n return result\n\ndef andFilter(irecordsSize,conditions):\n masks = np.full(irecordsSize, True, dtype=bool) #default to select all \n for co in conditions:\n #print 'passed n masks:', masks[np.nonzero(masks)].size\n masks = np.logical_and(masks,co)\n return masks\n\n\nclass typebuilder: \n def __init__(self,datatablename,datatype,withBX): \n '''\n Build np.dtype for input tables and output data\n Input: \n datatablename: table name in hdf5 file\n withBX: if output is with per bx data \n '''\n self._basetypelist = [('runnum','uint32'),('lsnum','uint32'),('nbnum','uint32'),('timestampsec','uint32')]\n self._tcdstypelist = self._basetypelist + [('cmson','bool8'),('deadfrac','float32'),('ncollidingbx','uint32')]\n self._beam_typelist = self._basetypelist + [('status','U28'),('machinemode','U20'),('targetegev','uint32')] \n self._withBX = withBX \n self._datatablename = datatablename\n self._datatype = datatype\n\n def datatablename(self):\n return self._datatablename\n\n def datatype(self):\n return self._datatype \n\n def tcds_typelist(self):\n '''\n tcds table input fields \n '''\n return np.dtype(self._tcdstypelist)\n\n def beam_typelist(self):\n '''\n beam table input fields \n '''\n return np.dtype(self._beam_typelist)\n\n def lumidata_typelist(self):\n '''\n lumi data table input fields \n '''\n if self._datatablename == 'bestlumi':\n datafieldtypelist = self._basetypelist + 
[('fillnum','uint32'),('delivered','float32'),('recorded','float32'),('avgpu','float32'),('provider','U8')]\n            if self._withBX:\n                datafieldtypelist.append(('bxdelivered','float32',(3564,)))\n        else:\n            if self._datatype == 'raw':\n                datafieldtypelist = self._basetypelist + [('fillnum','uint32'),('avgraw','float32')]\n                if self._withBX:\n                    datafieldtypelist.append(('bxraw','float32',(3564,)))\n            else:\n                datafieldtypelist = self._basetypelist + [('fillnum','uint32'),('avg','float32')]\n                if self._withBX:\n                    datafieldtypelist.append(('bx','float32',(3564,)))\n        return np.dtype(datafieldtypelist)\n    \ndef resultIter(filehandles,lumitype,datatype='best',runmin=None,runmax=None,fillmin=None,tssecmin=None,tssecmax=None,fillmax=None,beamstatusid=None,amodetagid=None,targetegev=None,runlsselect=None,withBX=False):\n    '''\n    get online bestlumi\n    field choices: [runnum,lsnum,fillnum,timestampsec,cmson,beamstatus,delivered,recorded,bx,avgpu,datasource,normtag,normtagid,amodetagid,targetegev,numbxbeamactive,norb,nbperls]\n    \n    '''\n    datatablename = params._lumitypeToh5tablename[lumitype.upper()]\n    tbuilder = typebuilder(datatablename,datatype,withBX)\n    preconditionStr = _build_preselectcondition(runmin=runmin,runmax=runmax,fillmin=fillmin,tssecmin=tssecmin,tssecmax=tssecmax,fillmax=fillmax,runlsselect=runlsselect)\n    rangeIter = dataRangeIterator(filehandles,['tcds','beam',datatablename],preconditionStr)\n    \n    for co in rangeIter.next():\n        filehandle = co[0]\n        all_coordinates = co[1].values()\n        if None in all_coordinates:\n            log.debug('not all tables passed preselection, skip file %s', filehandle.filename)\n            continue\n\n        #tcds selection\n\n        tcds_coordinates = co[1]['tcds']\n        tcds_table_handle = filehandle.get_node('/tcds')\n        tcds_result = dataFilter(tcds_table_handle,tcds_coordinates,field_dtypes=tbuilder.tcds_typelist(),runlsRangeSelectSeries=runlsselect,runlsnbPointFilterSeries=None)\n\n        if not tcds_result.size:\n            log.debug('tcds failed data selection, continue %s', filehandle.filename)\n            continue\n        \n        selected_time = np.column_stack((tcds_result['runnum'],tcds_result['lsnum'],tcds_result['nbnum'])) \n        runlsnbSeries = _make_runlsnb_Series( selected_time )\n        #beam selection\n\n        beam_coordinates = co[1]['beam']\n        beam_table_handle = filehandle.get_node('/beam')\n        beam_result = dataFilter(beam_table_handle,beam_coordinates,field_dtypes=tbuilder.beam_typelist(),runlsRangeSelectSeries=None,runlsnbPointFilterSeries=runlsnbSeries)\n        beamconditions = []\n        if beamstatusid is not None:\n            req_beamstatus = params._idtobeamstatus[beamstatusid]\n            beamconditions.append( beam_result['status']==req_beamstatus )\n        if amodetagid is not None:\n            req_amodetag = params._idtoamodetag[amodetagid]\n            if req_amodetag in params._amodetagtofull:\n                req_amodetag = params._amodetagtofull[req_amodetag]\n            beamconditions.append( beam_result['machinemode']==req_amodetag )\n        if targetegev is not None:\n            beamconditions.append( beam_result['targetegev']==targetegev )\n        beam_masks = andFilter(beam_result.size,beamconditions) \n        beam_result = beam_result[beam_masks]\n\n        if not beam_result.size:\n            log.debug('beam failed data selection, continue %s', filehandle.filename)\n            continue\n\n        beam_time = np.column_stack((beam_result['runnum'],beam_result['lsnum'],beam_result['nbnum'])) \n        if not np.array_equal(beam_time,selected_time):\n            log.debug('beam time differs from tcds time, remake a narrower selection')\n            runlsnbSeries = _make_runlsnb_Series( beam_time)\n\n        #data selection\n\n        data_coordinates = co[1][datatablename]\n        data_table_handle = 
filehandle.get_node('/'+datatablename)\n\n data_result = dataFilter(data_table_handle,data_coordinates,field_dtypes=tbuilder.lumidata_typelist(),runlsRangeSelectSeries=None,runlsnbPointFilterSeries=runlsnbSeries) \n\n for record in data_result:\n r = {}\n r['fillnum'] = record['fillnum']\n r['runnum'] = record['runnum']\n r['lsnum'] = lsnum = record['lsnum']\n nbnum = record['nbnum']\n r['timestampsec'] = record['timestampsec']\n tcds_masks = andFilter(tcds_result.size,[tcds_result['lsnum']==lsnum , tcds_result['nbnum']==nbnum])\n tcds_data = tcds_result[tcds_masks]\n r['deadtimefrac'] = tcds_data['deadfrac'][0]\n r['cmson'] = tcds_data['cmson'][0]\n r['numbxbeamactive'] = tcds_data['ncollidingbx'][0]\n beam_data = beam_result[ np.logical_and(beam_result['lsnum']==lsnum , beam_result['nbnum']==nbnum) ] \n r['beamstatusid'] = params._beamstatustoid[beam_data['status'][0]]\n machinemode = beam_data['machinemode'][0]\n r['amodetag'] = params._fulltoamodetag[machinemode]\n r['targetegev'] = beam_data['targetegev']\n\n if datatype == 'best':\n r['delivered'] = record['delivered']\n r['recorded'] = record['recorded']\n r['avgpu'] = record['avgpu']\n r['datasource'] = record['provider']\n if withBX:\n r['bxdeliveredblob'] = record['bxdelivered']\n elif datatype == 'raw': \n r['avglumi'] = record['avgraw'] \n if withBX:\n r['bxlumiblob'] = record['bxraw']\n else:\n r['avglumi'] = record['avg']\n if withBX:\n r['bxlumiblob'] = record['bx'] \n yield r \ndef _make_runlsnb_Series(runlsnbarray):\n '''\n Make pandas Series from [[run,ls,nb]] structure\n Input: 2-D structure [[run,ls,nb]]\n Output: pandas Series [(ls,nb), index=runs]\n '''\n runs = runlsnbarray[:,:1]\n lsnb = [tuple(x) for x in runlsnbarray[:,1:3]]\n return pd.Series(lsnb,index=runs.T[0])\n\nif __name__=='__main__':\n tables = ['beam','tcds','hfetlumi']\n #runlsselect = pd.Series([[1340,1345],[1400,1500]], index=[327554,327554]) #pd.Series , index=runnum, value=[[lsmin,lsmax]]\n #runlsselect = pd.Series([[1,10],[14,15]], index=[327560,327560]) #pd.Series , index=runnum, value=[[lsmin,lsmax]]\n runlsselect = None\n\n #filenames = ['/home/zhen/data/7491/7491_327554_1812020507_1812020558.hd5','/home/zhen/data/7491/7491_327559_1812020558_1812020731.hd5','/home/zhen/data/7491/7491_327560_1812020731_1812021237.hd5']\n \n filenames = ['/home/zhen/data/7491/7491_327560_1812020731_1812021237.hd5']\n lumitype = 'BEST'\n filehandles = open_validfiles(filenames,lumitype)\n print(filehandles)\n for result in resultIter(filehandles,lumitype,'best',fillmin=7491,fillmax=7491,runlsselect=runlsselect,beamstatusid=11,withBX=True):\n print(result['fillnum'],result['runnum'],result['lsnum'],result['timestampsec'],result['beamstatusid'],result['cmson'],result['delivered'],result['recorded'])\n lumitype = 'HFOC'\n for result in resultIter(filehandles,lumitype,'raw',fillmin=7491,fillmax=7491,runlsselect=runlsselect,beamstatusid=11,withBX=False):\n print(result['fillnum'],result['runnum'],result['lsnum'],result['timestampsec'],result['beamstatusid'],result['cmson'],result['avglumi'])\n\n [f.close() for f in filehandles] \n \n\n","sub_path":"brilws/fileapi.py","file_name":"fileapi.py","file_ext":"py","file_size_in_byte":15804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"339107310","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# on 20160824\n\n\"\"\" 输入列表 输出列表的元素和位置,传入参数为列表 列表内元素为int型 \"\"\"\n\n__author__ = 'Hedwig'\n\n\ndef rank(list):\n for i in list:\n j = list.index(i) + 1\n yield i, j\n","sub_path":"script/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"16114727","text":"# -*- coding: utf-8 -*-\nfrom cart.cart import Cart\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom .models import OrderItem, Order\nfrom shop.models import Product\nfrom .forms import OrderCreateForm\nfrom .tasks import OrderCreated\nimport weasyprint\n\n\n@staff_member_required\ndef AdminOrderDetail(request, order_id):\n order = get_object_or_404(Order, id=order_id)\n return render(request, 'admin/orders/order/detail.html', {'order': order})\n\n\n@staff_member_required\ndef AdminOrderPDF(request, order_id):\n order = get_object_or_404(Order, id=order_id)\n html = render_to_string('orders/order/pdf.html', {'order': order})\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=order_{}.pdf'.format(order.id)\n weasyprint.HTML(string=html).write_pdf(response,\n stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/bootstrap.min.css')])\n return response\n\n\n@staff_member_required\ndef AdminOrderCompete(request, order_id):\n order = get_object_or_404(Order, id=order_id)\n if not order.completed:\n Order.objects.filter(id=order_id).update(completed=True, paid=True)\n\n for item in order.items.all():\n ordered = list(Product.objects.filter(name=item.product).values('ordered'))[0]['ordered'] - item.quantity\n stock = list(Product.objects.filter(name=item.product).values('stock'))[0]['stock'] - item.quantity\n Product.objects.filter(name=item.product).update(ordered=ordered, stock=stock)\n return render(request, 'admin/orders/order/complete.html', {'order': order})\n\n\ndef OrderCreate(request):\n cart = Cart(request)\n if request.method == 'POST':\n form = OrderCreateForm(request.POST)\n if form.is_valid():\n order = form.save()\n for item in cart:\n OrderItem.objects.create(order=order, product=item['product'],\n price=item['price'],\n quantity=item['quantity'])\n item['product'].ordered += item['quantity']\n item['product'].save()\n cart.clear()\n\n # Ассинхронная отправка сообщения\n OrderCreated.delay(order.id)\n request.session['order_id'] = order.id\n return redirect(reverse('payment:process'))\n\n form = OrderCreateForm()\n return render(request, 'orders/order/create.html', {'cart': cart,\n 'form': form})\n","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"264864758","text":"blobs = []\n\n\nclass Blob:\n def __init__(self,x,y):\n self.pos = PVector(x,y)\n self.r = float(10)\n self.vel = PVector.random2D()\n self.vel.mult(random(2,5))\n \n def update(self):\n self.pos.add(self.vel)\n \n if self.pos.x>width or self.pos.x <0:\n self.vel.x *= -1\n \n if self.pos.y >height or self.pos.y < 0:\n self.vel.y *= -1\n \n \n def show(self):\n \n ellipse(self.pos.x,self.pos.y,self.r*2,self.r*2)\n\n\n\ndef setup():\n global blobs\n \n size(360,150,P2D)\n frameRate(60)\n colorMode(HSB)\n noFill()\n stroke(0)\n strokeWeight(4)\n \n for i in range(2):\n blobs.append(Blob(random(width),random(height)))\n \ndef draw():\n background(0)\n\n loadPixels()\n for x in range(width):\n for y in range(height):\n index = x + (y*width)\n sum = 0\n for b in blobs:\n d = dist(x,y,b.pos.x,b.pos.y)\n if d > 0:\n col = (100*b.r)/d\n else:\n col = 0\n sum+= col\n \n pixels[index] = color(sum,255,255)\n \n updatePixels()\n \n for b in blobs:\n b.update()\n #b.show()\n \n","sub_path":"MetaBalls/MetaBalls/MetaBalls.pyde","file_name":"MetaBalls.pyde","file_ext":"pyde","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"165977128","text":"import json\nimport sys\nimport time\nfrom multiprocessing import Pool\n\nfrom addok import config, hooks\nfrom addok.helpers import iter_pipe, yielder\nfrom addok.helpers.index import deindex_document, index_document\n\n\ndef run(args):\n if args.filepath:\n for path in args.filepath:\n process_file(path)\n elif not sys.stdin.isatty(): # Any better way to check for stdin?\n process_stdin(sys.stdin)\n\n\n@hooks.register\ndef addok_register_command(subparsers):\n parser = subparsers.add_parser('batch', help='Batch import documents')\n parser.add_argument('filepath', nargs='*',\n help='Path to file to process')\n parser.set_defaults(func=run)\n\n\ndef preprocess_batch(d):\n return list(iter_pipe(d, config.BATCH_PROCESSORS))[0]\n\n\ndef process_file(filepath):\n print('Import from file', filepath)\n config.INDEX_EDGE_NGRAMS = False # Run command \"ngrams\" instead.\n with open(filepath) as f:\n batch(map(preprocess_batch, f))\n\n\ndef process_stdin(stdin):\n print('Import from stdin')\n batch(map(preprocess_batch, stdin))\n\n\n@yielder\ndef to_json(row):\n try:\n return json.loads(row)\n except ValueError:\n return None\n\n\ndef process(doc):\n if doc.get('_action') in ['delete', 'update']:\n deindex_document(doc['id'])\n if doc.get('_action') in ['index', 'update', None]:\n index_document(doc)\n\n\ndef batch(iterable):\n start = time.time()\n pool = Pool()\n count = 0\n chunk = []\n for doc in iterable:\n if not doc:\n continue\n chunk.append(doc)\n count += 1\n if count % 10000 == 0:\n pool.map(process, chunk)\n print(\"Done\", count, time.time() - start)\n chunk = []\n if chunk:\n pool.map(process, chunk)\n pool.close()\n pool.join()\n print('Done', count, 'in', time.time() - start)\n","sub_path":"addok/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"113755033","text":"import shutil\n\nwith open('Corpus/listing_by_tabs.csv') as f:\n\tlines = f.readlines()\n\ncount = 0\ncount1 = 0\nfor line in lines: \n\tlst = line.split('\\t')\n\tcount += 1\n\tif '大学' in line:\n\t\tprint(lst[0])\n\t\tshutil.copy('Corpus/raw/'+lst[0]+'.txt', 'sorted_by_daxue/raw')\n\t\tshutil.copy('Corpus/pos/'+lst[0]+'.txt', 'sorted_by_daxue/pos')\n\n\n","sub_path":"get_daxue_text.py","file_name":"get_daxue_text.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"479031268","text":"from bottle import get, request, run, response\nimport json\n\nwith open('fixtures/users.json') as f:\n data = json.load(f)\n\n@get(\"/users\")\ndef users():\n return dict(users=data)\n\n@get(\"/users/\")\ndef get_by_username(username):\n for user in data:\n if username in user['username']:\n response.content_type = 'application/json'\n return json.dumps(user)\n\n response.status = 404\n response.content_type = 'application/json'\n return json.dumps({'error': 'User {username} not found!'.format(username=username)})\n \nrun(reloader=True, host='0.0.0.0', port=8081)","sub_path":"user_api.py","file_name":"user_api.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"258986932","text":"#!/usr/bin/env python\n#Author:JiangZhiheng\n\nimport pika\n\ncreds_broker = pika.PlainCredentials('admin','admin')\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n host='192.168.0.12',credentials=creds_broker))\nchannel = connection.channel()\n\n#声明exchange名,定义exchange类型exchange_type='fanout'\nchannel.exchange_declare(exchange='logs',exchange_type='fanout')\n\n#随机生成一个queue\nresult = channel.queue_declare(exclusive=True)\nqueue_name = result.method.queue\n\n#queue绑定exchange\nchannel.queue_bind(exchange = 'logs',queue = queue_name)\n\nprint(' [*] Waiting for logs. To exit press CTRL+C')\ndef callback(ch, method, properties, body):\n print(\" [x] %r\" % body)\n\nchannel.basic_consume(callback,\n queue=queue_name,\n no_ack=True)\n\nchannel.start_consuming()\n\n\n","sub_path":"Python基础/Week12/RabbitMQ知识/基于Exchange的发布订阅/订阅者.py","file_name":"订阅者.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"462183","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 16:48:19 2019\n\n@author: aemdlabs\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\nimport h5py\nimport multiprocessing as mp\n\nimport Pyxi.DemodModule as Dem\nimport Pyxi.FileModule as FileMod\n\nimport gc\n\nif __name__ == '__main__':\n \n# plt.close('all')\n \n \n \n debug = False\n #llegir fitxer\n\n# Dictname = \"F:\\Dropbox (ICN2 AEMD - GAB GBIO)\\PyFET\\LuciaScripts\\Lucia\\DataSaved\\VgsSweep_Test4x4_PhaseOpt_PostEth\"\n# Dictname =\"F:\\\\Dropbox (ICN2 AEMD - GAB GBIO)\\\\PyFET\\\\LuciaScripts\\\\Lucia\\\\DCTests\\\\RTest_Normal_VgsSweep_2Row_2Col_VcmToGnd\"\n# Dictname =r\"F:\\Dropbox (ICN2 AEMD - GAB GBIO)\\PyFET\\LuciaScripts\\Lucia\\DCTests\\Transistor\\25_07_2019\\TransistorTest_DC_VgsSweep_8Row_1Col_VcmToVcm_20mV_35kHz_15sec_10sec\"\n Dictname =r\"C:\\Users\\Lucia\\Dropbox (ICN2 AEMD - GAB GBIO)\\PyFET\\LuciaScripts\\Lucia\\DCTests\\Transistor\\29_07_2019\\Test\"\n \n FileName = Dictname +'_0'+'.h5'\n \n hfile = h5py.File(FileName, 'r')\n RGain = 10e3\n FsOut = 5e3\n\n FloatType=True\n \n \n ProcsDict = FileMod.ReadArchivo(Dictname)\n #lectura de parametres\n\n #Calcul de Parametres per a Demodulcio\n nFFT = 2**17\n# DownFact = 1\n\n data = {}\n for k in hfile.keys():\n data[k] = hfile[k].value\n hfile.close()\n#%%\n if debug == True:\n fig, axTemp = plt.subplots()\n fig, axPsd = plt.subplots() \n for dem, DemArgs in ProcsDict.items():\n if FloatType:\n LSB = 1\n else:\n LSB = DemArgs['LSB'][DemArgs['dInd']]\n Iin = ((data[DemArgs['dset']][:, DemArgs['dInd']])*LSB)/DemArgs['Gain']\n \n Lab = str(DemArgs['dset']) +'-'+ str(DemArgs['dInd'])\n print(Lab)\n \n ff, psdadem = signal.welch(Iin, fs=DemArgs['Fs'], nperseg=nFFT, scaling='spectrum') \n axPsd.loglog(ff, psdadem, label=Lab)\n Peaks = signal.find_peaks(psdadem, threshold=1e-17)\n \n for pi in Peaks[0]:\n print(ff[pi], '-->>', np.sqrt(psdadem[pi]))\n axPsd.plot(ff[pi], psdadem[pi], 'k*')\n axTemp.plot(Iin[:int(DemArgs['Samps'])*2], label=Lab) \n \n#%% \n# fig, axTemp = plt.subplots()\n# for dem, DemArgs in ProcsDict.items():\n# Lab = str(DemArgs['dset']) +'-'+ str(DemArgs['dInd'])\n# if FloatType:\n# LSB = 1\n# else:\n# LSB = DemArgs['LSB'][DemArgs['dInd']]\n# \n# Iin = ((data[DemArgs['dset']][:, DemArgs['dInd']])*LSB)/DemArgs['Gain']\n# axTemp.plot(Iin, label=Lab) \n#%% \n Procs = []\n Labs = []\n AcqArgs = []\n DivProcs = 9 \n results = []\n for dem, DemArgs in ProcsDict.items():\n# if DemArgs['dInd'] != 1:\n# continue\n# if DemArgs['col'] == 'Col1':\n# continue\n if DemArgs['Ac'] == 0:\n continue\n if FloatType:\n LSB = 1\n else:\n LSB = DemArgs['LSB'][DemArgs['dInd']]\n Iin = ((data[DemArgs['dset']][:, DemArgs['dInd']])*LSB)/DemArgs['Gain']\n# Iin = ((data[DemArgs['dset']][:, 0])*LSB)/DemArgs['Gain']\n# Iin = ((data[DemArgs['dset']][:, DemArgs['dInd']])*DemArgs['LSB'][DemArgs['dInd']])/DemArgs['Gain']\n\n Lab = str(DemArgs['dset']) +'-'+ str(DemArgs['dInd'])\n print(Lab) \n DownFact = int(DemArgs['Fs']/FsOut)\n args=(Iin, DemArgs['Fs'], DemArgs['Fc'], int(DemArgs['Samps']), DownFact)\n Labs.append(Lab)\n Procs.append(args)\n AcqArgs.append(DemArgs)\n\n if len(Procs) > DivProcs:\n print(len(Procs))\n po = mp.Pool(len(Procs))\n res = po.starmap(Dem.DemodProc, Procs)\n for r in res:\n results.append(r)\n po.close()\n Procs = []\n print('Collect', gc.collect())\n \n if len(Procs) > 0: \n print(len(Procs))\n po = mp.Pool(len(Procs))\n res = po.starmap(Dem.DemodProc, Procs)\n for r in 
res:\n results.append(r)\n po.close()\n Procs = []\n print('Collect', gc.collect()) \n \n#%%\n# dtype = 'complex128'\n# MaxFileSize = 10000e6\n# SaveBuf = FileMod.FileBuffer(FileName=SaveName,\n# MaxSize=MaxFileSize,\n# nChannels=1,\n# dtype=dtype) \n \n# for ind, (dem, lab, acqargs) in enumerate(zip(results, Labs, AcqArgs)):\n# dsetname = ('R'+str(acqargs['dInd'])+acqargs['col']) \n# print(dsetname)\n# SaveBuf.InitDset(dsetname)\n# dem.resize(len(dem),1)\n# demArray = np.array(dem)\n# SaveBuf.AddSample(demArray)\n# \n# SaveBuf.close()\n#%%\n# plt.close('all')\n DelaySamps = 200\n\n xAxispar = 'Vgs' #modificar segun el AcqArgs para el que se quiera graficar\n\n fig, axR = plt.subplots() \n for ind, (dem, lab, acqargs) in enumerate(zip(results, Labs, AcqArgs)):\n adems = np.abs(dem[DelaySamps:])\n x = np.arange(adems.size)\n ptrend = np.polyfit(x, adems, 1)\n trend = np.polyval(ptrend, x)\n ACarr = (2*ptrend[1])/np.sqrt(2)\n# R= (0.015/np.sqrt(2))/((np.max(np.abs(dem[DelaySamps:]))-np.mean(np.abs(dem[DelaySamps:])))/np.sqrt(2))\n \n R= (acqargs['Ac']/np.sqrt(2))/ACarr\n axR.plot(acqargs[xAxispar], R, '*')\n \n#%%\n\n fig, axres = plt.subplots() \n axres.set_title('Vgs')\n xAxispar = 'Vgs' #modificar segun el AcqArgs para el que se quiera graficar\n OutDict = {}\n DataDict = {}\n# OutDataDict = {}\n \n Trts = set([('R'+str(a['dInd'])+a['col']) for a in AcqArgs])\n Vgs = np.sort(np.unique(([-a['Vgs'] for a in AcqArgs])))\n \n for t in Trts:\n OutDict[t] = np.array([])\n \n for lab in Labs:\n DataDict[lab] = np.array([])\n \n# for t in Vgs:\n# OutDataDict[t] = np.array([])\n \n for ind, (dem, lab, acqargs) in enumerate(zip(results, Labs, AcqArgs)):\n TName = ('R'+str(acqargs['dInd'])+acqargs['col']) \n \n adems = np.abs(dem[DelaySamps:])\n x = np.arange(adems.size)\n ptrend = np.polyfit(x, adems, 1)\n trend = np.polyval(ptrend, x)\n ACarr = (2*ptrend[1])/np.sqrt(2)\n \n OutDict[TName] = np.append(OutDict[TName], ACarr)\n \n DataDict[lab] = np.append(DataDict[lab], (adems-trend)) #no tiene en cuenta Vgs sweep CACA\n# OutDataDict[acqargs[xAxispar]] = np.append(OutDataDict[acqargs[xAxispar]], ACarr)\n# OutVar\n axres.plot(acqargs[xAxispar], ACarr, '*')\n axres.set_xlabel(xAxispar)\n\n\n plt.figure()\n GoodTrt = []\n for k, v in OutDict.items():\n if np.any(v>1e-7):\n GoodTrt.append(k)\n plt.plot(v, label=k)\n print(set(GoodTrt)) \n plt.legend()\n \n \n plt.figure()\n for k in set(GoodTrt):\n plt.plot(Vgs,OutDict[k][:-1], label=k)\n# plt.legend()\n#%%\n dtype = 'float64'\n MaxFileSize = 10000e6\n SaveName = Dictname + '_AcData'+'.h5'\n if os.path.isfile(SaveName):\n print('Remove File')\n os.remove(SaveName)\n \n SaveBuf = FileMod.FileBuffer(FileName=SaveName,\n MaxSize=MaxFileSize,\n nChannels=1,\n dtype=dtype) \n for Name, data in OutDict.items():\n dem = data\n print(Name)\n dsetname = str(Name) \n print(dsetname)\n SaveBuf.InitDset(dsetname)\n dem.resize(len(dem),1)\n demArray = np.array(dem)\n SaveBuf.AddSample(demArray) \n SaveBuf.close()\n \n SaveName = Dictname + '_RowDemodData'+'.h5'\n if os.path.isfile(SaveName):\n print('Remove File')\n os.remove(SaveName)\n \n SaveBuf = FileMod.FileBuffer(FileName=SaveName,\n MaxSize=MaxFileSize,\n nChannels=1,\n dtype=dtype) \n for Name, data in DataDict.items():\n dem = data\n print(Name)\n dsetname = str(Name) \n print(dsetname)\n SaveBuf.InitDset(dsetname)\n dem.resize(len(dem),1)\n demArray = np.array(dem)\n SaveBuf.AddSample(demArray) \n 
SaveBuf.close()\n","sub_path":"PyFreqMux/Sweeps/VgsSweepDemodInt16.py","file_name":"VgsSweepDemodInt16.py","file_ext":"py","file_size_in_byte":8331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"616021444","text":"\n# usage: (py3 a.py < a.in) > a.out\n\nimport time, sys, inspect\nfrom hopcroftkarp import HopcroftKarp\n\nlineno = lambda: inspect.currentframe().f_back.f_back.f_lineno\nprint = lambda *a, **k: __builtins__.print(str(lineno())+':', *a, file=sys.stderr, **k)\nmap = lambda *a: list(__builtins__.map(*a))\nreversed = lambda *a: list(__builtins__.reversed(*a))\n\n#---------------------------------------------\n\n'''\n\nso we have tuples of numbers\n (a1, b1)\n (a2, b2)\n (a2, b1)\n with left and right numbers non-overlapping\n -> bipartite undirected graph\n\n'longest-path-algorithm?'\n crossing from RTL\n\nuses one unique word => is real!\n we can do an implication chain to find out rite!?\n\nwhat if we have all duplicates?\n real\n (1,2)\n (3,4)\n fake\n (1,4)\n (3,2)\n impossible to find fake\n ((1,4),(3,2) could be orig and others fake)\n => non-constructive task\n\nwait\n paths of len 3 or more imply at least one fake\n (but len 4 doesn't imply 2 fakes)\n\nomg\n maximum matching in bipartite graph!!\n we had this in discrete structures :D\n\n'''\n\ndef run(data):\n\n graph = dict()\n\n # build graph for bipartite maximum-matching\n for a,b in data:\n a = 'A_' + a\n b = 'B_' + b\n\n if a not in graph:\n graph[a] = set([b])\n else:\n graph[a] |= set([b])\n\n # if b not in graph:\n # graph[b] = set([a])\n # else:\n # graph[b] |= set([a])\n\n # number of edges in left / right half of graph\n left = set()\n right = set()\n for a,b in data:\n left |= set([a])\n right |= set([b])\n\n x = HopcroftKarp(graph).maximum_matching()\n num_optimal = round(len(x) / 2)\n num_real_topics = num_optimal + (len(left) - num_optimal) + (len(right) - num_optimal)\n\n return len(data) - num_real_topics\n\n#---------------------------------------------\n\ndef read_case():\n m = int(input())\n return [input().split() for i in range(m)]\n\nfor i in range(int(input())):\n outstr = 'Case #'+str(i+1)+': '+str(run(read_case()))\n print(outstr, ' @ t =', time.clock())\n __builtins__.print(outstr)\n\n\n\n","sub_path":"solutions_5686313294495744_0/Python/xjcl/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"525488952","text":"#!/usr/bin/env python3\nimport sys\nimport os\nfrom Pegasus.DAX3 import *\n\nif len(sys.argv) != 2:\n print(\"Usage: %s PEGASUS_BIN\" % (sys.argv[0]))\n sys.exit(1)\n\n\n# Create a DAX\ndiamond = ADAG(\"diamond\")\n\n# Add some metadata\ndiamond.metadata(\"name\", \"diamond\")\ndiamond.metadata(\"createdby\", \"Karan Vahi\")\n\n# Add input file to the DAX-level replica catalog\na = File(\"f.a\")\na.addPFN(PFN(\"file://\" + os.getcwd() + \"/f.a\", \"local\"))\na.metadata(\"size\", \"1024\")\na.metadata(\"raw_input\", \"true\")\ndiamond.addFile(a)\n\n# Add executables to the DAX-level replica catalog\nkeg = PFN(\"file://\" + sys.argv[1] + \"/pegasus-keg\", \"local\")\ne_preprocess = Executable(namespace=\"diamond\", name=\"preprocess\", version=\"4.0\", os=\"linux\", arch=\"x86_64\", installed=False)\ne_preprocess.metadata(\"size\", \"2048\")\ne_preprocess.metadata(\"transformation\", \"preprocess\")\ne_preprocess.addPFN(keg)\ndiamond.addExecutable(e_preprocess)\n\ne_findrange = Executable(namespace=\"diamond\", name=\"findrange\", version=\"4.0\", os=\"linux\", arch=\"x86_64\", installed=False)\ne_findrange.metadata(\"size\", \"2048\")\ne_findrange.metadata(\"transformation\", \"findrange\")\ne_findrange.addPFN(keg)\ndiamond.addExecutable(e_findrange)\n\ne_analyze = Executable(namespace=\"diamond\", name=\"analyze\", version=\"4.0\", os=\"linux\", arch=\"x86_64\", installed=False)\ne_analyze.metadata(\"size\", \"2048\")\ne_analyze.metadata(\"transformation\", \"analyze\")\ne_analyze.addPFN(keg)\ndiamond.addExecutable(e_analyze)\n\n# Add a preprocess job\npreprocess = Job(e_preprocess)\npreprocess.metadata(\"time\", \"60\")\nb1 = File(\"f.b1\")\nb2 = File(\"f.b2\")\npreprocess.addArguments(\"-a preprocess\",\"-T60\",\"-i\",a,\"-o\",b1, \"-o\",b2)\npreprocess.uses(a, link=Link.INPUT)\npreprocess.uses(b1, link=Link.OUTPUT, transfer=True, register=True)\npreprocess.uses(b2, link=Link.OUTPUT, transfer=True, register=True)\ndiamond.addJob(preprocess)\n\n# Add left Findrange job\nfrl = Job(e_findrange)\nfrl.metadata(\"time\", \"60\")\nc1 = File(\"f.c1\")\nfrl.addArguments(\"-a findrange\",\"-T60\",\"-i\",b1,\"-o\",c1)\nfrl.uses(b1, link=Link.INPUT)\nfrl.uses(c1, link=Link.OUTPUT, transfer=True, register=True)\ndiamond.addJob(frl)\n\n# Add right Findrange job\nfrr = Job(e_findrange)\nfrr.metadata(\"time\", \"60\")\nc2 = File(\"f.c2\")\nfrr.addArguments(\"-a findrange\",\"-T60\",\"-i\",b2,\"-o\",c2)\nfrr.uses(b2, link=Link.INPUT)\nfrr.uses(c2, link=Link.OUTPUT, transfer=True, register=True)\ndiamond.addJob(frr)\n\n# Add Analyze job\nanalyze = Job(e_analyze)\nanalyze.metadata(\"time\", \"60\")\nd = File(\"f.d\")\nanalyze.addArguments(\"-a analyze\",\"-T60\",\"-i\",c1,c2,\"-o\",d)\nanalyze.uses(c1, link=Link.INPUT)\nanalyze.uses(c2, link=Link.INPUT)\nanalyze.uses(d, link=Link.OUTPUT, transfer=True, register=True)\ndiamond.addJob(analyze)\n\n# Add dependencies\ndiamond.depends(parent=preprocess, child=frl)\ndiamond.depends(parent=preprocess, child=frr)\ndiamond.depends(parent=frl, child=analyze)\ndiamond.depends(parent=frr, child=analyze)\n\n# Write the DAX to stdout\nimport sys\ndiamond.writeXML(sys.stdout)\n\n# Write the DAX to a file\nf = open(\"blackdiamond.dax\",\"w\")\ndiamond.writeXML(f)\nf.close()\n","sub_path":"test/core/046-aws-batch-black/blackdiamond.py","file_name":"blackdiamond.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"459856554","text":"import sqlite3\nimport datetime\nimport time\nimport pickle\nfrom sklearn import linear_model as lm\n\nfrom flask import Flask, jsonify, render_template\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route('/houses')\ndef houses():\n loaded_model = pickle.load(open(\"model_v2.sav\", 'rb'))\n\n conn = sqlite3.connect('sanfrancisco.db')\n\n c=conn.cursor()\n c.execute(\"SELECT Address, Longitude, Latitude, Sold_Price, Bedroom, Bathroom, Sq_ft, Days_on_Market, Original_Price, URL FROM Address\")\n\n rows=c.fetchall()\n output = []\n\n for row in rows:\n address = row[0]\n longitude = row[1]\n latitude = row[2]\n sold_price = \"{:,}\".format(row[3])\n bedroom = row[4]\n bathroom =row[5]\n sq_ft = row[6] \n original_price = row[8]\n estimate = \"___\"\n estimate_raw = 0\n if sq_ft is None: \n sq_ft = \"---\"\n else:\n if original_price is None:\n estimate = \"___\"\n else:\n estimate_raw = int(loaded_model.predict([[bedroom,bathroom,sq_ft/1000,original_price/1000000]])[0] * 1000000)\n estimate = \"$\" + \"{:,}\".format(estimate_raw)\n original_price = \"{:,}\".format(int(original_price))\n sq_ft = \"{:,}\".format(row[6])\n days_on_market=row[7]\n URL = row[9]\n\n entry = {}\n entry[\"address\"] = address\n entry[\"longitude\"] = longitude\n entry[\"latitude\"] = latitude\n #Info Window\n entry[\"sold_price\"] = sold_price\n entry[\"bedroom\"] = bedroom\n entry[\"bathroom\"] = bathroom\n entry[\"sq_ft\"] = sq_ft\n entry[\"days_on_market\"] = days_on_market\n entry[\"original_price\"] = original_price\n entry[\"url\"] = URL\n entry[\"Vanesstimate\"] = estimate\n entry[\"Vanesstimate_raw\"] = estimate_raw\n\n\n output.append(entry)\n return jsonify(output)\n\n@app.route('/bar')\ndef bar():\n conn = sqlite3.connect('sanfrancisco.db')\n\n c=conn.cursor()\n c.execute(\"SELECT Location, AVG(Sold_Price) AS AVG_Sold_Price FROM Address WHERE Sold_Price>0 GROUP BY Location ORDER BY AVG_Sold_Price DESC\")\n\n rows=c.fetchall()\n output = {}\n\n x=[]\n y=[]\n\n for row in rows:\n Location = row[0]\n sold_price=row[1]\n\n # Ignore \"San francisco\"\n if Location.lower().find(\"san francisco\") == 0:\n continue\n\n x.append(Location)\n y.append(sold_price) \n\n output[\"x\"] = x\n output[\"y\"] = y\n\n return jsonify(output)\n \n# Over asking %\n\n@app.route('/percentage')\ndef percentage():\n conn = sqlite3.connect('sanfrancisco.db')\n\n c=conn.cursor()\n c.execute(\"SELECT Location, SUM(Sold_Price), SUM(Original_Price), COUNT(Location) FROM Address WHERE Original_Price > 0 and Sold_Price>0 GROUP BY Location\")\n\n rows=c.fetchall()\n output = {}\n\n x=[]\n y=[]\n\n for row in rows:\n Location = row[0]\n sold_price=row[1]\n original_price = row[2]\n\n # Ignore \"San francisco\"\n if Location.lower().find(\"san francisco\") == 0:\n continue\n\n x.append(Location)\n avg_percentage = (sold_price - original_price)/original_price\n y.append(avg_percentage * 100) \n\n output[\"x\"] = x\n output[\"y\"] = y\n\n return jsonify(output)\n \n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"312905257","text":"\"\"\"\nThe ``beacon`` package contains code to start a Beacon Rest API.\n\"\"\"\n\n__title__ = 'Beacon v2.0'\n__version__ = VERSION = '2.0'\n__author__ = 'CRG developers'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Beacon 2.0 @ CRG, Barcelona'\n\nimport sys\nif sys.version_info < (3, 7):\n print(\"beacon-python requires python 3.7 or higher\", file=sys.stderr)\n sys.exit(1)\n\n# Send warnings using the package warnings to the logging system\n# The warnings are logged to a logger named 'py.warnings' with a severity of WARNING.\n# See: https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module\nimport logging\nimport warnings\nlogging.captureWarnings(True)\nwarnings.simplefilter(\"default\") # do not ignore Deprecation Warnings\n\n\n\nfrom logging.config import dictConfig\nfrom pathlib import Path\nimport yaml\nimport os\n\ndef load_logger():\n log_file = Path(__file__).parent / \"logger.yml\"\n with open(log_file, 'r') as stream:\n dictConfig(yaml.safe_load(stream))\n\n\n# Try to load the access levels yaml into a dict,\n# from the envvar BEACON_ACCESS_LEVELS if defined,\n# and [here]/access_levels.yml otherwise\ndef load_access_levels():\n filepath = Path(os.getenv('BEACON_ACCESS_LEVELS', Path(__file__).parent / \"access_levels.yml\"))\n if filepath.suffix not in ('.yaml', '.yml'):\n LOG.error(\"Unsupported format for %s\", filepath)\n raise ValueError('Unsupported format for Access Levels')\n with open(filepath, 'r') as stream:\n return yaml.safe_load(stream)\n","sub_path":"beacon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"107020355","text":"\nimport os\nimport logging\n\nfrom .version import __version__\nfrom .core import *\nfrom .loader import *\n\n\nLOG = logging.getLogger(\"pulse\")\nLOG.level = logging.DEBUG\n\nBUILTIN_ACTIONS_LOADED = False\n\ndef loadActionsFromDirectory(startDir):\n \"\"\"\n Search for and load BuildActions from the given directory,\n then register them for use.\n\n Args:\n startDir: A str path of the directory to search\n \"\"\"\n loader = BuildActionLoader()\n for config, action in loader.loadActionsFromDirectory(startDir):\n registerAction(config, action)\n\n\ndef loadBuiltinActions():\n \"\"\"\n Load all built-in pulse actions.\n \"\"\"\n global BUILTIN_ACTIONS_LOADED\n if not BUILTIN_ACTIONS_LOADED:\n actionsDir = os.path.join(os.path.dirname(__file__), 'actions')\n loadActionsFromDirectory(actionsDir)\n BUILTIN_ACTIONS_LOADED = True\n","sub_path":"src/pulse/scripts/pulse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"401291711","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 16 12:14:19 2019\n\n@author: tanvirkaur\n\"\"\"\n\nclass Solution(object):\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n stack = []\n tmp = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n node = stack.pop()\n tmp.append(node.val)\n if node.right:\n root = node.right\n for i in range(len(tmp)-1):\n if tmp[i+1] <= tmp[i]:\n return False\n return True","sub_path":"ValidateBST2.py","file_name":"ValidateBST2.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"15434259","text":"# -*- coding: utf-8 -*-\n\n# Created by: PyQt5 UI code generator 5.11.3\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QDialog\n\nfrom game_components.lobby import Lobby, GameState\n\n\nclass NewGameDlg(QDialog):\n def __init__(self, parent=None):\n super(NewGameDlg, self).__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.setWindowIcon(QIcon('icons\\icon.png'))\n self.connect_actions()\n\n self.lobby = None\n self.player_name = ''\n\n def connect_actions(self):\n self.ui.cb_infinite.clicked.connect(self.cb_infinite_clicked)\n\n def cb_infinite_clicked(self):\n if self.sender().isChecked():\n self.ui.sb_num_turns.setEnabled(False)\n else:\n self.ui.sb_num_turns.setEnabled(True)\n\n def submit(self):\n self.player_name = self.ui.player_name.text()\n game_name = self.ui.game_name.text()\n if game_name == '':\n game_name = 'Game of ' + self.player_name\n if self.ui.cb_infinite.isChecked():\n num_turns = -1\n else:\n num_turns = self.ui.sb_num_turns.value()\n num_players = self.ui.sb_num_players.value()\n self.lobby = Lobby(game_name, num_players, num_turns, GameState.INIT.value)\n self.accept()\n\n# Created by: PyQt5 UI code generator 5.11.3\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.setWindowModality(QtCore.Qt.ApplicationModal)\n Dialog.setFixedSize(QtCore.QSize(229, 248))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())\n Dialog.setSizePolicy(sizePolicy)\n Dialog.setAutoFillBackground(False)\n Dialog.setSizeGripEnabled(False)\n Dialog.setModal(True)\n self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)\n self.buttonBox.setGeometry(QtCore.QRect(-10, 210, 231, 32))\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.groupBox = QtWidgets.QGroupBox(Dialog)\n self.groupBox.setGeometry(QtCore.QRect(10, 10, 211, 191))\n self.groupBox.setObjectName(\"groupBox\")\n self.player_name = QtWidgets.QLineEdit(self.groupBox)\n self.player_name.setGeometry(QtCore.QRect(90, 30, 113, 20))\n self.player_name.setObjectName(\"player_name\")\n self.label = QtWidgets.QLabel(self.groupBox)\n self.label.setGeometry(QtCore.QRect(10, 30, 71, 21))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.groupBox)\n self.label_2.setGeometry(QtCore.QRect(10, 60, 71, 21))\n self.label_2.setObjectName(\"label_2\")\n self.game_name = QtWidgets.QLineEdit(self.groupBox)\n self.game_name.setGeometry(QtCore.QRect(90, 60, 113, 20))\n self.game_name.setObjectName(\"game_name\")\n self.sb_num_turns = QtWidgets.QSpinBox(self.groupBox)\n self.sb_num_turns.setEnabled(False)\n self.sb_num_turns.setGeometry(QtCore.QRect(110, 100, 42, 22))\n self.sb_num_turns.setObjectName(\"sb_num_turns\")\n self.label_3 = QtWidgets.QLabel(self.groupBox)\n self.label_3.setGeometry(QtCore.QRect(10, 100, 91, 21))\n self.label_3.setObjectName(\"label_3\")\n self.cb_infinite = QtWidgets.QCheckBox(self.groupBox)\n self.cb_infinite.setGeometry(QtCore.QRect(110, 130, 70, 17))\n self.cb_infinite.setChecked(True)\n self.cb_infinite.setObjectName(\"cb_infinite\")\n self.sb_num_players = QtWidgets.QSpinBox(self.groupBox)\n 
self.sb_num_players.setEnabled(True)\n self.sb_num_players.setGeometry(QtCore.QRect(110, 160, 42, 22))\n self.sb_num_players.setProperty(\"value\", 1)\n self.sb_num_players.setObjectName(\"sb_num_players\")\n self.label_4 = QtWidgets.QLabel(self.groupBox)\n self.label_4.setGeometry(QtCore.QRect(10, 160, 91, 21))\n self.label_4.setObjectName(\"label_4\")\n\n self.retranslateUi(Dialog)\n self.buttonBox.accepted.connect(Dialog.submit)\n self.buttonBox.rejected.connect(Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"New Game\"))\n self.groupBox.setTitle(_translate(\"Dialog\", \"Game Parameters\"))\n self.label.setText(_translate(\"Dialog\", \"Player\\'s name:\"))\n self.label_2.setText(_translate(\"Dialog\", \"Game\\'s name:\"))\n self.label_3.setText(_translate(\"Dialog\", \"Number of turns:\"))\n self.cb_infinite.setText(_translate(\"Dialog\", \"infinite\"))\n self.label_4.setText(_translate(\"Dialog\", \"Number of players:\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = NewGameDlg()\n\n Dialog.show()\n sys.exit(app.exec_())\n\n","sub_path":"source/graphics/new_game_dlg.py","file_name":"new_game_dlg.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"401290648","text":"import logging\nimport os\nimport torch\nfrom torchsummary import summary\nfrom src.runners.audio_runner import AudioRunner\nfrom src.experiments.base_experiment import BaseExperiment\nfrom src.runners.run_parameters import RunParameters\nfrom src.datasets.diskds.disk_storage import RandomSubsampleWindowGenerationStrategy\nfrom src.datasets.diskds.disk_dataset import DiskDataset\nfrom src.datasets.diskds.single_file_disk_storage import SingleFileDiskStorage\nfrom src.models.res_net_akamaster_audio import *\nfrom src.models.working_model_loader import *\nfrom src.datasets.diskds.sox_transforms import FileLoadingSoxEffects\nfrom src.runners.run_parameter_keys import R\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport logging\nfrom tqdm import tqdm\nfrom src.runners.spectrogram.spectrogram_generator import SpectrogramGenerator\nimport matplotlib.pyplot as plt\nimport librosa\nimport torchaudio\nfrom src.util.mutagen_utils import read_mp3_metadata\nfrom src.datasets.diskds.memory_file_storage import MemoryFileDiskStorage\nfrom src.config import Config\nimport matplotlib.cm\n\ncmap = matplotlib.cm.get_cmap('plasma')\n\nlog = logging.getLogger(__name__)\n\n\nclass MonteSingleFileTester(BaseExperiment):\n\n def get_experiment_default_parameters(self):\n return {\n R.DATASET_NAME: 'disk-ds(/media/andrius/FastBoi/bakis_data/final22k/train)',\n # R.DATASET_NAME: 'disk-ds(/home/andrius/git/searchify/resampled_music)',\n R.DISKDS_NUM_FILES: '9500',\n R.BATCH_SIZE_TRAIN: '75',\n R.CLUSTERING_MODEL: 'mass',\n R.MODEL_SAVE_PATH: 'zoo/9500massv3',\n R.EPOCHS: '4',\n R.BATCH_SIZE_VALIDATION: '150',\n R.DISKDS_WINDOW_HOP_TRAIN: str((2**17)),\n R.DISKDS_WINDOW_LENGTH: str((2**17)),\n R.DISKDS_TRAIN_FEATURES: 'data,onehot',\n R.DISKDS_USE_SOX_RANDOM_PRE_SAMPLING_TRAIN: 'False',\n }\n\n def generate_full_file_spectrogram(self, filepath: str, config: Config, ax):\n print(\"Generating full spectrogram\")\n scale_hops = 10\n spectrogram_t_viz = torchaudio.transforms.Spectrogram(\n n_fft=2048*scale_hops, win_length=2048*scale_hops, hop_length=1024*scale_hops, power=None\n ).to(config.run_device) # generates a complex spectrogram\n mel_t_viz = torchaudio.transforms.MelScale(n_mels=256, sample_rate=config.sample_rate).to(config.run_device)\n norm_t = torchaudio.transforms.ComplexNorm(power=2).to(config.run_device)\n ampToDb_t = torchaudio.transforms.AmplitudeToDB().to(config.run_device)\n\n full_samples, sample_rate = torchaudio.load(filepath)\n full_samples = full_samples.to(\"cpu\")\n full_samples, sample_rate = FileLoadingSoxEffects(sample_rate, config.sample_rate, False).forward(full_samples)\n print(f\"Full samsples shape: {full_samples.shape}\")\n full_samples = full_samples.to(config.run_device)\n spectrogram = spectrogram_t_viz(full_samples.view(1, -1))\n spectrogram = norm_t(spectrogram)\n spectrogram = mel_t_viz(spectrogram)\n spectrogram = ampToDb_t(spectrogram)\n spectrogram = spectrogram.cpu().detach().numpy()[0]\n print(f\"Spectrogram shape: {spectrogram.shape}\")\n librosa.display.specshow(spectrogram, sr=config.sample_rate, hop_length=1024*scale_hops,\n x_axis='time', y_axis='mel', ax=ax, cmap=cmap)\n\n def run(self):\n log = logging.getLogger(__name__)\n run_params = super().get_run_params()\n config = Config()\n model_save_path = run_params.get(R.MODEL_SAVE_PATH)\n model, files = load_working_model(run_params, model_save_path)\n\n # loading oneshot model\n # model = model.to(\"cpu\")\n # centroids = model.classification[-1].centroids.data\n # 
new_centroids = torch.cat((centroids.to(\"cpu\"), torch.zeros((1, centroids.shape[-1]))))\n # masses = model.classification[-1].cluster_mass.data\n # masses = torch.cat((masses, torch.tensor(0.02877584192901845).view(-1)))\n\n # model.classification[-1].centroids = torch.nn.Parameter(torch.zeros(new_centroids.shape[-2],centroids.shape[-1]))\n # model.classification[-1].cluster_mass = torch.nn.Parameter(torch.zeros(masses.shape))\n # model.load_state_dict(torch.load(\"temp_oneshot.pth\"))\n # model = model.to(config.run_device)\n\n target_file = \"/media/andrius/FastBoi/test_samples/The Strokes - Someday (FULL).mp3\"\n \n # target_file = \"/media/andrius/FastBoi/bakis_data/final22k/train/Adele - Hello.mp3\"\n # target_file = \"/media/andrius/FastBoi/bakis_data/final22k/train/New Order - Blue Monday.mp3\"\n if not os.path.isfile(target_file):\n raise RuntimeError(f\"Requested file not found at: {target_file}\")\n\n file_info = torchaudio.backend.sox_io_backend.info(target_file)\n w_len = 2**17\n topn = 10\n generation_strategy = RandomSubsampleWindowGenerationStrategy(window_len=w_len, average_hop=int((w_len)*0.01), overread=2.5)\n base_dataset = DiskDataset(\n target_file,\n file_limit=0,\n features=[\"data\", \"onehot\"],\n formats=[\".mp3\"],\n window_generation_strategy=generation_strategy,\n storage_type=SingleFileDiskStorage,\n sox_effects=FileLoadingSoxEffects(initial_sample_rate=file_info.sample_rate, final_sample_rate=config.sample_rate, random_pre_resampling=False)\n )\n # base_dataset = MemoryFileDiskStorage(\n # target_file,\n # format=\"mp3\",\n # run_params=run_params,\n # features=[\"data\"],\n # window_generation_strategy=generation_strategy\n # )\n loader = DataLoader(base_dataset, shuffle=False, batch_size=64, num_workers=6)\n num_batches = len(loader)\n print(f\"Files: {len(files)}\")\n file_duration = int(file_info.num_frames/file_info.sample_rate)\n spectrogram_generator = SpectrogramGenerator(config)\n predicted_total = 0\n predicted_correctly = 0\n predicted_correctly_topk = 0\n top1_buckets = [[0, 0] for _ in range(file_duration)]\n topk_buckets = [[0, 0] for _ in range(file_duration)]\n num_epochs = int(run_params.get(R.EPOCHS))\n for i in range(num_epochs):\n pbar = tqdm(enumerate(loader), total=len(loader), leave=True)\n model.to(config.run_device)\n model.train(mode=False)\n for i, data in pbar:\n xb = data[\"samples\"]\n start_times = data[\"window_start\"]\n xb = xb.to(config.run_device)\n yb = data[\"onehot\"]\n yb = yb.to(config.run_device)\n spectrogram = spectrogram_generator.generate_spectrogram(\n xb, narrow_to=128,\n timestretch=False, random_highpass=False,\n random_bandcut=False, normalize_mag=True\n )\n outputs = model(spectrogram).detach()\n output_cat = outputs\n # yb = torch.full((outputs.shape[-2],), 9500).to(config.run_device)\n\n top_cats = output_cat.topk(topn)\n target_expanded = yb.view((-1,1)).expand_as(top_cats.indices).detach()\n topk_correct = target_expanded.eq(top_cats.indices)\n predicted_correctly_topk += topk_correct.sum().item()\n correct_topk = topk_correct.sum(dim=-1)\n output_cat = torch.argmax(outputs, dim=1)\n # print(f\"Predicted category:{output_cat}\")\n target = yb.detach().view(-1)\n correct = target.eq(output_cat).detach()\n correct_predictions_in_batch = correct.sum().item()\n predicted_total += len(target)\n predicted_correctly += correct_predictions_in_batch\n pbar.set_description(\n f\"running validation accuracy: TOP-1:{predicted_correctly/predicted_total:.3%}, TOP-{topn}: 
{predicted_correctly_topk/predicted_total:.3%}\")\n for bi in range(len(target)):\n # iterate over batch and add to the counters:\n bi_correct = correct[bi].item()\n bi_start_time = start_times.detach()[bi]\n start_time_i = int(bi_start_time/file_info.sample_rate)\n if(bi_correct):\n # print(f\"Correct prediction at time: {start_time_i}\")\n top1_buckets[start_time_i][0] = top1_buckets[start_time_i][0] + 1\n top1_buckets[start_time_i][1] = top1_buckets[start_time_i][1] + 1\n\n bi_topk_correct = correct_topk[bi]\n if(bi_topk_correct):\n topk_buckets[start_time_i][0] = topk_buckets[start_time_i][0] + 1\n topk_buckets[start_time_i][1] = topk_buckets[start_time_i][1] + 1\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n self.generate_full_file_spectrogram(target_file, config, ax)\n lim_bottom_y, lim_top_y = ax.get_ylim()\n ax2 = ax.twinx()\n ax2.plot(range(file_duration), list([0 if x[1] == 0 else (x[0]/x[1])*100 for x in top1_buckets]), 'g', linewidth=3, label=\"top-1\")\n ax2.plot(range(file_duration), list([0 if x[1] == 0 else (x[0]/x[1])*100 for x in topk_buckets]), 'm', linewidth=3, label=f\"top-{topn}\")\n plt.legend(loc=\"upper left\")\n ax2.set_ylabel('Accuracy')\n metadata = read_mp3_metadata(target_file)\n display_name = metadata[\"artist\"] + \" - \" + metadata[\"title\"]\n fig.suptitle(f'Accuracy for \\\"{display_name}\\\"', fontsize=16)\n plt.show()\n\n @staticmethod\n def help_str():\n return \"\"\"Runs a monte-carlo style experiment checking on which parts of a file are identified by the model\"\"\"\n","sub_path":"src/experiments/monte_single_file_tester.py","file_name":"monte_single_file_tester.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"255395770","text":"# Data preprocessing\n# 1. Add header to data\n# 2. Remove unknown: sed '/\\?/d' adult.data > adult_known.data\n# 3. Remove tailing . in \"adult.test\": sed 's/.$//' adult_known.test > adult_known.test2\n\nimport sklearn\nimport sklearn.tree\nimport sklearn.ensemble\nimport sklearn.metrics\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport scipy.optimize\nimport pdb\nimport tensorflow as tf\n\n# because we need to encode categorical feature, have to concate dataframe and then split\ndf_train_raw = pd.read_csv('adult_known.data', sep=', ', engine='python')\ndf_test_raw = pd.read_csv('adult_known.test', sep=', ', engine='python')\n\nn_train = len(df_train_raw)\nn_test = len(df_test_raw)\ndf_raw = pd.concat([df_train_raw, df_test_raw])\ndf = pd.get_dummies(df_raw, columns=['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country', 'Y'])\n\n# binrary feature will be mapped to two categories. Remove one of them.\ndf = df.drop(columns=['sex_Male', 'Y_<=50K'])\nX = df.drop(columns=['Y_>50K'])\ngroup_label = X['sex_Female']\n\nscaler = sklearn.preprocessing.StandardScaler()\nX_scaled = scaler.fit_transform(X)\nY = df['Y_>50K']\n\nX_train = X_scaled[:n_train]\nX_test = X_scaled[n_train:]\nY_train = Y.iloc[:n_train]\nY_test = Y.iloc[n_train:]\n\nindex_male_train = np.where(group_label[:n_train] == 0)[0].astype(np.int32)\nindex_female_train = np.where(group_label[:n_train] == 1)[0].astype(np.int32)\nindex_male_true_train = np.where(np.logical_and(group_label[:n_train] == 0, Y_train==1))[0].astype(np.int32)\nindex_male_false_train = np.where(np.logical_and(group_label[:n_train] == 0, Y_train==0))[0].astype(np.int32)\nindex_female_true_train = np.where(np.logical_and(group_label[:n_train] == 1, Y_train==1))[0].astype(np.int32)\nindex_female_false_train = np.where(np.logical_and(group_label[:n_train] == 1, Y_train==0))[0].astype(np.int32)\n\nindex_male_test = np.where(group_label[n_train:] == 0)[0].astype(np.int32)\nindex_female_test = np.where(group_label[n_train:] == 1)[0].astype(np.int32)\nindex_male_true_test = np.where(np.logical_and(group_label[n_train:] == 0, Y_test==1))[0].astype(np.int32)\nindex_male_false_test = np.where(np.logical_and(group_label[n_train:] == 0, Y_test==0))[0].astype(np.int32)\nindex_female_true_test = np.where(np.logical_and(group_label[n_train:] == 1, Y_test==1))[0].astype(np.int32)\nindex_female_false_test = np.where(np.logical_and(group_label[n_train:] == 1, Y_test==0))[0].astype(np.int32)\n\n# put Y into one hot label\nY_train = np.stack([1-Y_train, Y_train]).T\nY_test = np.stack([1-Y_test, Y_test]).T\n\nDIM_INPUT = X_train.shape[1]\nDIM_HIDDEN = 256\nDIM_OUTPUT = 2\n\nX_placeholder = tf.placeholder(tf.float32, [None, DIM_INPUT])\nY_placeholder = tf.placeholder(tf.float32, [None, DIM_OUTPUT])\nindex_male_placeholder = tf.placeholder(tf.int32, [None])\nindex_female_placeholder = tf.placeholder(tf.int32, [None])\nindex_male_true_placeholder = tf.placeholder(tf.int32, [None])\nindex_male_false_placeholder = tf.placeholder(tf.int32, [None])\nindex_female_true_placeholder = tf.placeholder(tf.int32, [None])\nindex_female_false_placeholder = tf.placeholder(tf.int32, [None])\n\n# w is the importance of female\n# use clip instead of sigmoid to avoid saturation. 
Make training on w faster\n# but it has a problem: once w goes outside [0, 1], it loses its gradient and can't get back\nraw_w = tf.Variable(0.5, name='w')\nw = tf.clip_by_value(raw_w, 0, 1)\n\n# alpha: importance of imparity loss\n# beta: importance of imparity loss + outcome loss\nalpha = 0.5\nbeta = 0.5\n\nL1_output = tf.layers.dense(X_placeholder, DIM_HIDDEN, activation=tf.nn.tanh)\noutput = tf.layers.dense(L1_output, DIM_OUTPUT, activation=None)\n\nprob = tf.nn.softmax(output)\nprob_male = tf.nn.embedding_lookup(prob, index_male_placeholder)\nprob_female = tf.nn.embedding_lookup(prob, index_female_placeholder)\nprob_male_true = tf.nn.embedding_lookup(prob, index_male_true_placeholder)\nprob_male_false = tf.nn.embedding_lookup(prob, index_male_false_placeholder)\nprob_female_true = tf.nn.embedding_lookup(prob, index_female_true_placeholder)\nprob_female_false = tf.nn.embedding_lookup(prob, index_female_false_placeholder)\n\nloss_imparity = tf.math.squared_difference(w*tf.reduce_mean(prob_female[:, 1]), (1-w)*tf.reduce_mean(prob_male[:, 1])) \\\n\t\t\t + tf.math.squared_difference(w*tf.reduce_mean(prob_female[:, 0]), (1-w)*tf.reduce_mean(prob_male[:, 0]))\n\nloss_outcome = -w*(tf.reduce_mean(prob_female_true[:, 1]) + tf.reduce_mean(prob_female_false[:, 0])) \\\n\t\t\t - (1-w)*(tf.reduce_mean(prob_male_true[:, 1]) + tf.reduce_mean(prob_male_false[:, 0]))\n\npred = tf.math.argmax(prob, axis=1)\ndiff = tf.to_float(pred) - Y_placeholder[:, 1]\naccuracy = 1 - tf.math.reduce_mean(tf.math.abs(diff))\nloss_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_placeholder, logits=output))\nloss_total = beta * (alpha * loss_imparity + (1-alpha) * loss_outcome) + (1-beta)*loss_entropy\n\n# remove regularization on w\nvariables = [v for v in tf.trainable_variables() if v != raw_w]\nfor v in variables:\n\tloss_total += 1e-4 * tf.nn.l2_loss(v) + 1e-6 * tf.losses.absolute_difference(v, tf.zeros(tf.shape(v)))\n\nlr = tf.Variable(0.01, name='lr', trainable=False)\nlr_decay_op = lr.assign(lr * 0.95)\noptimizer = tf.train.AdamOptimizer(lr)\ntrain_op = optimizer.minimize(loss_total)\n\nwith tf.Session() as sess:\n\tsess.run(tf.global_variables_initializer())\n\n\twait = 0\n\tsmallest_loss_total_train = float('inf')\n\tpatience_lr_decay = 5\n\n\tfor epoch in range(1000):\n\t\tw_train, loss_total_train, loss_entropy_train, accuracy_train, loss_imparity_train, loss_outcome_train, train_step = sess.run(\n\t\t\t[w, loss_total, loss_entropy, accuracy, loss_imparity, loss_outcome, train_op],\n\t\t\t\tfeed_dict={\n\t\t\t\t\tX_placeholder: X_train,\n\t\t\t\t\tY_placeholder: Y_train,\n\t\t\t\t\tindex_male_placeholder: index_male_train,\n\t\t\t\t\tindex_female_placeholder: index_female_train,\n\t\t\t\t\tindex_male_true_placeholder: index_male_true_train,\n\t\t\t\t\tindex_male_false_placeholder: index_male_false_train,\n\t\t\t\t\tindex_female_true_placeholder: index_female_true_train,\n\t\t\t\t\tindex_female_false_placeholder: index_female_false_train,\n\t\t\t\t}\n\t\t)\n\n\t\tloss_total_test, loss_entropy_test, accuracy_test, loss_imparity_test, loss_outcome_test = sess.run(\n\t\t\t[loss_total, loss_entropy, accuracy, loss_imparity, loss_outcome],\n\t\t\t\tfeed_dict={\n\t\t\t\t\tX_placeholder: X_test,\n\t\t\t\t\tY_placeholder: Y_test,\n\t\t\t\t\tindex_male_placeholder: index_male_test,\n\t\t\t\t\tindex_female_placeholder: index_female_test,\n\t\t\t\t\tindex_male_true_placeholder: index_male_true_test,\n\t\t\t\t\tindex_male_false_placeholder: 
index_male_false_test,\n\t\t\t\t\tindex_female_true_placeholder: index_female_true_test,\n\t\t\t\t\tindex_female_false_placeholder: index_female_false_test,\n\t\t\t\t}\n\t\t)\n\n\t\tif loss_total_train <= smallest_loss_total_train:\n\t\t\tsmallest_loss_total_train = loss_total_train\n\t\t\twait = 0\n\t\t\tprint('New smallest')\n\t\telse:\n\t\t\twait += 1\n\t\t\tprint('Wait {}'.format(wait))\n\t\t\tif wait % patience_lr_decay == 0:\n\t\t\t\tsess.run(lr_decay_op)\n\t\t\t\tprint('Apply lr decay, new lr: %f' % lr.eval())\n\n\t\tprint(f'Epoch: {epoch}, W: {w_train}\\ntotal_train: {loss_total_train}, entropy_train: {loss_entropy_train}, accuracy_train: {accuracy_train}, imparity_train: {loss_imparity_train}, outcome_train: {loss_outcome_train}\\ntotal_test : {loss_total_test}, entropy_test : {loss_entropy_test}, accuracy_test : {accuracy_test}, imparity_test : {loss_imparity_test}, outcome_test : {loss_outcome_test}\\n')\n\n\tpdb.set_trace()\n\tprint('Pause before exit')\n","sub_path":"main_mlp.py","file_name":"main_mlp.py","file_ext":"py","file_size_in_byte":7446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"169526857","text":"from pydub import AudioSegment\nimport numpy as np\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras import regularizers\nfrom sklearn.preprocessing import MinMaxScaler\nimport os\nimport sys\n\nsonglen = 2500000\n# Create an empty np array of size n where n is the total num of songs\nsong_list = os.listdir(\"TestSongs\")\nnum_songs = len(song_list)\nsong_data = np.zeros(shape=(num_songs,songlen))\ncount = 0\n# Get the samples from each song\nfor fname in os.listdir(\"TestSongs/\"):\n song = AudioSegment.from_wav(\"TestSongs/\"+fname)\n samples = np.array(song.get_array_of_samples())\n samples = samples[:songlen]\n song_data[count] = np.array(samples)\n count = count + 1\n\nencoding_dim = 2\n\ninput_song = Input(shape=(songlen,))\nencoded = Dense(encoding_dim, activation='relu',\n activity_regularizer=regularizers.l1(10e-9))(input_song)\ndecoded = Dense(songlen, activation='sigmoid')(encoded)\n# Map input to its reconstruction\nautoencoder = Model(input_song, decoded)\n\n# Map input to its encoded representation\nencoder = Model(input_song, encoded)\n\nencoded_input = Input(shape=(encoding_dim,))\ndecoder_layer = autoencoder.layers[-1]\ndecoder = Model(encoded_input, decoder_layer(encoded_input))\n\nautoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\nx_train = song_data\nx_test = song_data\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n#print(x_train.dtype)\nscaler = MinMaxScaler(feature_range=(0,1))\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n#print(np.min(x_train))\n#print(np.max(x_train))\n\n#print(x_train.shape)\n#print(x_test.shape)\n\n# Training the data for e epochs\nautoencoder.fit(x_train, x_train,\n epochs=int(sys.argv[1]),\n batch_size=2,\n shuffle=True,\n validation_data=(x_test, x_test))\n\nencoded_songs = encoder.predict(x_test)\ndecoded_songs = decoder.predict(encoded_songs)\n\nprint(encoded_songs)\nprint(decoded_songs)\n\nnp.save(str(num_songs)+'_sparse_encoded_songs_'+sys.argv[1]+'_epochs',encoded_songs)\n\n\nimport matplotlib.pyplot as plt\nencoded_songs = scaler.fit_transform(encoded_songs)\nprint(encoded_songs)\ncount = 0\nplt.figure()\nfor fname in os.listdir(\"TestSongs/\"):\n print(encoded_songs[count][0])\n print(encoded_songs[count][1])\n plt.scatter(encoded_songs[count][0], encoded_songs[count][1], s=700,\n c=(int(encoded_songs[count][1]/10.0),0,int(1-encoded_songs[count][1]/10.0)),\n marker=r\"$ {} $\".format(fname[:4]), edgecolors='none')\n count += 1\nplt.show()\n","sub_path":"ae-sparse.py","file_name":"ae-sparse.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"525878971","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_colormaps(palette, min_val, max_val):\n from matplotlib.colors import BoundaryNorm\n # define the colormap\n cmap = plt.get_cmap(palette)\n\n # extract all colors from the Reds map\n cmaplist = [cmap(i) for i in range(cmap.N)]\n cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)\n\n # define the bins and normalize and forcing 0 to be part of the colorbar!\n bounds = np.arange(min_val, max_val, 1)\n idx=np.searchsorted(bounds, 0)\n bounds=np.insert(bounds, idx, 0)\n norm = BoundaryNorm(bounds, cmap.N)\n return cmap, norm\n\n\ndef rewrite_array_as_list_for_plotting(array):\n y = []\n x = []\n for item in array:\n y.extend(list(item))\n y.append(None)\n x.extend(list(range(len(item))))\n x.append(None)\n return x, y\n","sub_path":"plotting/generic_plotting_functions.py","file_name":"generic_plotting_functions.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"12890461","text":"##################################################################################\n# FLASK TEST CODE\n##################################################################################\nfrom flask import Flask, url_for\nfrom menu_scraper import *\n\napp = Flask(__name__)\n\n@app.route('/')\ndef api_root():\n return 'Welcome'\n\n\nfrom flask import Response\n\n@app.route('/hello', methods = ['GET'])\ndef api_hello():\n import json\n data = {\n 'hello' : 'world',\n 'number' : 3\n }\n js = json.dumps(data)\n\n resp = Response(js, status=200, mimetype='application/json')\n resp.headers['Link'] = 'http://luisrei.com'\n\n return resp\n\n\nfrom flask import escape, Response\n\n@app.route('/date/', methods = ['GET'])\ndef api_date(date):\n #date = request.args.get('date')\n\n if date != '':\n ms = MenuScraper()\n y = int(date[:4])\n m = int(date[4:6])\n d = int(date[6:8])\n date_string = \"Month: \" + m + \" Day: \" + d + \" Year: \" + y\n print(date_string)\n return Response(ms.return_menu_json(), mimetype=\"application/json\")\n else:\n return Response(\"Invalid Date\", status=404)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"python/test-flask.py","file_name":"test-flask.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"231037517","text":"'''\nCopyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n\nNVIDIA CORPORATION and its licensors retain all intellectual property\nand proprietary rights in and to this software, related documentation\nand any modifications thereto. Any use, reproduction, disclosure or\ndistribution of this software and related documentation without an express\nlicense agreement from NVIDIA CORPORATION is strictly prohibited.\n'''\nfrom engine.pyalice import Application\nimport argparse\n\nif __name__ == '__main__':\n # Parse arguments\n parser = argparse.ArgumentParser(description=\"Navsim navigation app\")\n parser.add_argument(\n \"--map_json\",\n help=\"The path to the map json to load\",\n default=\"apps/assets/maps/virtual_small_warehouse.json\")\n parser.add_argument(\n \"--robot_json\",\n help=\"The path to the robot json to load\",\n default=\"packages/navsim/robots/carter.json\")\n parser.add_argument(\n \"--more\",\n help=\"A comma separated list of additional json files to load\")\n parser.add_argument(\n \"--mission_robot_name\",\n help=\"Accept missions from the remote mission server for the robot with the given name\")\n parser.add_argument(\n \"--mission_host\",\n help=\"The ip address or hostname of the host to connect to and receive missions from\",\n default=\"localhost\")\n parser.add_argument(\n \"--mission_port\",\n help=\"The TCP port to connect to the mission server\",\n type=int,\n default=9998)\n args = parser.parse_args()\n\n # Create and start the app\n more_jsons = args.map_json + \",\" + args.robot_json\n if args.more:\n more_jsons += \",\" + args.more\n app_path = \"apps/navsim/navsim_navigate.app.json\"\n app = Application(app_filename=app_path, more_jsons=more_jsons)\n\n if args.mission_robot_name:\n # Load the mission subgraph and set the config based on the input parameters\n app.load(\n \"packages/behavior_tree/apps/missions.graph.json\")\n app.nodes[\"tcp_client\"].components[\"JsonTcpClient\"].config[\"host\"] = args.mission_host\n app.nodes[\"tcp_client\"].components[\"JsonTcpClient\"].config[\"port\"] = args.mission_port\n app.nodes[\"mission_control\"].components[\"NodeGroup\"].config[\"node_names\"] = \\\n [\"goals.goal_behavior\"]\n app.nodes[\"robot_name\"].components[\"JsonMockup\"].config[\"json_mock\"] = \\\n {\"text\":args.mission_robot_name}\n run_on_start = app.nodes[\"goals.run_on_start\"]\n # Change the start behavior to the mission behavior\n nodes = run_on_start.components[\"NodeGroup\"].config[\"node_names\"]\n run_on_start.components[\"NodeGroup\"].config[\"node_names\"] = nodes + [\"mission_control\"]\n run_on_start.components[\"SwitchBehavior\"].config[\"desired_behavior\"] = \"mission_control\"\n # Send the navigation output back through the json tcp client\n app.connect(app.nodes[\"navigation.subgraph\"].components[\"interface\"], \"feedback\",\n app.nodes[\"tcp_client\"].components[\"JsonTcpClient\"], \"feedback\")\n\n app.start_wait_stop()\n","sub_path":"apps/navsim/navsim_navigate.py","file_name":"navsim_navigate.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"328906602","text":"import copy\nimport logging\n\nfrom django.conf import settings\nfrom django.views.debug import ExceptionReporter\nfrom behind import jarvis\n\n\nclass SlackExceptionHandler(logging.Handler):\n \"\"\"\n Code from djang-slack app\n An exception log handler that sends log entries to a Slack channel.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n logging.Handler.__init__(self)\n\n def emit(self, record):\n try:\n request = record.request\n\n internal = 'internal' if request.META.get('REMOTE_ADDR') in \\\n settings.INTERNAL_IPS else 'EXTERNAL'\n\n subject = '{} ({} IP): {}'.format(\n record.levelname,\n internal,\n record.getMessage(),\n )\n except Exception:\n subject = '{}: {}'.format(\n record.levelname,\n record.getMessage(),\n )\n request = None\n subject = self.format_subject(subject)\n\n # Since we add a nicely formatted traceback on our own, create a copy\n # of the log record without the exception data.\n no_exc_record = copy.copy(record)\n no_exc_record.exc_info = None\n no_exc_record.exc_text = None\n\n if record.exc_info:\n exc_info = record.exc_info\n else:\n exc_info = (None, record.getMessage(), None)\n\n reporter = ExceptionReporter(request, is_email=True, *exc_info)\n\n try:\n tb = reporter.get_traceback_text()\n except:\n tb = \"(An exception occured when getting the traceback text)\"\n\n if reporter.exc_type:\n tb = \"{} (An exception occured when rendering the \" \\\n \"traceback)\".format(reporter.exc_type.__name__)\n message = \"{}\\n\\n{}\".format(self.format(no_exc_record), tb)\n text = f'{subject} - {message}'\n jarvis.send_slack(text, channel='#monitoring')\n\n def format_subject(self, subject):\n \"\"\"\n Escape CR and LF characters, and limit length. RFC 2822's hard limit is\n 998 characters per line. So, minus \"Subject: \" the actual subject must\n be no longer than 989 characters.\n \"\"\"\n\n formatted_subject = subject.replace('\\n', '\\\\n').replace('\\r', '\\\\r')\n\n return formatted_subject[:989]\n","sub_path":"behind/behind/log_handlers.py","file_name":"log_handlers.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"24445891","text":"from matplotlib import use\nuse('TkAgg')\nfrom mpl_toolkits.mplot3d import axes3d\nfrom random import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n## Globals\nSEA_LEVEL = 0\nSIGMA = 1\n\n## Functions\ndef SetTheUniversalConstants(N):\n global X\n global Y\n global Z\n global DELTA\n X = []\n Y = []\n Z = []\n DELTA = []\n for i in range(N+1):\n X.append([])\n Y.append([])\n Z.append([])\n for j in range(N+1):\n X[i].append(i)\n Y[i].append(j)\n Z[i].append(0)\n\ndef GenerateTheWorld(nrc, sd):\n seed(sd)\n \n N = 2**nrc\n SetTheUniversalConstants(N)\n Z[0][0] = gauss(0, SIGMA)\n Z[0][N] = gauss(0, SIGMA)\n Z[N][0] = gauss(0, SIGMA)\n Z[N][N] = gauss(0, SIGMA)\n \n # Start at 1 for equation\n # Go to nrc+2 because we need to index to nrc (+1) and have an offset (+1)\n for i in range(1,nrc+2):\n DELTA.append(SIGMA * 0.5**(i * H) * (1 - 2**(2*H - 2))**0.5)\n \n BuildTheMountains(Z, 0, N, 0, N, 1, nrc)\n# FillTheOceans(Z, len(Z))\n\ndef BuildTheMountains(arr, top, bottom, left, right, t, nrc):\n middle = (top + bottom) // 2\n center = (left + right) // 2\n \n arr[top][center] = (arr[top][left] + arr[top][right]) / 2 + DELTA[t] * gauss(0, SIGMA)\n arr[middle][left] = (arr[top][left] + arr[bottom][left]) / 2 + DELTA[t] * gauss(0, SIGMA)\n arr[middle][right] = (arr[top][right] + arr[bottom][right]) / 2 + DELTA[t] * gauss(0, SIGMA)\n arr[bottom][center] = (arr[bottom][left] + arr[bottom][right]) / 2 + DELTA[t] * gauss(0, SIGMA)\n arr[middle][center] = (arr[top][left] + arr[top][right] + arr[bottom][left] + arr[bottom][right]) / 4 + DELTA[t-1] * gauss(0, SIGMA)\n \n if t < nrc:\n BuildTheMountains(arr, top, middle, left, center, t + 1, nrc)\n BuildTheMountains(arr, top, middle, center, right, t + 1, nrc)\n BuildTheMountains(arr, middle, bottom, left, center, t + 1, nrc)\n BuildTheMountains(arr, middle, bottom, center, right, t + 1, nrc)\n\ndef FillTheOceans(arr, size):\n for i in range(size):\n for j in range(size):\n if arr[i][j] < SEA_LEVEL:\n arr[i][j] = SEA_LEVEL\n\ndef RobTheCartographer():\n fig = plt.figure(j)\n ax = fig.add_subplot(111, projection='3d')\n ax.autoscale(enable=False, axis=u'z')\n ax.set_zlim(bottom=-3.0,top=3.0)\n ax.plot_surface(np.array(X),np.array(Y),np.array(Z), rstride=5, cstride=5)\n fig.savefig(figureName)\n plt.close(fig)\n# plt.show()\n\n## Program\nseed()\nseedList = []\nfor i in range(10):\n seedList.append(randint(-1000000000,1000000000))\n\nfor i in range(10):\n sd = seedList.pop()#int(input(\"Starting seed: \"))\n os.makedirs(\"S\" + str(sd))\n H = 0.1#float(input(\"Roughness coefficient: \"))\n SIGMA = 1#float(input(\"Initial std dev: \"))\n# SEA_LEVEL = float(input(\"Sea level: \"))\n nrc = 6#int(input(\"Number of Recursive Calls: \"))\n \n for j in range(1, 10):\n H = j / 10\n figureName = \"S\" + str(sd) + \"/frame-\" + str(j) + \".png\"\n GenerateTheWorld(nrc, sd)\n RobTheCartographer()\n","sub_path":"7.21/Atlas.py","file_name":"Atlas.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"445286575","text":"from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, cast\n\nfrom dagster import check\nfrom dagster.core.execution.plan.utils import build_resources_for_manager\nfrom dagster.core.storage.tags import MEMOIZED_RUN_TAG\n\nif TYPE_CHECKING:\n from dagster.core.execution.context.system import StepExecutionContext\n from dagster.core.definitions.resource import Resources\n from dagster.core.types.dagster_type import DagsterType\n from dagster.core.definitions import SolidDefinition, PipelineDefinition\n from dagster.core.log_manager import DagsterLogManager\n from dagster.core.system_config.objects import EnvironmentConfig\n from dagster.core.execution.plan.plan import ExecutionPlan\n from dagster.core.execution.plan.outputs import StepOutputHandle\n from dagster.core.log_manager import DagsterLogManager\n\n\nclass OutputContext(\n NamedTuple(\n \"_OutputContext\",\n [\n (\"step_key\", str),\n (\"name\", str),\n (\"pipeline_name\", str),\n (\"run_id\", Optional[str]),\n (\"metadata\", Optional[Dict[str, Any]]),\n (\"mapping_key\", Optional[str]),\n (\"config\", Optional[Any]),\n (\"solid_def\", Optional[\"SolidDefinition\"]),\n (\"dagster_type\", Optional[\"DagsterType\"]),\n (\"log\", Optional[\"DagsterLogManager\"]),\n (\"version\", Optional[str]),\n (\"resource_config\", Optional[Dict[str, Any]]),\n (\"resources\", Optional[\"Resources\"]),\n (\"step_context\", Optional[\"StepExecutionContext\"]),\n ],\n )\n):\n \"\"\"\n The context object that is available to the `handle_output` method of an :py:class:`IOManager`.\n\n Attributes:\n step_key (str): The step_key for the compute step that produced the output.\n name (str): The name of the output that produced the output.\n pipeline_name (str): The name of the pipeline definition.\n run_id (Optional[str]): The id of the run that produced the output.\n metadata (Optional[Dict[str, Any]]): A dict of the metadata that is assigned to the\n OutputDefinition that produced the output.\n mapping_key (Optional[str]): The key that identifies a unique mapped output. 
None for regular outputs.\n config (Optional[Any]): The configuration for the output.\n solid_def (Optional[SolidDefinition]): The definition of the solid that produced the output.\n dagster_type (Optional[DagsterType]): The type of this output.\n log (Optional[DagsterLogManager]): The log manager to use for this output.\n version (Optional[str]): (Experimental) The version of the output.\n resource_config (Optional[Dict[str, Any]]): The config associated with the resource that\n initializes the RootInputManager.\n resources (Optional[Resources]): The resources required by the output manager, specified by the\n `required_resource_keys` parameter.\n \"\"\"\n\n def __new__(\n cls,\n step_key: str,\n name: str,\n pipeline_name: str,\n run_id: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n mapping_key: Optional[str] = None,\n config: Optional[Any] = None,\n solid_def: Optional[\"SolidDefinition\"] = None,\n dagster_type: Optional[\"DagsterType\"] = None,\n log_manager: Optional[\"DagsterLogManager\"] = None,\n version: Optional[str] = None,\n resource_config: Optional[Dict[str, Any]] = None,\n resources: Optional[\"Resources\"] = None,\n step_context: Optional[\"StepExecutionContext\"] = None,\n ):\n return super(OutputContext, cls).__new__(\n cls,\n step_key=step_key,\n name=name,\n pipeline_name=pipeline_name,\n run_id=run_id,\n metadata=metadata,\n mapping_key=mapping_key,\n config=config,\n solid_def=solid_def,\n dagster_type=dagster_type,\n log=log_manager,\n version=version,\n resource_config=resource_config,\n resources=resources,\n step_context=step_context,\n )\n\n def get_run_scoped_output_identifier(self) -> List[str]:\n \"\"\"Utility method to get a collection of identifiers that as a whole represent a unique\n step output.\n\n The unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the output.\n Note: This method also handles the re-execution memoization logic. If the step that\n generates the output is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n Returns:\n List[str, ...]: A list of identifiers, i.e. 
run id, step key, and output name\n \"\"\"\n # if run_id is None and this is a re-execution, it means we failed to find its source run id\n check.invariant(\n self.run_id is not None,\n \"Unable to find the run scoped output identifier: run_id is None on OutputContext.\",\n )\n run_id = cast(str, self.run_id)\n if self.mapping_key:\n return [run_id, self.step_key, self.name, self.mapping_key]\n\n return [run_id, self.step_key, self.name]\n\n\ndef get_output_context(\n execution_plan: \"ExecutionPlan\",\n pipeline_def: \"PipelineDefinition\",\n environment_config: \"EnvironmentConfig\",\n step_output_handle: \"StepOutputHandle\",\n run_id: Optional[str] = None,\n log_manager: Optional[\"DagsterLogManager\"] = None,\n step_context: Optional[\"StepExecutionContext\"] = None,\n) -> \"OutputContext\":\n \"\"\"\n Args:\n run_id (str): The run ID of the run that produced the output, not necessarily the run that\n the context will be used in.\n \"\"\"\n\n step = execution_plan.get_step_by_key(step_output_handle.step_key)\n # get config\n solid_config = environment_config.solids[step.solid_handle.to_string()]\n outputs_config = solid_config.outputs\n\n if outputs_config:\n output_config = outputs_config.get_output_manager_config(step_output_handle.output_name)\n else:\n output_config = None\n\n step_output = execution_plan.get_step_output(step_output_handle)\n output_def = pipeline_def.get_solid(step_output.solid_handle).output_def_named(step_output.name)\n\n io_manager_key = output_def.io_manager_key\n resource_config = environment_config.resources[io_manager_key].config\n\n resources = build_resources_for_manager(io_manager_key, step_context) if step_context else None\n\n return OutputContext(\n step_key=step_output_handle.step_key,\n name=step_output_handle.output_name,\n pipeline_name=pipeline_def.name,\n run_id=run_id,\n metadata=output_def.metadata,\n mapping_key=step_output_handle.mapping_key,\n config=output_config,\n solid_def=pipeline_def.get_solid(step.solid_handle).definition,\n dagster_type=output_def.dagster_type,\n log_manager=log_manager,\n version=(\n _step_output_version(\n pipeline_def, execution_plan, environment_config, step_output_handle\n )\n if MEMOIZED_RUN_TAG in pipeline_def.tags\n else None\n ),\n step_context=step_context,\n resource_config=resource_config,\n resources=resources,\n )\n\n\ndef _step_output_version(\n pipeline_def: \"PipelineDefinition\",\n execution_plan: \"ExecutionPlan\",\n environment_config: \"EnvironmentConfig\",\n step_output_handle: \"StepOutputHandle\",\n) -> Optional[str]:\n from dagster.core.execution.resolve_versions import resolve_step_output_versions\n\n step_output_versions = resolve_step_output_versions(\n pipeline_def, execution_plan, environment_config\n )\n return (\n step_output_versions[step_output_handle]\n if step_output_handle in step_output_versions\n else None\n )\n","sub_path":"python_modules/dagster/dagster/core/execution/context/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"629155120","text":"import csv\nimport numpy\nimport pandas\nimport math\nimport json\nimport sys\nimport string\n\ndef info_gain(dataset):\n\n class_column = dataset['data'].loc[:,dataset['target']].tolist()\n class_values, class_count = numpy.unique(class_column, return_counts = True)\n\n # calcula a informação associada ao conjunto original => Info(D)\n info_D = 0.0\n for i in range(len(class_values)):\n pi = float(class_count[i]) / float(len(class_column))\n info_D += -pi * math.log(pi,2)\n\n\n gain = { }\n\n # calcula o ganho de informação para cada coluna\n for (attribute_name, attribute) in dataset['attributes'].items():\n if attribute_name == dataset['target']: continue\n\n # obtém a coluna\n a_column = dataset['data'].loc[:,attribute_name].tolist()\n \n # conta o número de ocorrências de cada valor distinto na coluna\n distinct_values, distinct_count = numpy.unique(a_column, return_counts = True)\n\n if attribute['type'] == 'nominal':\n\n # cria um subconjunto do dataset\n data_subset = numpy.asarray((a_column,class_column)).T\n\n # para cada valor distinto, calcula a informação associada\n # faz o somatório em infocolumn para obter a informação associada à coluna\n info_column = 0.0\n for j in range(len(distinct_values)):\n\n # cria um subconjunto de dados do valor distinto\n column_subset = []\n for k in range(len(data_subset)): \n if(data_subset[k][0] == distinct_values[j]):\n column_subset.append(data_subset[k])\n np_column_subset = numpy.array(column_subset)\n # conta as ocorrências de cada classe para o atributo\n class_distinct_values, class_distinct_count = numpy.unique(np_column_subset[:,1], return_counts = True)\n\n # calcula o ganho de informação\n info_attr = 0.0\n for k in range(len(class_distinct_values)):\n pattri = float(class_distinct_count[k]) / float(distinct_count[j])\n info_attr += - pattri * math.log(pattri,2)\n info_attr *= (float(distinct_count[j]) / float(len(a_column)))\n info_column += info_attr\n \n gain[attribute_name] = (info_D - info_column)\n \n if attribute['type'] == 'numeric':\n \n # cria um subconjunto do dataset\n data_subset = numpy.asarray((a_column,class_column)).T\n\n # faz a média dos valores da coluna\n mean = 0\n for k in range(len(a_column)):\n mean += float(a_column[k])\n mean /= len(a_column)\n\n # cria dois subconjuntos (o critério de divisão é a média)\n lower_than = []\n greater_than = []\n for k in range(len(data_subset)):\n if(float(data_subset[k][0]) <= float(mean)):\n lower_than.append(data_subset[k])\n else:\n greater_than.append(data_subset[k])\n np_lower_than = numpy.empty((0,2)) if lower_than == [] else numpy.array(lower_than)\n np_greater_than = numpy.empty((0,2)) if greater_than == [] else numpy.array(greater_than)\n class_lower_distinct_values, class_lower_distinct_count = numpy.unique(np_lower_than[:,1], return_counts = True)\n class_greater_distinct_values, class_greater_distinct_count = numpy.unique(np_greater_than[:,1], return_counts = True)\n\n info_column = 0.0\n \n # calcula o ganho de informação\n info_attr_lower = 0.0\n for k in range(len(class_lower_distinct_values)):\n pattri = float(class_lower_distinct_count[k]) / float(len(np_lower_than))\n info_attr_lower += - pattri * math.log(pattri,2)\n info_attr_lower *= (float(len(np_lower_than)) / float(len(a_column)))\n info_column += info_attr_lower\n info_attr_greater = 0.0\n for k in range(len(class_greater_distinct_values)):\n pattri = float(class_greater_distinct_count[k]) / float(len(np_greater_than))\n info_attr_greater += - pattri * 
math.log(pattri,2)\n info_attr_greater *= (float(len(np_greater_than)) / float(len(a_column)))\n info_column += info_attr_greater\n\n gain[attribute_name] = (info_D - info_column)\n\n return gain","sub_path":"infogain.py","file_name":"infogain.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"636556882","text":"#!/usr/bin/python3\n#使用for循环实现斐波那契数列\n\nn = int(input('Enter the number of terms: '))\n\ndef Fibonacci(n):\n f0, f1 = 0, 1\n for _ in range(n):\n yield f0\n f0, f1 = f1, f0+f1\n\nfibs = list(Fibonacci(n))\nprint(fibs)","sub_path":"practice_20190516/fibonacci_for.py","file_name":"fibonacci_for.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"23592313","text":"# Цирковая обезьянка еще не может быть полноценным игроком в Ним,\r\n# но она обучена либо удваивать количество камней в куче, либо добавлять один.\r\n# Изначально в кучке уже лежит 1 камень\r\n\r\n# Программа подсчитывает минимальное количество действий, которое нужно сделать мартышке,\r\n# чтобы получить кучку из введенных Вами камней\r\nn = int(input('Ввведите кол-во камней в кучке:'))\r\ncount = 0\r\nwhile n != 0:\r\n count += 1\r\n if n % 2 == 0:\r\n n /= 2\r\n print('---', n)\r\n else:\r\n n -= 1\r\n print('---', n)\r\nprint('Обезьянке потребовалось(-лся)', count - 1, 'ход(-а/-ов)')\r\n","sub_path":"Основы программирования Python/5. Debugger/Фокус в цирке - трассировка.py","file_name":"Фокус в цирке - трассировка.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"443060544","text":"#Tic Tac Toe\nimport random\n\ndef drawBoard(board):\n #this function prints out the board that it was passed.\n\n #\"board\" is a list of 10 strings representing the board(ignore index0)\n\n print(\" | |\")\n print(\" \"+board[1]+\" |\"+board[2]+\" | \"+board[3])\n print(\" | |\")\n print(\"-------------\")\n print(\" \"+board[4]+\" |\"+board[5]+\" | \"+board[6])\n print(\" | |\")\n print(\"-------------\")\n print(\" \"+board[7]+\" |\"+board[8]+\" | \"+board[9])\n print(\" | |\")\n print(\"-------------\")\n\ndef inputPlayerLetter():\n #Lets the player type which letter they want to be.\n #Returns a list with the player's letter as the first item,and the\n #computer's letter as the second\n\n letter=\"\"\n while not(letter == \"X\" or letter ==\"O\" ):\n print(\"Do you want to be X or O?\")\n letter=input().upper()\n\n #the first element in the list is the player's letter\n #the second is computer's letter\n if letter == \"X\" :\n return [\"X\",\"O\"]\n else:\n return [\"O\",\"X\"]\n\ndef whoGoesFirst():\n #Randomly choose the player who goes first.\n if random.randint(0,1) ==0:\n return 'computer'\n else:\n return 'player'\n\ndef playAgain():\n #this function returns True if the player wants to play again.\n #otherwise it returns False\n print(\"Do you want to play again?(yes or no)\")\n return input().lower().startswith('y')\n\n \n\ndef makeMove(board,letter,move):\n board[move] =letter\n\ndef isWinner(board,letter):\n #Given a board and a player's letter, this function returns True\n #if that player has won.\n \n return ((board[7] == letter and board[8]== letter and board[9]==letter) or #across the top\n (board[4] == letter and board[5]== letter and board[6]==letter) or #across the middle\n (board[1] == letter and board[2]== letter and board[3]==letter) or #across the bottom\n\n (board[1] == letter and board[4]== letter and board[7]==letter) or #down the left side\n (board[2] == letter and board[5]== letter and board[8]==letter) or #down the middle side\n (board[3] == letter and board[6]== letter and board[9]==letter) or #down the right side\n (board[7] == letter and board[5]== letter and board[3]==letter) or #diagonal\n (board[1] == letter and board[5]== letter and board[9]==letter)) #diagona\n\ndef getBoardCopy(board):\n #Make a duplicate of the board list and return it the duplicate.\n dupBoard=[]\n\n for i in board:\n dupBoard.append(i)\n return dupBoard\n\ndef isSpaceAvailable(board,move):\n #Return True if the passed move is available on the passed board.\n return (board[move]==\" \")\n\ndef getPlayerMove(board):\n #Let the player type in their move.\n move=\"\"\n while move not in \"1,2,3,4,5,6,7,8,9\".split(\",\") or not isSpaceAvailable(board,int(move)):\n print(\"What is your next move?(1--9))\")\n move=input()\n \n return int(move)\n\ndef chooseRandomMoveFromList(board,moveList):\n #Returns a valid move from the passed list on the passed board.\n #Returns None if there is no valid move.\n possibleMoves=[]\n for i in moveList:\n if isSpaceAvailable(board,i):\n possibleMoves.append(i)\n\n if len(possibleMoves) != 0:\n return possibleMoves[random.randint(0,len(possibleMoves)-1)]\n else:\n return None\n\ndef getComputerMove(board,computerLetter):\n #Given a board and the computer's letter\n #determine where to move and return that move\n if computerLetter == \"X\" :\n playerLetter=\"O\"\n else:\n playerLetter=\"X\"\n\n #here is our algorithm for our Tic Tac Toe AI:\n #First, check if we can win in the next move\n for i in range(1,10):\n 
copy=getBoardCopy(board)\n if isSpaceAvailable(copy,i):\n makeMove(copy,computerLetter,i)\n if isWinner(copy,computerLetter):\n return i;\n\n #check if the palyer could win on their next move,and block them.\n for i in range(1,10):\n copy=getBoardCopy(board)\n if isSpaceAvailable(copy,i):\n makeMove(copy,playerLetter,i)\n if isWinner(copy,playerLetter):\n return i;\n\n\n #Try to take one of the corners,if they are available\n move=chooseRandomMoveFromList(board,[1,3,7,9])\n if move != None:\n return move\n\n #Try to take the center,if it is available\n if isSpaceAvailable(board,5):\n return 5\n\n #Move on one of the sides.\n return chooseRandomMoveFromList(board,[2,4,6,8])\n\ndef isBoardFull(board):\n #Return True if every space on the board has been taken,\n #otherwise return False\n for i in range(1,10):\n if isSpaceAvailable(board,i):\n return False\n\n return True\n\n#Program start\n\nprint(\"Welcome to Tic Tac Toe!\")\n\nwhile True:\n #Reset the board\n theBoard=[\" \"] * 10\n playerLetter,computerLetter=inputPlayerLetter()\n turn=whoGoesFirst()\n print(\"The \"+turn +\" will go first.\")\n gameIsPlaying =True\n\n while gameIsPlaying:\n if turn == 'player':\n #Player's turn\n #drawBoard(theBoard)\n move=getPlayerMove(theBoard)\n makeMove(theBoard,playerLetter,move)\n print(\"the player has placed at %d\" % move)\n drawBoard(theBoard)\n\n if isWinner(theBoard,playerLetter):\n #drawBoard(theBoard)\n print(\"Bravo! You win!\")\n gameIsPlaying=False\n else:\n if isBoardFull(theBoard):\n #drawBoard(theBoard)\n print(\"The game is a tie!\")\n break\n else:\n turn='computer'\n else:\n #computer's turn\n move=getComputerMove(theBoard,computerLetter)\n makeMove(theBoard,computerLetter,move)\n print(\"the computer has placed at %d\" % move)\n drawBoard(theBoard)\n\n if isWinner(theBoard,computerLetter):\n #drawBoard(theBoard)\n print(\"The computer has beaten you! You lose!\")\n gameIsPlaying=False\n else:\n if isBoardFull(theBoard):\n #drawBoard(theBoard)\n print(\"The game is a tie!\")\n break\n else:\n turn=\"player\"\n\n if not playAgain():\n break\n \n \n \n\n\n \n \n \n \n \n \n \n\n\n \n \n \n \n","sub_path":"Code_class/ex1_TicTacToe.py","file_name":"ex1_TicTacToe.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"635305575","text":"\"\"\"\n 34. Find First and Last Position of Element in Sorted Array\n Given an array of integers nums sorted in ascending order, \n find the starting and ending position of a given target value.\n If target is not found in the array, return [-1, -1].\n Follow up: Could you write an algorithm with O(log n) runtime complexity?\n\n Example 1:\n Input: nums = [5,7,7,8,8,10], target = 8\n Output: [3,4]\n Example 2:\n Input: nums = [5,7,7,8,8,10], target = 6\n Output: [-1,-1]\n Example 3:\n Input: nums = [], target = 0\n Output: [-1,-1]\n\n Constraints:\n 0 <= nums.length <= 105\n -109 <= nums[i] <= 109\n nums is a non-decreasing array.\n -109 <= target <= 109\n\"\"\"\nfrom typing import List\n\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n finalId = len(nums)-1\n if finalId<0:\n return [-1,-1]\n if nums[0]==target and nums[finalId]==target:\n return [0, finalId]\n # lets try bisection search\n left, right = 0, finalId\n counter=0\n mid = (left+right)//2\n while left <= right :\n counter+=1\n if counter == len(nums):\n print('something is wrong .............................')\n break\n mid = (left+right)//2\n print('index left {} , right {} , mid {} '.format(left, right, mid))\n print('values left {} , right {} , mid {} '.format(nums[left], nums[right], nums[mid]))\n\n if nums[mid] > target :\n left, right = left , mid-1\n elif nums[mid] < target :\n left, right = mid+1 , right\n elif nums[mid]==target :\n break\n \n if mid==0 and nums[left]==target:\n mid=left\n if mid==0 and nums[right]==target:\n mid=right\n print('final I left {} , right {} , mid {} '.format(left, right, mid))\n print('final V left {} , right {} , mid {} '.format(nums[left], nums[right], nums[mid]))\n if nums[mid] != target :\n return [-1, -1]\n else :\n left, right = mid, mid\n while left >=1:\n if nums[left-1]==target :\n left-=1\n break\n else:\n break\n while right < finalId:\n if nums[right+1]==target:\n right+=1\n break\n else:\n break\n\n return [left, right]\n\n\n\n\n\nclass Solution2:\n # brute force method\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n finalId = len(nums)-1\n if finalId<0:\n return [-1,-1]\n if nums[0]==target and nums[finalId]==target:\n return [0, finalId]\n\n tmp=[]\n for i in range(len(nums)):\n if nums[i]==target:\n tmp.append(i)\n elif nums[i] > target:\n break\n print(tmp)\n \n if len(tmp)>0 :\n return [tmp[0], tmp[-1]] \n else :\n return [-1, -1]","sub_path":"LeetCode_exercises/ex0034_firstAndLastPositionOfElement.py","file_name":"ex0034_firstAndLastPositionOfElement.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"494516923","text":"#coding=utf-8\n# 3. 编写一个函数,利用正则匹配,获取一个文件中双引号之间的内容\nimport re\n\npattern=re.compile(\"(\\\"[^\\\"]*\\\")\")\n\nf=open(\"data.txt\")\nresult1=[]\nresult2=[]\nfor line in f:\n #一行中有一对双引号\n s=pattern.search(line)\n if s!=None:\n result1.append(s.group())\n #一行中有多对双引号\n s=pattern.findall(line)\n if s!=[]:\n result2.append(s)\nprint(result1)\nprint(result2)\nf.close()\n\n\n#双引号跨行\nf=open(\"data.txt\")\nlines=f.readlines()\ns=pattern.findall(''.join(lines))\nprint(s)\nf.close()\n","sub_path":"autoTestTrain/test_3.py","file_name":"test_3.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"428929004","text":"from datetime import datetime\nimport time\n\n\nclass MyTimer:\n def __init__(self):\n self.start = 0\n self.end = 0\n self.time = 0\n self.prompt = '还未开始计时!'\n\n def __str__(self):\n return self.prompt\n\n def start_time(self):\n self.start = datetime.now()\n print('开始计时')\n\n def end_time(self):\n self.end = datetime.now()\n self.time = (self.end - self.start).seconds\n self.prompt = '用了%d秒' % self.time\n print('计时结束', self.prompt)\n\n def __add__(self, other):\n return '一共使用了%d秒' % (self.time + other.time)\n\n\ntime1 = MyTimer()\ntime2 = MyTimer()\n\ntime1.start_time()\ntime.sleep(1)\ntime1.end_time()\n\ntime2.start_time()\ntime.sleep(1)\ntime2.end_time()\n\nprint(time1)\nprint(time2)\nprint(time1 + time2)\n","sub_path":"2019年5月6日/3.timer.py","file_name":"3.timer.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"493238222","text":"# arpa2cmd is a base class for ARPA2 command interpreters.\n# It shows itself as arpa2shell, from which it usually runs.\n#\n# These interpreters can be told to know_about each other\n# and call each other when combined with the meta-shell of\n# the bare instance below.\n#\n# From: Rick van Rein \n\n\nimport sys\nimport time\n\nimport cmd\nfrom cmdparser import cmdparser\n\nimport types\n\n\n\n@cmdparser.CmdClassDecorator()\nclass Cmd (cmd.Cmd):\n\n\tversion = (0,0)\n\tprompt = 'arpa2shell> '\n\tintro = 'The ARPA2 generic command shell offers basic support to actual shells.'\n\n\tgss_name = None\n\tgss_life = None\n\n\t\"\"\"General class for switching between shells.\n\t It relies on do_arpa2xxx functions in each and\n\t shares such commands for allowing switches at\n\t termination of the calling shell. These\n\t functions therefore usually just return True\n\t after setting the next shell name. This is\n\t even safe for references to the shell itself,\n\t except in the case of ARPA2shell which has none\n\t above it.\n\t\"\"\"\n\tdef __init__ (self, *args, **kwargs):\n\t\tcmd.Cmd.__init__ (self, *args, **kwargs)\n\t\tself.known = [ ]\n\t\tself.next_shell = None\n\t\tself.reset ()\n\n\tdef reset (self):\n\t\t\"\"\"Actual shells would override the reset\n\t\t method such that they remove traces of\n\t\t command history. This is called from\n\t\t __init__() and can be overridden in\n\t\t subclasses. Subclasses should also\n\t\t call their superclass method to ensure\n\t\t proper resetting.\n\t\t\"\"\"\n\t\tpass\n\n\t@cmdparser.CmdMethodDecorator()\n\tdef do_help (self, args, fields):\n\t\t\"\"\"help []\n\t\t \n\t\t Trigger the help command in the superclass, but apply\n\t\t the cmdparser wrapper so it can be called over JSON.\n\t\t There is a shorthand notation, namely a question mark.\n\t\t\"\"\"\n\t\tcmd.Cmd.do_help (self, ' '.join (args [1:]))\n\n\t@cmdparser.CmdMethodDecorator()\n\tdef do_version (self, *ignored):\n\t\t\"\"\"version\n\t\t \n\t\t Print the name and current version of this shell.\n\t\t\"\"\"\n\t\tsys.stdout.write ('%s-%d.%d\\n' % (self.prompt.split ('>') [0], self.version [0], self.version [1]))\n\t\treturn False\n\n\t@cmdparser.CmdMethodDecorator()\n\tdef do_ping (self, *ignored):\n\t\t\"\"\"ping\n\t\t \n\t\t Respond to ping requests (with output on stderr).\n\t\t\"\"\"\n\t\tsys.stderr.write ('EPROTONOSUPPORT: Please upgrade to ping6\\n')\n\t\treturn False\n\n\t@cmdparser.CmdMethodDecorator()\n\tdef do_ping6 (self, *ignored):\n\t\t\"\"\"ping6\n\t\t \n\t\t Respond to ping6 requests (with output on stdout).\n\t\t\"\"\"\n\t\tsys.stdout.write ('pong6\\n')\n\t\treturn False\n\n\t@cmdparser.CmdMethodDecorator()\n\tdef do_date (self, *ignored):\n\t\t\"\"\"date\n\t\t \n\t\t Request the current time on the system running the shell.\n\t\t\"\"\"\n\t\tsys.stdout.write ('%s\\n' % time.asctime (time.gmtime ()))\n\t\treturn False\n\n\t@cmdparser.CmdMethodDecorator()\n\tdef do_whoami (self, *ignored):\n\t\t\"\"\"whoami\n\t\t \n\t\t Ask who you are, and how the shell sees you during ACL processing.\n\t\t\"\"\"\n\t\tif self.gss_name is None or self.gss_life is None:\n\t\t\tsys.stderr.write ('You are nobody\\n')\n\t\t\treturn False\n\t\ttry:\n\t\t\timport gssapi\n\t\texcept ImportError as ie:\n\t\t\tsys.stderr.write ('This shell does not support GSSAPI\\n')\n\t\t\treturn False\n\t\ttry:\n\t\t\texp = time.asctime (time.gmtime (self.gss_life))\n\t\t\tsys.stdout.write ('You are: %s\\nExpiration: %s\\n' % (self.gss_name,exp))\n\t\texcept 
gssapi.raw.MissingCredentialsError:\n\t\t\tsys.stderr.write ('You are nobody\\n')\n\t\texcept gssapi.raw.ExpiredCredentialsError:\n\t\t\tsys.stderr.write ('You have expired\\n')\n\t\texcept gssapi.raw.InvalidCredentialsError:\n\t\t\tsys.stderr.write ('Your credentials are wrong\\n')\n\t\texcept gssapi.raw.GSSError as ge:\n\t\t\tsys.stderr.write ('GSSAPI Error: %s\\n' % str (ge))\n\t\texcept Exception as e:\n\t\t\tsys.stderr.write ('General error: %s\\n' % str (e))\n\t\treturn False\n\n\t\"\"\"Termination commands. No JSON wrappers, because this is\n\t   not how a session under JSON should end.\n\t\"\"\"\n\tdef do_EOF (self, *ignored):\n\t\t\"\"\"Exit this shell.\n\t\t\"\"\"\n\t\treturn True\n\n\tdo_exit = do_EOF\n\n\tdo_quit = do_EOF\n\n\t\"\"\"Switch to the main command loop for arpa2shell.\n\t\"\"\"\n\t# def do_arpa2shell (self, *ignored):\n\t# \t# Special case: no shell above this one\n\t# \treturn False\n\n\t\"\"\"Bind a shell to self, and make it return the\n\t   provided module as the shell to switch to.\n\t\"\"\"\n\tdef bound_shell (self, name, module):\n\t\tdef switch_shell (self, *ignored):\n\t\t\tself.next_shell = module\n\t\t\treturn True\n\t\tswitch_shell.__doc__ = 'Switch to the ' + name + ' shell: ' + module.intro\n\t\treturn switch_shell\n\n\t\"\"\"Add a shell object by introducing it to all\n\t   the shells that we already know about. This\n\t   transitively installs do_arpa2xxx functions\n\t   that return True after setting next_shell.\n\n\t   Think of this function as joining two sets by\n\t   pairing all pairs from the two sets, in one\n\t   direction only; it is explicitly repeated in\n\t   both directions between the two sets so as to\n\t   avoid infinite recursion.\n\n\t   It is assumed that at least one instance is a\n\t   mere ARPA2shell without subclass. This one\n\t   can then be called to \"just\" be able to step\n\t   into any of the other shells. This is not a\n\t   necessity, however.\n\t\"\"\"\n\tdef know_about (self, shellname, shellobj):\n\t\tif (shellname,shellobj) not in self.known:\n\t\t\tself.known.append ( (shellname,shellobj) )\n\t\t\tfor (knownname,knownobj) in self.known:\n\t\t\t\tknownobj.know_about (shellname, shellobj)\n\t\tbound_switch = self.bound_shell (shellname, shellobj)\n\t\t# a class __dict__ is a read-only mappingproxy, so add the command via setattr\n\t\tsetattr (self.__class__, 'do_' + shellname, bound_switch)\n\n\n\nif __name__ == '__main__':\n\tshell = Cmd ()\n\tshell.cmdloop ()\n","sub_path":"demo-messaging/arpa2cmd.py","file_name":"arpa2cmd.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"41606793","text":"import math\n\n\na = int(input(\"Insira o valor de A: \"))\nb = int(input(\"Insira o valor de B: \"))\nc = int(input(\"Insira o valor de C: \"))\n\ndelta = b**2 - 4*a*c\n\nif delta < 0:\n print(\"A equação não possui raizes reais.\")\n\nelif delta == 0:\n bhaskara = ((b)*-1 + math.sqrt(delta))/(2*a)\n print(f'A equação possui apenas uma raiz real: {bhaskara}.')\n\nelse:\n bhaskarapositivo = ((b)*-1 + math.sqrt(delta))/(2*a)\n bhaskaranegativo = ((b)*-1 - math.sqrt(delta))/(2*a)\n print(f'A equação possui duas raizes: {bhaskarapositivo} e {bhaskaranegativo}')","sub_path":"Lista 3/EBSS-AER-Alg-03-Ex-11.py","file_name":"EBSS-AER-Alg-03-Ex-11.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"70867485","text":"import torch.nn as nn\nimport torch\nimport os\nimport numpy as np\nfrom torch.nn import functional as F\nfrom functools import lru_cache\nimport math\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_checkpoint(discriminator, generator, epoch, data_folder):\n out_dir = '%s/models' % data_folder\n os.makedirs(out_dir)\n torch.save(discriminator.state_dict(), '%s/D_epoch_%d' % (out_dir, epoch))\n torch.save(generator.state_dict(), '%s/G_epoch_%d' % (out_dir, epoch))\n\n\ndef round_array(array, max_sum, invert=False):\n if invert and len(array) > 1:\n # invert sizes as low fitness values is better than high values\n array = (max_sum - np.array(array)) / (len(array) - 1)\n\n array = np.array(array)\n array = np.clip(array, 0, max_sum)\n rounded = np.floor(array)\n diff = int(max_sum) - int(np.sum(rounded))\n if diff > 0:\n for i in range(diff):\n max_index = (array - rounded).argmax()\n if len(array) == 2:\n max_index = array.argmin()\n rounded[max_index] += 1\n return rounded\n\n\ndef coord_1d_2d(x, rows):\n return x // rows, x % rows\n\n\ndef coord_2d_1d(r, c, rows):\n return r * rows + c\n\n\ndef get_neighbors(center, rows, cols):\n r, c = coord_1d_2d(center, rows)\n top = coord_2d_1d((r - 1) % rows, c, rows)\n bottom = coord_2d_1d((r + 1) % rows, c, rows)\n right = coord_2d_1d(r, (c + 1) % cols, rows)\n left = coord_2d_1d(r, (c - 1) % cols, rows)\n return [center, top, bottom, right, left]\n\n\n@lru_cache(maxsize=10)\ndef _permutations(len_a1, len_a2):\n pairs = []\n for start_j in range(len_a2):\n j = 0\n for i in range(len_a1):\n pairs.append((i, (j + start_j) % len_a2))\n j = (j + 1) % len_a2\n return pairs\n\n\ndef permutations(a1, a2, random=False):\n len_a1, len_a2 = len(a1), len(a2)\n if random:\n pairs = np.array(np.meshgrid(range(len_a1), range(len_a2))).T.reshape(-1, 2)\n np.random.shuffle(pairs)\n return pairs\n return _permutations(len_a1, len_a2)\n\n\ndef is_cuda_available(condition=True):\n return condition and torch.cuda.is_available()\n\n\ndef device_name(condition=True):\n return \"cuda\" if is_cuda_available(condition) else \"cpu\"\n\n\ndef cuda(variable, condition=True):\n return variable.cuda() if is_cuda_available(condition) else variable\n\n\ndef resize_channels(x, size):\n x = x.permute(2, 3, 0, 1)\n out = F.interpolate(x, size, mode='bilinear')\n return out.permute(2, 3, 0, 1)\n\n\n# based on https://github.com/github-pengge/PyTorch-progressive_growing_of_gans/blob/master/models/base_model.py\ndef resize_activations_avg(v, so):\n \"\"\"\n Resize activation tensor 'v' of shape 'si' to match shape 'so'.\n :param v:\n :param so:\n :return:\n \"\"\"\n si = list(v.size())\n so = list(so)\n assert len(si) == len(so)# and si[0] == so[0]\n\n # Decrease feature maps.\n if si[1] > so[1]:\n v = v[:, :so[1]]\n if si[0] > so[0]:\n v = v[:so[0], :]\n\n # Shrink spatial axes.\n if len(si) == 4 and (si[2] > so[2] or si[3] > so[3]):\n assert si[2] % so[2] == 0 and si[3] % so[3] == 0\n ks = (si[2] // so[2], si[3] // so[3])\n v = F.avg_pool2d(v, kernel_size=ks, stride=ks, ceil_mode=False, padding=0, count_include_pad=False)\n\n # Extend spatial axes. 
Below is a wrong implementation\n    # shape = [1, 1]\n    # for i in range(2, len(si)):\n    #     if si[i] < so[i]:\n    #         assert so[i] % si[i] == 0\n    #         shape += [so[i] // si[i]]\n    #     else:\n    #         shape += [1]\n    # v = v.repeat(*shape)\n    if si[2] != so[2]:\n        assert so[2] / si[2] == so[3] / si[3] # currently only support this case\n        v = F.interpolate(v, size=so[2], mode='nearest')#, align_corners=True)\n\n    # Increase feature maps.\n    if si[1] < so[1]:\n        z = torch.zeros([v.shape[0], so[1] - si[1]] + so[2:])\n        v = torch.cat([v, z], 1)\n    if si[0] < so[0]:\n        z = torch.zeros([so[0] - si[0], v.shape[1]] + so[2:])\n        v = torch.cat([v, z], 0)\n    return v\n\n\ndef resize_1d(x, size):\n    return _resize(x, size, \"nearest\")\n\n\ndef resize_2d(x, size):\n    return _resize(x, size, \"bilinear\", align_corners=True)\n\n\ndef _resize(x, size, mode, align_corners=None):\n    x = x.clone().detach()\n    x = x.expand(1, 1, *x.size())\n    ret = F.interpolate(x, size=size, mode=mode, align_corners=align_corners)\n    return ret[0, 0]\n\n\ndef resize_activations(w, size):\n    out = w\n    # compare both spatial dimensions against the target shape\n    if w.size()[2] != size[2] or w.size()[3] != size[3]:\n        out = F.interpolate(w, size=size[-2:])\n    # then resize channels on the running result, not the original input\n    if w.size()[0] != size[0] or w.size()[1] != size[1]:\n        out = resize_channels(out, size[:2])\n    return out\n\n\ndef resize_conv(source_module, target_module):\n    success = False\n    if target_module.bias is not None and source_module.bias is not None and source_module.bias.size() != target_module.bias.size():\n        target_module.bias = nn.Parameter(resize_1d(source_module.bias, target_module.bias.size()[0]))\n        logger.debug(f\"bias {target_module.bias.size()} {source_module.bias.size()}\")\n    try:\n        w = resize_activations(source_module.weight, target_module.weight.size())\n        logger.debug(f\"{source_module.weight.size()} {target_module.weight.size()} {w.size()}\")\n        target_module.weight = nn.Parameter(w)\n        success = True\n    except Exception as e:\n        logger.debug(\"error resizing weights\")\n        logger.exception(e)\n    return target_module, success\n\n\ndef calc_div_factor(w, num_layers, max_layers, min_w=3):\n    n = min(max_layers, int(math.log(w // min_w, 2)))\n    return 2 ** max(0, n - num_layers)\n\n\ndef num2tuple(num):\n    return num if isinstance(num, tuple) else (num, num)\n\n\ndef conv2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n    h_w, kernel_size, stride, pad, dilation = num2tuple(h_w), \\\n        num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(dilation)\n    pad = num2tuple(pad[0]), num2tuple(pad[1])\n\n    h = math.floor((h_w[0] + sum(pad[0]) - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] + 1)\n    w = math.floor((h_w[1] + sum(pad[1]) - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] + 1)\n\n    return h, w\n\n\ndef convtransp2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0):\n    h_w, kernel_size, stride, pad, dilation, out_pad = num2tuple(h_w), \\\n        num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(dilation), num2tuple(out_pad)\n    pad = num2tuple(pad[0]), num2tuple(pad[1])\n\n    h = (h_w[0] - 1) * stride[0] - sum(pad[0]) + dilation[0] * (kernel_size[0] - 1) + out_pad[0] + 1\n    w = (h_w[1] - 1) * stride[1] - sum(pad[1]) + dilation[1] * (kernel_size[1] - 1) + out_pad[1] + 1\n\n    return h, w\n\n\ndef conv2d_get_padding(h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1):\n    h_w_in, h_w_out, kernel_size, stride, dilation = num2tuple(h_w_in), num2tuple(h_w_out), \\\n        num2tuple(kernel_size), num2tuple(stride), num2tuple(dilation)\n\n    p_h = ((h_w_out[0] - 1) * stride[0] - h_w_in[0] + dilation[0] * (kernel_size[0] - 
1) + 1)\n p_w = ((h_w_out[1] - 1) * stride[1] - h_w_in[1] + dilation[1] * (kernel_size[1] - 1) + 1)\n\n return (math.floor(p_h / 2), math.ceil(p_h / 2)), (math.floor(p_w / 2), math.ceil(p_w / 2))\n\n\ndef convtransp2d_get_padding(h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1, out_pad=0):\n h_w_in, h_w_out, kernel_size, stride, dilation, out_pad = num2tuple(h_w_in), num2tuple(h_w_out), \\\n num2tuple(kernel_size), num2tuple(stride), num2tuple(\n dilation), num2tuple(out_pad)\n\n p_h = -(h_w_out[0] - 1 + 2*out_pad[0] - dilation[0] * (kernel_size[0] - 1) - (h_w_in[0] - 1) * stride[0]) / 2\n p_w = -(h_w_out[1] - 1 + 2*out_pad[1] - dilation[1] * (kernel_size[1] - 1) - (h_w_in[1] - 1) * stride[1]) / 2\n\n return (math.floor(p_h / 2), math.ceil(p_h / 2)), (math.floor(p_w / 2), math.ceil(p_w / 2))\n","sub_path":"util/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
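The shape helpers in the record above are easy to sanity-check by hand: a 32x32 input with kernel 3, stride 2, padding 1 gives floor((32 + 2 - 2 - 1)/2 + 1) = 16 per side, and the matching transposed convolution maps 16 back to 32; a small sketch, assuming the helpers above are in scope:

```python
print(conv2d_output_shape((32, 32), kernel_size=3, stride=2, pad=1))
# (16, 16)
print(convtransp2d_output_shape((16, 16), kernel_size=3, stride=2, pad=1, out_pad=1))
# (32, 32): (16-1)*2 - 2 + 2 + 1 + 1 = 32 per side
```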
+{"seq_id":"159098185","text":"from .. import logger\nimport os\n\nfrom .base_storage import BaseStorage\nfrom .base_file import BaseFile\nfrom .amazon_s3_storage import AmazonS3Storage, AmazonS3File\n\n\ndef get_storage(path, **kwargs):\n # Dynamically select a storage backend depending on the format\n input_dir = os.path.expanduser(os.path.expandvars(path))\n if input_dir.startswith(\"s3://\"):\n return AmazonS3File(\n storage=AmazonS3Storage(**kwargs),\n path=path\n )\n if input_dir.startswith(\"od://\"):\n raise NotImplementedError(\"OneDrive storage not implemented yet\")\n if input_dir.startswith(\"gs://\"):\n raise NotImplementedError(\"Google storage not implemented yet\")\n if input_dir.startswith(\"acs://\"):\n raise NotImplementedError(\"Azure cloud storage not implemented yet\")\n if input_dir.startswith(\"gd://\"):\n raise NotImplementedError(\"Google drive not implemented yet\")\n else:\n return BaseFile(storage=BaseStorage(), path=input_dir)\n","sub_path":"organized/storage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"617566476","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('forum', '0003_pinnedtopic'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='pinnedtopic',\n name='topic',\n ),\n migrations.AddField(\n model_name='pinnedtopic',\n name='post',\n field=models.ForeignKey(related_name='+', default=None, to='forum.Post'),\n preserve_default=False,\n ),\n ]\n","sub_path":"forum/migrations/0004_auto_20150310_1540.py","file_name":"0004_auto_20150310_1540.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"403076994","text":"# Libraries for decision tree classification training\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split # Import train_test_split function\nfrom sklearn import metrics #Import scikit-learn metrics module for accuracy calculation\n\n# Libraries for decision tree visualization\nfrom sklearn.tree import export_graphviz\nfrom sklearn.externals.six import StringIO \nfrom IPython.display import Image \nimport pydotplus\n\n# Plotting graph of metrics\nimport matplotlib.pyplot as pltgenerate_decision_tree\n\n\ndef generate_decision_tree(min_sample_leaf,decision_tree_image):\n \n X_train, X_test, y_train, y_test = train_test_split(biomed_data3.iloc[:,:-1], biomed_data3.iloc[:,-1], test_size=80/310, random_state=1)\n \n # Create a decision tree classifier with entropy as the criterion\n clf = DecisionTreeClassifier(min_samples_leaf=min_sample_leaf,criterion='gini',splitter=\"best\")\n\n # Train the model\n clf=clf.fit(X_train,y_train)\n\n # Predict the testing data results using the model\n y_pred = clf.predict(X_test)\n \n features = [\"pelvic_incidence\",\"pelvic_tilt\",\"lumbar_lordosis_angle\",\"sacral_slope\",\"pelvic_radius\",\"degree_spondylolisthesis\"]\n target_names = ['Normal','Hernia','Spondylolisthesis']\n \n # Metrics for analysis of the prediction\n metric_dict = metrics.classification_report(y_test, y_pred, target_names=target_names,output_dict=True)\n \n accuracy_list.append(metric_dict['accuracy'])\n precision_normal.append(metric_dict['Normal']['precision'])\n precision_hernia.append(metric_dict['Hernia']['precision'])\n precision_spondy.append(metric_dict['Spondylolisthesis']['precision'])\n recall_normal.append(metric_dict['Normal']['recall'])\n recall_hernia.append(metric_dict['Hernia']['recall'])\n recall_spondy.append(metric_dict['Spondylolisthesis']['recall'])\n \n \n # Visualize the decision tree\n dot_data = StringIO()\n export_graphviz(clf, out_file=dot_data, \n filled=True, rounded=True,\n special_characters=True,feature_names = features,class_names=['Normal','Hernia','Spondylolisthesis'])\n graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) \n graph.write_png(decision_tree_image+\".png\")\n Image(graph.create_png())\n \n\ndef plotGraph():\n x_axis = ['5','15','25','40','50']\n metric_df = pd.DataFrame(index = x_axis)\n \n metric_df['Accuracy'] = accuracy_list\n metric_df['Precision_Normal']=precision_normal\n metric_df['Precision_Hernia']=precision_hernia\n metric_df['Precision_Spondylolisthesis']=precision_spondy\n metric_df['Recall_Normal']=recall_normal\n metric_df['Recall_Hernia']=recall_hernia\n metric_df['Recall_Spondylolisthesis']=recall_spondy\n \n metric_plot = metric_df.plot()\n metric_plot.set_xlabel(\"Minimum Number of leaf nodes\")\n metric_plot.set_ylabel(\"Metric values\")\n metric_plot.legend(bbox_to_anchor=(1.2, 0.5)) \n \n metric_plot.get_figure().savefig(\"question2_graph.png\")\n\n\n# Read the data\nbiomed_data3 = pd.read_csv(\"Biomechanical_Data_3Classes.csv\",header=0)\n\n# Randomly shuffle the data\nbiomed_data3 = biomed_data3.sample(frac=1).reset_index(drop=True)\n \naccuracy_list = list()\nprecision_hernia = list()\nprecision_normal = list()\nprecision_spondy = list()\nrecall_hernia = list()\nrecall_normal = list()\nrecall_spondy = list()\n\n# Generate Decision trees with different minimum leaf 
nodes\ngenerate_decision_tree(5,\"question2_5\")\ngenerate_decision_tree(15,\"question2_15\")\ngenerate_decision_tree(25,\"question2_25\")\ngenerate_decision_tree(40,\"question2_40\")\ngenerate_decision_tree(50,\"question2_50\")\n\n# Plot graphs of min. leaf nodes against metrics \nplotGraph()\n \n\n\n","sub_path":"question2_py.py","file_name":"question2_py.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
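The metric bookkeeping in the record above hinges on classification_report(..., output_dict=True) returning a nested dict keyed by class name plus an 'accuracy' entry; a minimal standalone sketch (labels made up, zero_division assumes a modern scikit-learn):

```python
from sklearn import metrics

y_true = ["Normal", "Hernia", "Normal", "Spondylolisthesis"]
y_pred = ["Normal", "Normal", "Normal", "Spondylolisthesis"]
report = metrics.classification_report(y_true, y_pred, output_dict=True, zero_division=0)
print(report["Normal"]["precision"])   # 2 of 3 "Normal" predictions are right -> 0.666...
print(report["accuracy"])              # 3 of 4 correct -> 0.75
```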
+{"seq_id":"547038690","text":"from typing import List\nimport pdb\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n intervals.sort()\n l = len(intervals)\n merge_list = []\n temp_list = []\n next_list = []\n merge_flag = False\n for i in range(l-1):\n if merge_flag == False:\n temp_list = intervals[i]\n else:\n i += 1\n\n if i == l-1:\n next_list = intervals[l-1]\n else:\n next_list = intervals[i+1]\n if temp_list[0] <= next_list[0]:\n if temp_list[1] <= next_list[1]:\n temp_list[1] = next_list[1]\n merge_flag = True\n else:\n temp_list[0] = next_list[0]\n merge_flag = True\n\n merge_list.append([temp_list[0],temp_list[1]])\n\n return merge_list\n #pdb.set_trace()\n\n#S = Solution().merge([[1,3],[2,6],[8,10],[15,18]])\nS = Solution().merge([[0, 2], [1, 4], [3, 5]])\nprint(S)\n\n'''\nc_l = intervals[i][0]\np_l = intervals[i-1][0]\nc_r = intervals[i][1]\np_r = intervals[i-1][1]\n\nif c_l <= p_r and c_r >= p_l:\n if c_l >= p_l and c_r >= p_r:\n temp_list.append([p_l, c_r])\n elif c_l < p_l and c_r >= p_r:\n merge_list.append([c_l, c_r])\n elif c_l < p_l and c_r <= p_r:\n merge_list.append([c_l, p_r])\n else:\n merge_list.append([p_l, p_r])\n i -= 1\n if i == 0:\n break;\nelse:\n if i == 1:\n merge_list.append([c_l, c_r])\n merge_list.append([p_l, p_r])\n else:\n merge_list.append([c_l, c_r])\n\n#merge_list.append([p_l, p_r])\n'''\n","sub_path":"56.py","file_name":"56.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"247140670","text":"import datetime, random\nfrom collections import Counter as CollectionCounter, OrderedDict \n\nfrom django.shortcuts import render\nfrom django.template import Context\nfrom django.views.generic import ListView, DetailView, View\nfrom django.db.models import Count\nfrom django.http import JsonResponse, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\nfrom .models import EventLog, Level, Computer\n# Create your views here.\n\n\nclass CreateData(ListView):\n\n\tdef get(self,request):\n\t\tlast_id = EventLog.objects.all()[0].id\n\n\t\tfor i in range(1,10):\n\t\t\ttry: \n\t\t\t\tevent = EventLog.objects.get(pk = random.randint(1,last_id))\n\t\t\t\tevent.pk = None\n\t\t\t\tevent.created_time = datetime.datetime.now()\n\t\t\t\tevent.save() \n\t\t\texcept:\n\t\t\t\tcontinue\n\n\t\treturn HttpResponse(\"done\")\n\n\nclass EventLogView(ListView):\n\tmodel = EventLog\n\ttemplate_name = \"articles/eventlog_list.html\"\n\tcontext_object_name = \"eventlog_list\"\n\tpaginate_by = 25\n\n\tdef get_queryset(self):\n\t\tif not \"order\" in self.request.GET:\n\t\t\treturn EventLog.objects.all()\n\t\telse:\n\t\t\treturn EventLog.objects.order_by(self.request.GET[\"order\"])\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(EventLogView,self).get_context_data(**kwargs)\n\t\tif \"order\" in self.request.GET:\n\t\t\tprint(self.request.GET[\"order\"])\n\t\t\tcontext[\"order\"] = self.request.GET[\"order\"]\n\t\treturn context\n\n\t@method_decorator(login_required)\n\tdef dispatch(self, *args, **kwargs):\n\t\treturn super(EventLogView, self).dispatch(*args, **kwargs)\n\n\nclass EventLogDetailView(DetailView):\n\tmodel = EventLog\n\ttemplate_name = \"articles/eventlog_detail.html\"\n\tcontext_object_name = \"eventlog\"\n\n\nclass DashboardView(ListView):\n\tmodel = EventLog\n\ttemplate_name = \"articles/dashboard.html\"\n\tcontext_object_name = \"critical_events\"\n\tqueryset = EventLog.objects.filter(level__level_display_name = \"Critical\")\n\tpaginate_by = 25\n\n\t@method_decorator(login_required)\n\tdef dispatch(self, *args, **kwargs):\n\t\treturn super(DashboardView,self).dispatch(*args, **kwargs)\n\n\nclass Dashboard2View(ListView):\n\tmodel = EventLog\n\ttemplate_name = \"articles/dashboard2.html\"\n\tcontext_object_name = \"critical_events\"\n\tqueryset = EventLog.objects.filter(level__level_display_name = \"Critical\")\n\tpaginate_by = 25\n\n\t@method_decorator(login_required)\n\tdef dispatch(self, *args, **kwargs):\n\t\treturn super(Dashboard2View,self).dispatch(*args, **kwargs)\n\n\nclass FetchDataView(View):\n\tdef get(self, request):\n\t\tevents_count_report = dict()\n\t\tqry = EventLog.objects.all().filter(created_time__gt = \n\t\t\t(datetime.datetime.now() - datetime.timedelta(1)))\n\t\tall_events_count_by_level = CollectionCounter([q.level_display() \\\n\t\t\tfor q in qry])\n\t\tif \"Information\" in all_events_count_by_level:\n\t\t\tdel(all_events_count_by_level[\"Information\"])\n\t\tevents_count_report['all_events_count_by_level'] = \\\n\t\t\tall_events_count_by_level\t\n\t\t# pull distinct computer name from qry\t\t\n\t\tcomputers_set = set([q.computer_name_display() for q in qry])\n\t\tqry_all_levels = Level.objects.all()\n\t\tall_levels = set([q.level_display_name for q in qry_all_levels])\t\t\n\t\tfor computer in computers_set:\n\t\t\tevents_count_by_level = CollectionCounter([q.level_display() \\\n\t\t\t\tfor q in qry if q.computer_name_display() == 
computer])\n\t\t\tfor level in all_levels:\n\t\t\t\tif (level not in events_count_by_level):\n\t\t\t\t\tevents_count_by_level[level] = 0\n\t\t\tif \"Information\" in events_count_by_level:\n\t\t\t\tdel events_count_by_level[\"Information\"]\t\t\n\t\t\tevents_count_report[computer] = events_count_by_level\n\t\treturn JsonResponse(events_count_report)\n\n\nclass CriticalEventsCount(View):\n\tdef get(self,request):\n\t\tcritical_events_count = dict()\n\t\tqry = EventLog.objects.all().filter(created_time__gt = \\\n\t\t\t(datetime.datetime.now() - datetime.timedelta(1))) \\\n\t\t\t.filter(level__level_display_name = 'Critical')\n\t\tcritical_events_count_by_computer = CollectionCounter( \\\n\t\t\t[q.computer_name_display() for q in qry])\n\t\tordered_critical_events_count_by_computer = OrderedDict( \\\n\t\t\tsorted(critical_events_count_by_computer.items(), \\\n\t\t\t\tkey=lambda t: t[1], reverse=True))\n\t\tcritical_events_count['total'] = \\\n\t\t\tsum(critical_events_count_by_computer.values())\n\t\tcritical_events_count['by_computer'] = ordered_critical_events_count_by_computer\n\t\treturn JsonResponse(critical_events_count)\n\n","sub_path":"flip/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
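FetchDataView and CriticalEventsCount above both reduce a queryset to per-key tallies with collections.Counter; the same aggregation pattern in isolation:

```python
from collections import Counter, OrderedDict

levels = ["Critical", "Warning", "Critical", "Error"]
counts = Counter(levels)
print(counts)               # Counter({'Critical': 2, 'Warning': 1, 'Error': 1})
print(sum(counts.values())) # 4, like the 'total' field in CriticalEventsCount
# Order by count descending, as the view does for the per-computer report:
print(OrderedDict(sorted(counts.items(), key=lambda t: t[1], reverse=True)))
```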
+{"seq_id":"33469348","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 16 15:32:42 2020\n\n@author: mcsbi\n\"\"\"\n\"\"\"\nChallenge: Below, we have provided a list of lists that contain numbers. \nUsing list comprehension, create a new list threes that contains all the \nnumbers from the original list that are divisible by 3. This can be \naccomplished in one line of code.\n\"\"\"\nnums = [[4, 3, 12, 10], [8, 7, 6], [5, 18, 15, 7, 11], [9, 4], [24, 20, 17], [3, 5]]\n\nthrees = [number for lst in nums for number in lst if number % 3 == 0]\nprint(threes)\n","sub_path":"week2_MAP_FILTER/w2_20.py","file_name":"w2_20.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"90301661","text":"'''\nAuthor: Puffrora\nDate: 2021-11-09 20:14:29\nLastModifiedBy: Puffrora\nLastEditTime: 2021-11-09 21:20:49\n'''\n\nfrom typing import List\n\n\n# TC: O(N)\n# SC: O(1)\nclass Solution:\n def arrayNesting(self, nums: List[int]) -> int:\n\n res = 1\n\n for i in range(len(nums)):\n if nums[i] == -1:\n continue\n cur_len, cur_pos = 0, i\n while nums[cur_pos] != -1:\n cur_len += 1\n tmp = nums[cur_pos]\n nums[cur_pos] = -1\n cur_pos = tmp\n\n res = max(res, cur_len)\n\n return res\n","sub_path":"Leetcode/leetcode565 数组嵌套.py","file_name":"leetcode565 数组嵌套.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"585633827","text":"from . import DomoResource, api_operation\nfrom common.util import build_request\n\n\nclass User(DomoResource):\n \"\"\" Domo User \"\"\"\n\n @api_operation(\n title='Users',\n url='/api/admin/users',\n parameters={\n 'id': '(optional) Id of user(s) to retrieve. Multiple IDs are comma seperated',\n 'includeDetails': '(optional) Boolean to include additional user detail'\n },\n )\n def get(self):\n \"\"\" Return list of Domo users \"\"\"\n return 'Not implemented', 400\n\n\nclass Pages(DomoResource):\n \"\"\" User Pages \"\"\"\n\n @api_operation(\n title='User Pages',\n url='/api/pages/user',\n parameters={\n 'includeHidden': 'Boolean to return hidden pages'\n },\n )\n def get(self):\n \"\"\" Returns pages shared with the user \"\"\"\n self.parser.add_argument('includeHidden', type=bool)\n args = self.parser.parse_args()\n return build_request('/api/content/v1/pages/navigation', args)\n","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"308094306","text":"import cv2\nimport numpy as np\nimport sys\nimport Spatial_Filtering as Spatial\n\n\ndef unsharp_masking(original_image):\n\n org_img = cv2.imread(original_image, 0).astype(np.uint8)\n\n blur_mask = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]\n\n blurred_image = Spatial.SpatialFiltering(original_image, blur_mask, 9)\n\n mask = cv2.subtract(org_img, blurred_image)\n unsharped_image = cv2.add(org_img, mask)\n\n cv2.imshow('Mask', mask)\n cv2.imshow('Blurred image', blurred_image)\n cv2.imshow('Unsharped image', unsharped_image)\n cv2.imshow('original image', org_img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n\n Original_Image = str(sys.argv[1])\n\n unsharp_masking(Original_Image)\n","sub_path":"Unsharp Masking.py","file_name":"Unsharp Masking.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"543868937","text":"# Normalization is the process of scaling individual samples to have unit norm\n# This process can be useful if you plan to use a quadratic form\n'''\n\nWhat is Norm?\n\nIt's the total size or length of all vectors in a vector space or matrix. \nLength can be calculation using Distance formula \n\nL1 - Norm(Manhattan norm)\nL2 - Norm(Eucledian Doistance)\n\nFor more Detailed explanantion of Norms refer this -> https://rorasa.wordpress.com/2012/05/13/l0-norm-l1-norm-l2-norm-l-infinity-norm/\n\nMore on Distance -> http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Clustering_Parameters/Manhattan_Distance_Metric.htm\n\n'''\n\n\nfrom sklearn import preprocessing\nimport numpy as np\n\nX = [[ 1., -1., 2.],\n [ 2., 0., 0.],\n [ 0., 1., -1.]]\nX_normalized = preprocessing.normalize(X, norm='l2')\n\nprint(X_normalized) ","sub_path":"Preprocessing data/Normalize.py","file_name":"Normalize.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"610456714","text":"# python3\n\"\"\"\nMaximum Pairwise Product Problem\n\nFind the maximum product of two distinct numbers in a sequence of non-negative integers.\n\n\nInput: The first line is the number of integers; The second line is a sequence of non-negative integers.\nOutput: The maximum value that can be obtained by multiplying two different elements from the sequence.\n\"\"\"\n# 这题说的是两个不同的元素。数字可以是一样的。\n# 输入的第一行没有用\n\n\ndef max_pairwise_product(numbers):\n n = len(numbers)\n numbers.sort()\n return numbers[n-1] * numbers[n-2]\n\n\nif __name__ == '__main__':\n input_n = int(input())\n input_numbers = [int(x) for x in input().split()]\n print(max_pairwise_product(input_numbers))\n","sub_path":"C1W1 Programming Challenges/2_Maximum_Pairwise_Product.py","file_name":"2_Maximum_Pairwise_Product.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"194674966","text":"import math\nimport random\nimport numpy as np\nimport ROOT as root\nimport multilines as ML\nfrom utility import geometrycheck as gcheck\nfrom utility import euclid\n\nroot.gROOT.ProcessLine(\n\"struct DataStruct{\\\n vector* dirx;\\\n vector* diry;\\\n vector* dirz;\\\n vector* pointx;\\\n vector* pointy;\\\n vector* pointz;\\\n vector* breakpointx;\\\n vector* breakpointy;\\\n vector* bpangle;\\\n vector* radius;\\\n vector* wirex;\\\n vector* wirey;\\\n vector* wirez;\\\n vector* grid_id;\\\n vector* grid_side;\\\n vector* grid_layer;\\\n vector* grid_column;\\\n vector* break_layer;\\\n vector* charge;\\\n vector* calo_id;\\\n vector* calo_type;\\\n vector* calo_side;\\\n vector* calo_wall;\\\n vector* calo_column;\\\n vector* calo_row;\\\n};\");\n\ndef remove_hits(fromlayer, tolayer, cells, radii, info):\n infoarr = np.array(info)\n cellarr = np.array(cells)\n radarr = np.array(radii)\n for lay in range(fromlayer,tolayer):\n indx = np.where(infoarr[:,1]!=lay)[0]\n infoarr = infoarr[indx]\n cellarr = cellarr[indx]\n radarr = radarr[indx]\n return cellarr.tolist(), radarr.tolist(), infoarr.tolist()\n\n\ndef remove_doubles(cluster):\n k=1 # keys start at 1\n if len(cluster[k][2])>0 and len(cluster[k+1][2])>0: # one empty = no doubles\n for pos, mi in enumerate(cluster[k][2]): # check info entries against all of next cluster entry\n nextinfo = np.array(cluster[k+1][2]) # from modified next cluster\n if len(nextinfo)<1:\n break # nothing to compare to\n dublet = list(mi[-2:]) # comes as tuple -> list convert\n nilist = nextinfo[:,-2:].tolist() # smart slicing, back to list\n \n if dublet in nilist: # found a double entry\n indx = nilist.index(dublet) # first entry; should be only one\n nextradius = cluster[k+1][1][indx] # choose according to radius\n if cluster[k][1][pos] > nextradius: # smaller wins\n cluster[k][1][pos] = nextradius # in-place change\n cluster[k+1][0].pop(indx) # change cluster k+1\n cluster[k+1][1].pop(indx)\n cluster[k+1][2].pop(indx)\n return cluster\n\n\nNsims = 1000 # Number of simulated lines\n\n# Set up ROOT data structures for file output and storage\nfile = root.TFile(\"/tmp/multiscatter_calo.tsim\",\"recreate\")\ntree = root.TTree(\"hit_tree\",\"Hit data\")\ntree.SetDirectory(file)\n\ndataStruct = root.DataStruct()\ndataStruct.dirx = root.std.vector('double')()\ndataStruct.diry = root.std.vector('double')()\ndataStruct.dirz = root.std.vector('double')()\ndataStruct.pointx = root.std.vector('double')()\ndataStruct.pointy = root.std.vector('double')()\ndataStruct.pointz = root.std.vector('double')()\ndataStruct.bpointx = root.std.vector('double')()\ndataStruct.bpointy = root.std.vector('double')()\ndataStruct.bangle = root.std.vector('double')()\ndataStruct.radius = root.std.vector('double')()\ndataStruct.wirex = root.std.vector('double')()\ndataStruct.wirey = root.std.vector('double')()\ndataStruct.wirez = root.std.vector('double')()\ndataStruct.gridid = root.std.vector('int')()\ndataStruct.gridside = root.std.vector('int')()\ndataStruct.gridlayer = root.std.vector('int')()\ndataStruct.gridcolumn = root.std.vector('int')()\ndataStruct.breaklayer = root.std.vector('int')()\ndataStruct.charge = root.std.vector('int')()\ndataStruct.caloid = root.std.vector('int')()\ndataStruct.calotype = root.std.vector('int')()\ndataStruct.calowall = root.std.vector('int')()\ndataStruct.caloside = root.std.vector('int')()\ndataStruct.calorow = root.std.vector('int')()\ndataStruct.calocolumn = root.std.vector('int')()\n\ntree.Branch('dirx', 
dataStruct.dirx)\ntree.Branch('diry', dataStruct.diry)\ntree.Branch('dirz', dataStruct.dirz)\ntree.Branch('pointx', dataStruct.pointx)\ntree.Branch('pointy', dataStruct.pointy)\ntree.Branch('pointz', dataStruct.pointz)\ntree.Branch('breakpointx', dataStruct.bpointx)\ntree.Branch('breakpointy', dataStruct.bpointy)\ntree.Branch('bpangle', dataStruct.bangle)\ntree.Branch('radius', dataStruct.radius)\ntree.Branch('wirex', dataStruct.wirex)\ntree.Branch('wirey', dataStruct.wirey)\ntree.Branch('wirez', dataStruct.wirez)\ntree.Branch('grid_id', dataStruct.gridid)\ntree.Branch('grid_side', dataStruct.gridside)\ntree.Branch('grid_layer', dataStruct.gridlayer)\ntree.Branch('grid_column', dataStruct.gridcolumn)\ntree.Branch('break_layer', dataStruct.breaklayer)\ntree.Branch('charge', dataStruct.charge)\ntree.Branch('calo_id', dataStruct.caloid)\ntree.Branch('calo_type', dataStruct.calotype)\ntree.Branch('calo_side', dataStruct.caloside)\ntree.Branch('calo_wall', dataStruct.calowall)\ntree.Branch('calo_row', dataStruct.calorow)\ntree.Branch('calo_column', dataStruct.calocolumn)\n\nwgr = ML.demonstratorgrid()\ntgen = ML.track_generator()\ndcalo = gcheck.demonstratorcalo()\n\nfor i in range(Nsims):\n cluster = { }\n lines = []\n bpoints = []\n bangles = []\n blayer = []\n scatter_angle = 3.0 # multiple scattering angle width [degrees]\n lrtracker = random.randint(0,1) # random left or right tracker side\n sign = -1*(-1)**lrtracker\n\n # random line xy slope for the first straight line\n angle = random.uniform(-math.pi*0.5+0.17, math.pi*0.5-0.17) # taking vertical out\n sl = math.tan(angle)\n\n # make a first line with cells etc.\n dummy = tgen.single_line_manual_with_z(sl,0.0,0.0,5.0) # Line3 with vertex on foil at x=0,y=0,z=5.0\n cells, radii = wgr.hits(dummy, lrtracker) # left/right tracker half\n info = wgr.wireinfo\n if len(info)>0:\n # 2D projection not an issue since is parallel to z=0 plane by construction\n original = euclid.Line2(euclid.Point2(dummy.p.x, dummy.p.y), euclid.Vector2(dummy.v.x, dummy.v.y))\n lines.append(dummy)\n\n # break first line and pick scatter angle for continuation\n bl = random.randint(1,7) # tracker layer with break\n cellvariation = random.uniform(-21.0, 21.0)\n layerline = euclid.Line2(euclid.Point2(sign*53.0 + sign*bl*44.0 + cellvariation,0.0), euclid.Vector2(0.0,1.0))\n breakpoint = original.intersect(layerline)\n bpoints.append((breakpoint.x, breakpoint.y))\n blayer.append(bl)\n c, r, i = remove_hits(bl+1, 9, cells, radii, info) # return numpy arrays\n cluster[1] = (c, r, i)\n\n # next line continuing from breakpoint\n scat_angle = random.gauss(0.0,scatter_angle*math.pi/180.0) # random scattering angle\n newangle = angle + scat_angle # altering original slope angle with new angle\n sl = math.tan(newangle)\n nextdummy = euclid.Line3(euclid.Point3(breakpoint.x, breakpoint.y, 5.0), euclid.Vector3(1.0, sl, 0.0))\n caloinfo= dcalo.calohits(nextdummy, lrtracker)\n while len(caloinfo) < 1: # no calo was hit, try again\n scat_angle = random.gauss(0.0,scatter_angle*math.pi/180.0) # random scattering angle\n newangle = angle + scat_angle # altering original slope angle with new angle\n sl = math.tan(newangle)\n nextdummy = euclid.Line3(euclid.Point3(breakpoint.x, breakpoint.y, 5.0), euclid.Vector3(1.0, sl, 0.0))\n caloinfo= dcalo.calohits(nextdummy, lrtracker)\n\n bangles.append(scat_angle)\n lines.append(nextdummy)\n ncells, nradii = wgr.hits(nextdummy, lrtracker) # left/right tracker half\n ninfo = wgr.wireinfo\n\n if len(ninfo)>0:\n c2, r2, i2 = remove_hits(0, bl, ncells, 
nradii, ninfo) # return numpy arrays\n cluster[2] = (c2, r2, i2)\n cluster = remove_doubles(cluster)\n\n allcells = cluster[1][0] + cluster[2][0] # concatenate\n allradii = cluster[1][1] + cluster[2][1] # concatenate\n allinfo = cluster[1][2] + cluster[2][2] # concatenate\n\n file.cd()\n # Prepare data structure for this line\n dataStruct.dirx.clear()\n dataStruct.diry.clear()\n dataStruct.dirz.clear()\n dataStruct.pointx.clear()\n dataStruct.pointy.clear()\n dataStruct.pointz.clear()\n dataStruct.bpointx.clear()\n dataStruct.bpointy.clear()\n dataStruct.bangle.clear()\n dataStruct.radius.clear()\n dataStruct.wirex.clear()\n dataStruct.wirey.clear()\n dataStruct.wirez.clear()\n dataStruct.gridid.clear()\n dataStruct.gridside.clear()\n dataStruct.gridlayer.clear() \n dataStruct.gridcolumn.clear()\n dataStruct.breaklayer.clear()\n dataStruct.charge.clear()\n dataStruct.caloid.clear()\n dataStruct.calotype.clear()\n dataStruct.caloside.clear()\n dataStruct.calowall.clear()\n dataStruct.calorow.clear() \n dataStruct.calocolumn.clear()\n\n for entry in lines: # truth lines\n dataStruct.dirx.push_back(entry.v.x)\n dataStruct.diry.push_back(entry.v.y)\n dataStruct.dirz.push_back(entry.v.z)\n dataStruct.pointx.push_back(entry.p.x)\n dataStruct.pointy.push_back(entry.p.y)\n dataStruct.pointz.push_back(entry.p.z)\n dataStruct.charge.push_back(0)\n\n for bp, bang, bl in zip(bpoints, bangles, blayer):\n dataStruct.bpointx.push_back(bp[0])\n dataStruct.bpointy.push_back(bp[1])\n dataStruct.bangle.push_back(bang)\n dataStruct.breaklayer.push_back(bl)\n\n counter = 0\n type = caloinfo[0][1]\n for w,r,mi in zip(allcells,allradii,allinfo):\n if type == 1 and abs(w[1]) > abs(entry.p.y): \n continue # dismiss geiger hits outside xwall\n if type == 2 and abs(w[2]) > abs(entry.p.z): \n continue # dismiss geiger hits outside gveto\n dataStruct.radius.push_back(r)\n dataStruct.wirex.push_back(w[0])\n dataStruct.wirey.push_back(w[1])\n dataStruct.wirez.push_back(w[2])\n dataStruct.gridid.push_back(counter)\n gside = mi[0] # wire side\n grow = mi[1] # wire column\n gcol = mi[2] # wire layer\n dataStruct.gridlayer.push_back(grow)\n dataStruct.gridcolumn.push_back(gcol)\n dataStruct.gridside.push_back(gside) # not covered yet \n counter += 1 # count up all hits for entire event\n\n side = caloinfo[0][3]\n col = caloinfo[0][4]\n row = caloinfo[0][5]\n wall = caloinfo[0][6]\n dataStruct.caloid.push_back(0)\n dataStruct.calorow.push_back(row)\n dataStruct.calocolumn.push_back(col)\n dataStruct.calotype.push_back(type)\n dataStruct.caloside.push_back(side)\n dataStruct.calowall.push_back(wall)\n \n\n tree.Fill() # data structure fully filled, lines done\n \ntree.Write() # write all lines to disk\nfile.Close()\n","sub_path":"toysinglescatter_calo.py","file_name":"toysinglescatter_calo.py","file_ext":"py","file_size_in_byte":11228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"276993257","text":"#Use a function with uncertain numbers of input parameters\r\ndef func(*args):\r\n sum=0;\r\n for a in args:\r\n sum=sum+int(a);\r\n print(sum)\r\nfunc(2,3,7,8,16,5,4)\r\n\r\n# Write a function takes a gender info\r\ndef gender(gen=\"Unknown\"):\r\n if gen=='m':\r\n gen=\"m\"\r\n print(\"Your gender is \", gen)\r\n elif gen==\"f\":\r\n gen=\"f\";\r\n print(\"Your gender is \", gen)\r\n else:\r\n print(\"Error!\")\r\ngender(\"ttt\");\r\ngender();\r\n\r\n\r\n\r\n","sub_path":"25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"82503195","text":"import os\nimport json\nimport re\n\n\nclass NameSetEditor(object):\n\n def __init__(self):\n self.name_sets = list()\n\n def load_file(self, file_name):\n with open(file_name, 'r', encoding='utf-8-sig') as file:\n self.name_sets.append(json.loads(file.read()))\n\n def load_all(self):\n for root, dirnames, files in os.walk('./'):\n for file in files:\n if not file.startswith('_') and file.endswith('.json'):\n self.load_file(root + file)\n print('INFO: Loaded ' + str(len(self.name_sets)) + ' name sets.')\n\n def store(self):\n for name_set in self.name_sets:\n with open(name_set['tag'] + '.json', 'w', encoding='utf-8') as file:\n file.write(json.dumps(name_set, indent=' ', ensure_ascii=False))\n\n def change_tag(self, old_tag, new_tag):\n self.load_all()\n for name_set in self.name_sets:\n if name_set['tag'] == old_tag:\n name_set['tag'] = new_tag\n if 'templates' in name_set:\n for template in name_set['templates']:\n template['content'] = re.sub(old_tag, new_tag, template['content'])\n try:\n os.remove('./' + old_tag + '.json')\n except FileNotFoundError:\n pass\n self.store()\n\n @staticmethod\n def add_tag(name_set, new_tag, level, default_value, condition=None):\n if level == 'name_set':\n if new_tag not in name_set:\n if condition is not None:\n if condition(name_set):\n name_set[new_tag] = default_value\n else:\n name_set[new_tag] = default_value\n elif level == 'template':\n for template in name_set['templates']:\n if new_tag not in template:\n if condition is not None:\n if condition(template):\n template[new_tag] = default_value\n else:\n template[new_tag] = default_value\n elif level == 'name_list':\n for name_list in name_set['name_lists']:\n if new_tag not in name_list:\n if condition is not None:\n if condition(name_list):\n name_list[new_tag] = default_value\n else:\n name_list[new_tag] = default_value\n else:\n raise ValueError('Level needs to be either \"name_set\", \"template\" or \"name_list\". '\n 'No other values are accepted.')\n\n def add_tags_to_all(self, new_tag, level, default_value, condition=None):\n self.load_all()\n for name_set in self.name_sets:\n self.add_tag(name_set, new_tag, level, default_value, condition)\n self.store()\n\n def remove_tag(self, tag):\n self.load_all()\n for name_set in self.name_sets:\n for key in name_set:\n print(key)\n if key == tag:\n del name_set[key]\n print('INFO: Deleted ' + tag + ' in name set ' + name_set['tag'] + '.')\n break\n elif key == 'templates':\n for template in name_set['templates']:\n for t in template:\n if t == tag:\n del template[t]\n print('INFO: Deleted ' + tag + ' in a template.')\n break\n elif key == 'name_lists':\n for name_list in name_set['name_lists']:\n for n in name_list:\n if n == tag:\n del name_list[n]\n print('INFO: Deleted ' + tag + ' in name list ' + name_list['tag'] + '.')\n break\n self.store()\n\n def remove_name_list(self, tag):\n self.load_all()\n for name_set in self.name_sets:\n for name_list in list(name_set['name_lists']):\n if name_list['tag'] == tag:\n name_set['name_lists'].remove(name_list)\n self.store()\n\n def add_name_list_to_set(self, name_set_tag: str, name_list: dict):\n '''\n Adds a name list to a name set. Can add duplicates. Does not check if name list is a valid name list.\n\n :param name_set_tag: the name set the list should be added to. (str)\n :param name_list: the name list which should be added. 
(dict)\n '''\n self.load_all()\n for name_set in self.name_sets:\n if name_set['tag'] == name_set_tag:\n name_set['name_lists'].append(name_list)\n self.store()\n\n\nif __name__ == '__main__':\n utility = NameSetEditor()\n # utility.change_tag('area_names', 'areas')\n # utility.add_tag('use_markov', 'name_list', True)\n # utility.remove_tag('markovproperties')\n\n\n def predicate(name_list):\n return True\n\n\n # utility.add_tags_to_all('weight', 'name_list', 10)\n\n utility.remove_name_list('full_real_world_names')\n\n with open('/home/jonas/PycharmProjects/random_generators/name_generator/data_files/lakes.json', 'r', encoding='utf-8') as file:\n content = file.read()\n utility.add_name_list_to_set('lakes', json.loads(content))\n","sub_path":"procedural_generators/names/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"440406591","text":"import lotto\nfrom pprint import pprint\n\nnumber_of_iterations = 10000\n\ndef collect_data(func):\n distribution = {}\n for i in range(number_of_iterations):\n numbers = func()\n check_unique = dict((k,1) for k in numbers)\n assert len(check_unique) == 6\n assert min(numbers) > 0\n assert max(numbers) < 46\n for number in numbers:\n if number not in distribution:\n distribution[number] = 1\n else:\n distribution[number] += 1\n \n assert len(distribution) == 45\n min_count = min(distribution.values())\n max_count = max(distribution.values())\n ## pprint(distribution)\n mean = number_of_iterations * 6 / 45 \n print(\"min count: {}, {:0.3f} max_count: {},{:0.3f}\".\n format(min_count, min_count/mean, max_count, max_count/mean))\n assert min_count/mean > 0.92\n assert max_count/mean < 1.08\n \n \ndef test_man1():\n data = collect_data(lotto.lotto_man)\n \ndef test_man_bad():\n data = collect_data(lotto.lotto_man_bad) \n\ndef test_random_sample():\n collect_data(lotto.random_sample)\n","sub_path":"lotto/test_lotto.py","file_name":"test_lotto.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"17838766","text":"# Copyright 2018 Braxton Mckee\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom object_database.schema import Schema\nfrom object_database.messages import ClientToServer, ServerToClient, SchemaDefinition, getHeartbeatInterval\nfrom object_database.core_schema import core_schema\nfrom object_database.view import View, Transaction, _cur_view, SerializedDatabaseValue\nfrom object_database.identity import IdentityProducer\nimport object_database.keymapping as keymapping\nfrom typed_python.hash import sha_hash\nfrom typed_python.SerializationContext import SerializationContext\nfrom typed_python.Codebase import Codebase as TypedPythonCodebase\nfrom typed_python import Alternative\n\nimport typed_python, object_database\n\nimport queue\nimport threading\nimport logging\nimport traceback\nimport time\n\nfrom object_database.view import RevisionConflictException, DisconnectedException\n\n\nclass Everything:\n \"\"\"Singleton to mark subscription to everything in a slice.\"\"\"\n\nTransactionResult = Alternative(\n \"TransactionResult\",\n Success = {},\n RevisionConflict = {'key': str},\n Disconnected = {}\n )\n\n\nclass VersionedBase:\n def _best_version_offset_for(self, version):\n i = len(self.version_numbers) - 1\n\n while i >= 0:\n if self.version_numbers[i] <= version:\n return i\n i -= 1\n\n return None\n\n def isEmpty(self):\n return not self.version_numbers\n\n def validVersionIncoming(self, version_read, transaction_id):\n if not self.version_numbers:\n return True\n top = self.version_numbers[-1]\n assert transaction_id > version_read\n return version_read >= top\n\n def hasVersionInfoNewerThan(self, tid):\n if not self.version_numbers:\n return False\n return tid < self.version_numbers[-1]\n\n def newestValue(self):\n if self.version_numbers:\n return self.valueForVersion(self.version_numbers[-1])\n else:\n return self.valueForVersion(None)\n\n\nclass VersionedValue(VersionedBase):\n def __init__(self):\n self.version_numbers = []\n self.values = []\n\n def setVersionedValue(self, version_number, val):\n self.version_numbers.append(version_number)\n self.values.append(val)\n\n def valueForVersion(self, version):\n i = self._best_version_offset_for(version)\n\n if i is None:\n return None\n\n return self.values[i]\n\n def needsToTrack(self):\n return len(self.version_numbers) != 1 or self.values[0].serializedByteRep is None\n\n def cleanup(self, version_number):\n if not self.values:\n return True\n\n while self.values and self.version_numbers[0] < version_number:\n if len(self.values) == 1:\n if self.values[0].serializedByteRep is None:\n #this value was deleted and we can just remove this whole entry\n return True\n else:\n self.version_numbers[0] = version_number\n else:\n if self.version_numbers[1] <= version_number:\n self.values.pop(0)\n self.version_numbers.pop(0)\n else:\n self.version_numbers[0] = version_number\n\n def __repr__(self):\n return \"VersionedValue(ids=%s)\" % (self.version_numbers,)\n\n\nclass VersionedSet(VersionedBase):\n 
#values in sets are always strings\n def __init__(self):\n self.version_numbers = []\n self.adds = []\n self.removes = []\n\n def setVersionedAddsAndRemoves(self, version, adds, removes):\n assert not adds or not removes\n assert adds or removes\n assert isinstance(adds, set)\n assert isinstance(removes, set)\n\n if not self.version_numbers:\n if removes:\n adds = set(adds)\n adds.difference_update(removes)\n removes = set()\n\n self.adds.append(adds)\n self.removes.append(removes)\n self.version_numbers.append(version)\n\n def needsToTrack(self):\n return len(self.version_numbers) != 1 or not (len(self.adds[0]) or len(self.removes[0]))\n\n def updateVersionedAdds(self, version, adds):\n if not self.version_numbers or self.version_numbers[-1] != version:\n assert not self.version_numbers or self.version_numbers[-1] < version\n self.setVersionedAddsAndRemoves(version, adds, set())\n else:\n #someone could be iterating over this set in another thread\n new_last_adds = set(self.adds[-1])\n new_last_adds.update(adds)\n self.adds[-1] = new_last_adds\n\n def cleanup(self, version_number):\n if not self.version_numbers:\n return True\n\n while self.version_numbers and self.version_numbers[0] < version_number:\n if len(self.version_numbers) == 1:\n if not self.adds[0] and not self.removes[0]:\n #this value was deleted and we can just remove this whole entry\n return True\n else:\n self.version_numbers[0] = version_number\n else:\n if self.version_numbers[1] <= version_number:\n #merge slot 0 into slot 1\n #the new set should have no removes, and only adds\n assert not self.removes[0], (self.adds[0], self.removes[0])\n\n new_set = set(self.adds[0])\n new_set.update(self.adds[1])\n new_set.difference_update(self.removes[1])\n\n self.adds.pop(0)\n self.removes.pop(0)\n self.version_numbers.pop(0)\n\n self.adds[0] = new_set\n self.removes[0] = set()\n else:\n self.version_numbers[0] = version_number\n\n def valueForVersion(self, version):\n ix = self._best_version_offset_for(version)\n\n if ix is None:\n return SetWithEdits(set(), set(), set())\n\n return SetWithEdits(set(), self.adds[:ix+1], self.removes[:ix+1])\n\n def __repr__(self):\n return \"VersionedSet(ids=%s, adds=%s, removes=%s)\" % (self.version_numbers, self.adds, self.removes)\n\n\nclass SetWithEdits:\n AGRESSIVELY_CHECK_SET_ADDS = False\n\n def __init__(self, s, adds, removes):\n self.s = s\n self.adds = adds\n self.removes = removes\n\n def toSet(self):\n res = set(self.s)\n for i in range(len(self.adds)):\n res.update(self.adds[i])\n res.difference_update(self.removes[i])\n return res\n\n def pickAny(self, toAvoid):\n removed = set()\n\n for i in reversed(range(len(self.adds))):\n if SetWithEdits.AGRESSIVELY_CHECK_SET_ADDS:\n adds = set(self.adds[i])\n\n for a in self.adds[i]:\n if a not in removed and a not in toAvoid:\n return a\n\n if SetWithEdits.AGRESSIVELY_CHECK_SET_ADDS:\n time.sleep(0.0001)\n assert self.adds[i] == adds\n\n removed.update(self.removes[i])\n\n for a in self.s:\n if a not in removed and a not in toAvoid:\n return a\n\n\nclass ManyVersionedObjects:\n def __init__(self):\n #for each version number we have outstanding\n self._version_number_refcount = {}\n\n self._min_reffed_version_number = None\n\n #for each version number, the set of keys that are set with it\n self._version_number_objects = {}\n\n #for each key, a VersionedValue or VersionedSet\n self._versioned_objects = {}\n\n def keycount(self):\n return len(self._versioned_objects)\n\n def versionIncref(self, version_number):\n if version_number not in 
self._version_number_refcount:\n self._version_number_refcount[version_number] = 1\n\n if self._min_reffed_version_number is None:\n self._min_reffed_version_number = version_number\n else:\n self._min_reffed_version_number = min(version_number, self._min_reffed_version_number)\n else:\n self._version_number_refcount[version_number] += 1\n\n def versionDecref(self, version_number):\n assert version_number in self._version_number_refcount\n\n self._version_number_refcount[version_number] -= 1\n\n assert self._version_number_refcount[version_number] >= 0\n\n if self._version_number_refcount[version_number] == 0:\n del self._version_number_refcount[version_number]\n\n if version_number == self._min_reffed_version_number:\n if not self._version_number_refcount:\n self._min_reffed_version_number = None\n else:\n self._min_reffed_version_number = min(self._version_number_refcount)\n\n def setForVersion(self, key, version_number):\n if key in self._versioned_objects:\n return self._versioned_objects[key].valueForVersion(version_number)\n\n return SetWithEdits(set(),set(),set())\n\n def hasDataForKey(self, key):\n return key in self._versioned_objects\n\n def valueForVersion(self, key, version_number):\n return self._versioned_objects[key].valueForVersion(version_number)\n\n def _object_has_version(self, key, version_number):\n if version_number not in self._version_number_objects:\n self._version_number_objects[version_number] = set()\n\n self._version_number_objects[version_number].add(key)\n\n def setVersionedValue(self, key, version_number, serialized_val):\n self._object_has_version(key, version_number)\n\n if key not in self._versioned_objects:\n self._versioned_objects[key] = VersionedValue()\n\n versioned = self._versioned_objects[key]\n\n initialValue = versioned.newestValue()\n\n versioned.setVersionedValue(version_number, SerializedDatabaseValue(serialized_val, {}))\n\n return initialValue\n\n def setVersionedAddsAndRemoves(self, key, version_number, adds, removes):\n self._object_has_version(key, version_number)\n\n if key not in self._versioned_objects:\n self._versioned_objects[key] = VersionedSet()\n\n if adds or removes:\n self._versioned_objects[key].setVersionedAddsAndRemoves(version_number, adds, removes)\n\n def setVersionedTailValueStringified(self, key, serialized_val):\n if key not in self._versioned_objects:\n self._object_has_version(key, -1)\n self._versioned_objects[key] = VersionedValue()\n self._versioned_objects[key].setVersionedValue(-1, SerializedDatabaseValue(serialized_val, {}))\n\n def updateVersionedAdds(self, key, version_number, adds):\n self._object_has_version(key, version_number)\n\n if key not in self._versioned_objects:\n self._versioned_objects[key] = VersionedSet()\n self._versioned_objects[key].setVersionedAddsAndRemoves(version_number, adds, set())\n else:\n self._versioned_objects[key].updateVersionedAdds(version_number, adds)\n\n def cleanup(self, curTransactionId):\n \"\"\"Get rid of old objects we don't need to keep around and increase the min_transaction_id\"\"\"\n if self._min_reffed_version_number is not None:\n lowestId = min(self._min_reffed_version_number, curTransactionId)\n else:\n lowestId = curTransactionId\n\n t0 = time.time()\n\n if self._version_number_objects:\n while min(self._version_number_objects) < lowestId:\n toCollapse = min(self._version_number_objects)\n\n t1 = time.time()\n count = len(self._version_number_objects[toCollapse])\n\n for key in self._version_number_objects[toCollapse]:\n if key not in 
self._versioned_objects:\n pass\n elif self._versioned_objects[key].cleanup(lowestId):\n del self._versioned_objects[key]\n else:\n if self._versioned_objects[key].needsToTrack():\n self._object_has_version(key, lowestId)\n\n del self._version_number_objects[toCollapse]\n\n\nclass TransactionListener:\n def __init__(self, db, handler):\n self._thread = threading.Thread(target=self._doWork)\n self._thread.daemon = True\n self._shouldStop = False\n self._db = db\n self._db.registerOnTransactionHandler(self._onTransaction)\n self._queue = queue.Queue()\n self.serializationContext = TypedPythonCodebase.coreSerializationContext()\n self.handler = handler\n\n def setSerializationContext(self, context):\n self.serializationContext = context\n\n def start(self):\n self._thread.start()\n\n def stop(self):\n self._shouldStop = True\n self._thread.join()\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.stop()\n\n def flush(self):\n while self._queue.qsize():\n time.sleep(0.001)\n\n def _doWork(self):\n logger = logging.getLogger(__name__)\n while not self._shouldStop:\n try:\n todo = self._queue.get(timeout=0.1)\n except queue.Empty:\n todo = None\n\n if todo:\n try:\n self.handler(todo)\n except Exception:\n logger.error(\"Callback threw exception:\\n%s\", traceback.format_exc())\n\n\n def _onTransaction(self, key_value, priors, set_adds, set_removes, tid):\n changed = {}\n\n for k in key_value:\n o, fieldname = self._db._data_key_to_object(k)\n\n if o:\n if o not in changed:\n changed[o] = []\n\n if fieldname != \" exists\":\n changed[o].append((\n fieldname,\n View.unwrapSerializedDatabaseValue(self.serializationContext, key_value[k], o.__types__[fieldname]),\n View.unwrapSerializedDatabaseValue(self.serializationContext, priors[k], o.__types__[fieldname])\n ))\n\n self._queue.put(changed)\n\nclass DatabaseConnection:\n def __init__(self, channel):\n self._channel = channel\n self._transaction_callbacks = {}\n\n self._lock = threading.Lock()\n\n # transaction of what's in the KV store\n self._cur_transaction_num = 0\n\n # a datastructure that keeps track of all the different versions of the objects\n # we have mapped in.\n self._versioned_data = ManyVersionedObjects()\n\n # a map from lazy object id to (schema, typename)\n self._lazy_objects = {}\n self._lazy_object_read_blocks = {}\n\n self.initialized = threading.Event()\n self.disconnected = threading.Event()\n\n self.connectionObject = None\n\n # transaction handlers. These must be nonblocking since we call them under lock\n self._onTransactionHandlers = []\n\n self._flushEvents = {}\n\n # Map: schema.name -> schema\n self._schemas = {}\n\n self._messages_received = 0\n\n self._pendingSubscriptions = {}\n\n #if we have object-level subscriptions to a particular type (e.g. not everything)\n #then, this is from (schema, typename) -> {object_id -> transaction_id} so that\n #we can tell when the subscription should become valid. 
Subscriptions are permanent\n #otherwise, if we're subscribed, it's 'Everything'\n self._schema_and_typename_to_subscription_set = {}\n\n #from (schema,typename,field_val) -> {'values', 'index_values', 'identities'}\n self._subscription_buildup = {}\n\n self._channel.setServerToClientHandler(self._onMessage)\n\n self._flushIx = 0\n\n self._largeSubscriptionHeartbeatDelay = 0\n\n self.serializationContext = TypedPythonCodebase.coreSerializationContext()\n\n self._logger = logging.getLogger(__name__)\n\n def registerOnTransactionHandler(self, handler):\n self._onTransactionHandlers.append(handler)\n\n def setSerializationContext(self, context):\n assert isinstance(context, SerializationContext), context\n self.serializationContext = context\n return self\n\n def serializeFromModule(self, module):\n \"\"\"Give the project root we want to serialize from.\"\"\"\n self.setSerializationContext(\n TypedPythonCodebase.FromRootlevelModule(module).serializationContext\n )\n\n def _stopHeartbeating(self):\n self._channel._stopHeartbeating()\n\n def disconnect(self):\n self.disconnected.set()\n self._channel.close()\n\n def _noViewsOutstanding(self):\n with self._lock:\n return not self._versioned_data._version_number_refcount\n\n def authenticate(self, token):\n self._channel.write(\n ClientToServer.Authenticate(token=token)\n )\n\n def addSchema(self, schema):\n schema.freeze()\n\n with self._lock:\n if schema.name in self._schemas:\n return\n\n self._schemas[schema.name] = schema\n\n schemaDesc = schema.toDefinition()\n\n self._channel.write(\n ClientToServer.DefineSchema(\n name=schema.name,\n definition=schemaDesc\n )\n )\n\n def flush(self):\n \"\"\"Make sure we know all transactions that have happened up to this point.\"\"\"\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n self._flushIx += 1\n ix = str(self._flushIx)\n e = self._flushEvents[ix] = threading.Event()\n self._channel.write(ClientToServer.Flush(guid=ix))\n\n e.wait()\n\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n def subscribeToObject(self, t):\n self.subscribeToObjects([t])\n\n def subscribeToObjects(self, objects):\n for t in objects:\n self.addSchema(type(t).__schema__)\n self.subscribeMultiple([\n (type(t).__schema__.name, type(t).__qualname__, (\"_identity\", t._identity), False)\n for t in objects\n ])\n\n def _lazinessForType(self, typeObj, desiredLaziness):\n if desiredLaziness is not None:\n return desiredLaziness\n if hasattr(typeObj, '__object_database_lazy_subscription__'):\n return True\n return False\n\n def subscribeToIndex(self, t, block=True, lazySubscription=None, **kwarg):\n self.addSchema(t.__schema__)\n\n toSubscribe = []\n for fieldname,fieldvalue in kwarg.items():\n toSubscribe.append((\n t.__schema__.name,\n t.__qualname__,\n (fieldname, keymapping.index_value_to_hash(fieldvalue)),\n self._lazinessForType(t, lazySubscription)\n )\n )\n\n return self.subscribeMultiple(toSubscribe, block=block)\n\n def subscribeToType(self, t, block=True, lazySubscription=None):\n self.addSchema(t.__schema__)\n\n if self._isTypeSubscribedAll(t):\n return ()\n\n return self.subscribeMultiple([(t.__schema__.name, t.__qualname__, None, self._lazinessForType(t, lazySubscription))], block)\n\n def subscribeToNone(self, t, block=True):\n self.addSchema(t.__schema__)\n with self._lock:\n self._schema_and_typename_to_subscription_set.setdefault(\n (t.__schema__.name, t.__qualname__), set()\n )\n return ()\n\n def subscribeToSchema(self, *schemas, block=True, 
lazySubscription=None, excluding=()):\n for s in schemas:\n self.addSchema(s)\n\n unsubscribedTypes = []\n for schema in schemas:\n for tname, t in schema._types.items():\n if not self._isTypeSubscribedAll(t) and t not in excluding:\n unsubscribedTypes.append((schema.name, tname, None, self._lazinessForType(t, lazySubscription)))\n\n if unsubscribedTypes:\n return self.subscribeMultiple(unsubscribedTypes, block=block)\n\n return ()\n\n def isSubscribedToSchema(self, schema):\n return all(self._isTypeSubscribed(t) for t in schema._types.values())\n\n def isSubscribedToType(self, t):\n return self._isTypeSubscribed(t)\n\n def _isTypeSubscribed(self, t):\n return (t.__schema__.name, t.__qualname__) in self._schema_and_typename_to_subscription_set\n\n def _isTypeSubscribedAll(self, t):\n return self._schema_and_typename_to_subscription_set.get((t.__schema__.name, t.__qualname__)) is Everything\n\n def subscribeMultiple(self, subscriptionTuples, block=True):\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n events = []\n\n for tup in subscriptionTuples:\n e = self._pendingSubscriptions.get(tup)\n\n if not e:\n e = self._pendingSubscriptions[(tup[0], tup[1], tup[2])] = threading.Event()\n\n assert tup[0] and tup[1]\n\n self._channel.write(\n ClientToServer.Subscribe(schema=tup[0], typename=tup[1], fieldname_and_value=tup[2], isLazy=tup[3])\n )\n\n events.append(e)\n\n if not block:\n return tuple(events)\n\n for e in events:\n e.wait()\n\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n return ()\n\n def waitForCondition(self, cond, timeout):\n #eventually we will replace this with something that watches the calculation\n t0 = time.time()\n while time.time() - t0 < timeout:\n with self.view():\n try:\n if cond():\n return True\n except Exception:\n self._logger.error("Condition callback threw an exception:\\n%s", traceback.format_exc())\n\n time.sleep(min(timeout / 20, .25))\n return False\n\n def _data_key_to_object(self, key):\n schema_name, typename, identity, fieldname = keymapping.split_data_key(key)\n\n schema = self._schemas.get(schema_name)\n if not schema:\n return None,None\n\n cls = schema._types.get(typename)\n\n if cls:\n return cls.fromIdentity(identity), fieldname\n\n return None,None\n\n def __str__(self):\n return "DatabaseConnection(%s)" % id(self)\n\n def __repr__(self):\n return "DatabaseConnection(%s)" % id(self)\n\n def current_transaction(self):\n if not hasattr(_cur_view, "view"):\n return None\n return _cur_view.view\n\n def view(self, transaction_id=None):\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n if transaction_id is None:\n transaction_id = self._cur_transaction_num\n\n assert transaction_id <= self._cur_transaction_num\n\n view = View(self, transaction_id)\n\n self._versioned_data.versionIncref(transaction_id)\n\n return view\n\n def transaction(self):\n """Only one transaction may be committed on the current transaction number."""\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n view = Transaction(self, self._cur_transaction_num)\n\n transaction_id = self._cur_transaction_num\n\n self._versioned_data.versionIncref(transaction_id)\n\n return view\n\n def _releaseView(self, view):\n with self._lock:\n self._versioned_data.versionDecref(view._transaction_num)\n\n def isSubscribedToObject(self, object):\n return not self._suppressKey(object._identity)\n\n def _suppressKey(self, k):\n 
schema, typename, ident, fieldname = keymapping.split_data_key(k)\n\n subscriptionSet = self._schema_and_typename_to_subscription_set.get((schema,typename))\n\n if subscriptionSet is Everything:\n return False\n if isinstance(subscriptionSet, set) and ident in subscriptionSet:\n return False\n return True\n\n def _suppressIdentities(self, index_key, identities):\n schema, typename, fieldname, valhash = keymapping.split_index_key_full(index_key)\n\n subscriptionSet = self._schema_and_typename_to_subscription_set.get((schema,typename))\n\n if subscriptionSet is Everything:\n return identities\n elif subscriptionSet is None:\n return set()\n else:\n return identities.intersection(subscriptionSet)\n\n def cleanup(self):\n with self._lock:\n self._versioned_data.cleanup(self._cur_transaction_num)\n\n def _onMessage(self, msg):\n self._messages_received += 1\n\n if msg.matches.Disconnected:\n with self._lock:\n self.disconnected.set()\n self.connectionObject = None\n\n for e in self._lazy_object_read_blocks.values():\n e.set()\n\n for e in self._flushEvents.values():\n e.set()\n\n for e in self._pendingSubscriptions.values():\n e.set()\n\n for q in self._transaction_callbacks.values():\n try:\n q(TransactionResult.Disconnected())\n except Exception:\n self._logger.error(\n \"Transaction commit callback threw an exception:\\n%s\",\n traceback.format_exc()\n )\n\n self._transaction_callbacks = {}\n self._flushEvents = {}\n elif msg.matches.FlushResponse:\n with self._lock:\n e = self._flushEvents.get(msg.guid)\n if not e:\n self._logger.error(\"Got an unrequested flush response: %s\", msg.guid)\n else:\n e.set()\n elif msg.matches.Initialize:\n with self._lock:\n self._cur_transaction_num = msg.transaction_num\n self.identityProducer = IdentityProducer(msg.identity_root)\n self.connectionObject = core_schema.Connection.fromIdentity(msg.connIdentity)\n self.initialized.set()\n elif msg.matches.TransactionResult:\n with self._lock:\n try:\n self._transaction_callbacks.pop(msg.transaction_guid)(\n TransactionResult.Success() if msg.success\n else TransactionResult.RevisionConflict(key=msg.badKey)\n )\n except Exception:\n self._logger.error(\n \"Transaction commit callback threw an exception:\\n%s\",\n traceback.format_exc()\n )\n elif msg.matches.Transaction:\n with self._lock:\n key_value = {}\n priors = {}\n\n writes = {k:msg.writes[k] for k in msg.writes}\n set_adds = {k: msg.set_adds[k] for k in msg.set_adds}\n set_removes = {k: msg.set_removes[k] for k in msg.set_removes}\n\n for k,val_serialized in writes.items():\n if not self._suppressKey(k):\n key_value[k] = val_serialized\n\n priors[k] = self._versioned_data.setVersionedValue(k, msg.transaction_id,\n bytes.fromhex(val_serialized)\n if val_serialized is not None else None\n )\n\n for k,a in set_adds.items():\n a = self._suppressIdentities(k, set(a))\n\n self._versioned_data.setVersionedAddsAndRemoves(k, msg.transaction_id, a, set())\n\n for k,r in set_removes.items():\n r = self._suppressIdentities(k, set(r))\n self._versioned_data.setVersionedAddsAndRemoves(k, msg.transaction_id, set(), r)\n\n self._cur_transaction_num = msg.transaction_id\n\n self._versioned_data.cleanup(self._cur_transaction_num)\n\n for handler in self._onTransactionHandlers:\n try:\n handler(key_value, priors, set_adds, set_removes, msg.transaction_id)\n except Exception:\n self._logger.error(\n \"_onTransaction handler %s threw an exception:\\n%s\",\n handler,\n traceback.format_exc()\n )\n\n\n elif msg.matches.SubscriptionIncrease:\n with self._lock:\n 
subscribedIdentities = self._schema_and_typename_to_subscription_set.setdefault((msg.schema, msg.typename), set())\n if subscribedIdentities is not Everything:\n subscribedIdentities.update(\n msg.identities\n )\n elif msg.matches.SubscriptionData:\n with self._lock:\n lookupTuple = (msg.schema, msg.typename, msg.fieldname_and_value)\n\n if lookupTuple not in self._subscription_buildup:\n self._subscription_buildup[lookupTuple] = {'values': {}, 'index_values': {}, 'identities': None, 'markedLazy': False}\n else:\n assert not self._subscription_buildup[lookupTuple]['markedLazy'], 'received non-lazy data for a lazy subscription'\n\n self._subscription_buildup[lookupTuple]['values'].update({k:msg.values[k] for k in msg.values})\n self._subscription_buildup[lookupTuple]['index_values'].update({k: msg.index_values[k] for k in msg.index_values})\n\n if msg.identities is not None:\n if self._subscription_buildup[lookupTuple]['identities'] is None:\n self._subscription_buildup[lookupTuple]['identities'] = set()\n self._subscription_buildup[lookupTuple]['identities'].update(msg.identities)\n elif msg.matches.LazyTransactionPriors:\n with self._lock:\n for k,v in msg.writes.items():\n self._versioned_data.setVersionedTailValueStringified(k,bytes.fromhex(v) if v is not None else None)\n elif msg.matches.LazyLoadResponse:\n with self._lock:\n for k,v in msg.values.items():\n self._versioned_data.setVersionedTailValueStringified(k,bytes.fromhex(v) if v is not None else None)\n\n self._lazy_objects.pop(msg.identity, None)\n\n e = self._lazy_object_read_blocks.pop(msg.identity, None)\n if e:\n e.set()\n\n elif msg.matches.LazySubscriptionData:\n with self._lock:\n lookupTuple = (msg.schema, msg.typename, msg.fieldname_and_value)\n\n assert lookupTuple not in self._subscription_buildup\n\n self._subscription_buildup[lookupTuple] = {\n 'values': {},\n 'index_values': msg.index_values,\n 'identities': msg.identities,\n 'markedLazy': True\n }\n\n elif msg.matches.SubscriptionComplete:\n with self._lock:\n event = self._pendingSubscriptions.get((msg.schema, msg.typename,\n tuple(msg.fieldname_and_value) if msg.fieldname_and_value is not None else None))\n\n if not event:\n self._logger.error(\"Received unrequested subscription to schema %s / %s / %s. 
have %s\",\n msg.schema, msg.typename, msg.fieldname_and_value, self._pendingSubscriptions)\n return\n\n lookupTuple = (msg.schema, msg.typename, msg.fieldname_and_value)\n\n identities = self._subscription_buildup[lookupTuple]['identities']\n values = self._subscription_buildup[lookupTuple]['values']\n index_values = self._subscription_buildup[lookupTuple]['index_values']\n markedLazy = self._subscription_buildup[lookupTuple]['markedLazy']\n del self._subscription_buildup[lookupTuple]\n\n sets = self.indexValuesToSetAdds(index_values)\n\n if msg.fieldname_and_value is None:\n if msg.typename is None:\n for tname in self._schemas[msg.schema]._types:\n self._schema_and_typename_to_subscription_set[msg.schema, tname] = Everything\n else:\n self._schema_and_typename_to_subscription_set[msg.schema, msg.typename] = Everything\n else:\n assert msg.typename is not None\n subscribedIdentities = self._schema_and_typename_to_subscription_set.setdefault((msg.schema, msg.typename), set())\n if subscribedIdentities is not Everything:\n subscribedIdentities.update(\n identities\n )\n\n t0 = time.time()\n heartbeatInterval = getHeartbeatInterval()\n\n #this is a fault injection to allow us to verify that heartbeating during this\n #function will keep the server connection alive.\n for _ in range(self._largeSubscriptionHeartbeatDelay):\n self._channel.sendMessage(\n ClientToServer.Heartbeat()\n )\n time.sleep(heartbeatInterval)\n\n totalBytes = 0\n for k,v in values.items():\n if v is not None:\n totalBytes += len(v)\n\n if totalBytes > 1000000:\n self._logger.info(\"Subscription %s loaded %.2f mb of raw data.\", lookupTuple, totalBytes / 1024.0 ** 2)\n\n if markedLazy:\n schema_and_typename = lookupTuple[:2]\n for i in identities:\n self._lazy_objects[i] = schema_and_typename\n\n for key, val in values.items():\n self._versioned_data.setVersionedValue(key, msg.tid, None if val is None else bytes.fromhex(val))\n\n #this could take a long time, so we need to keep heartbeating\n if time.time() - t0 > heartbeatInterval:\n #note that this needs to be 'sendMessage' which sends immediately,\n #not, 'write' which queues the message after this function finishes!\n self._channel.sendMessage(\n ClientToServer.Heartbeat()\n )\n t0 = time.time()\n\n for key, setval in sets.items():\n self._versioned_data.updateVersionedAdds(key, msg.tid, set(setval))\n\n #this could take a long time, so we need to keep heartbeating\n if time.time() - t0 > heartbeatInterval:\n #note that this needs to be 'sendMessage' which sends immediately,\n #not, 'write' which queues the message after this function finishes!\n self._channel.sendMessage(\n ClientToServer.Heartbeat()\n )\n t0 = time.time()\n\n #this should be inline with the stream of messages coming from the server\n assert self._cur_transaction_num <= msg.tid\n\n self._cur_transaction_num = msg.tid\n\n event.set()\n else:\n assert False, \"unknown message type \" + msg._which\n\n def indexValuesToSetAdds(self, indexValues):\n #indexValues contains (schema:typename:identity:fieldname -> indexHashVal) which builds\n #up the indices we need. 
We need to transpose to a dictionary ordered by the hash values,\n #not the identities\n\n t0 = time.time()\n heartbeatInterval = getHeartbeatInterval()\n\n setAdds = {}\n\n for iv in indexValues:\n val = indexValues[iv]\n\n if val is not None:\n schema_name, typename, identity, field_name = keymapping.split_data_reverse_index_key(iv)\n\n index_key = keymapping.index_key_from_names_encoded(schema_name, typename, field_name, val)\n\n setAdds.setdefault(index_key, set()).add(identity)\n\n #this could take a long time, so we need to keep heartbeating\n if time.time() - t0 > heartbeatInterval:\n #note that this needs to be 'sendMessage' which sends immediately,\n #not, 'write' which queues the message after this function finishes!\n self._channel.sendMessage(\n ClientToServer.Heartbeat()\n )\n t0 = time.time()\n return setAdds\n\n def _get_versioned_set_data(self, key, transaction_id):\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n return self._versioned_data.setForVersion(key, transaction_id)\n\n def _get_versioned_object_data(self, key, transaction_id):\n with self._lock:\n if self._versioned_data.hasDataForKey(key):\n return self._versioned_data.valueForVersion(key, transaction_id)\n\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n identity = keymapping.split_data_key(key)[2]\n if identity not in self._lazy_objects:\n return None\n\n event = self._loadLazyObject(identity)\n\n event.wait()\n\n with self._lock:\n if self.disconnected.is_set():\n raise DisconnectedException()\n\n if self._versioned_data.hasDataForKey(key):\n return self._versioned_data.valueForVersion(key, transaction_id)\n\n return None\n\n def requestLazyObjects(self, objects):\n with self._lock:\n for o in objects:\n k = keymapping.data_key(type(o), o._identity, \" exists\")\n\n if o._identity in self._lazy_objects and not self._versioned_data.hasDataForKey(k):\n self._loadLazyObject(o._identity)\n\n def _loadLazyObject(self, identity):\n e = self._lazy_object_read_blocks.get(identity)\n\n if e:\n return e\n\n e = self._lazy_object_read_blocks[identity] = threading.Event()\n\n self._channel.write(\n ClientToServer.LoadLazyObject(\n identity=identity,\n schema=self._lazy_objects[identity][0],\n typename=self._lazy_objects[identity][1]\n )\n )\n\n return e\n\n def _set_versioned_object_data(self,\n key_value,\n set_adds,\n set_removes,\n keys_to_check_versions,\n indices_to_check_versions,\n as_of_version,\n confirmCallback\n ):\n assert confirmCallback is not None\n\n transaction_guid = self.identityProducer.createIdentity()\n\n self._transaction_callbacks[transaction_guid] = confirmCallback\n\n out_writes = {}\n\n for k,v in key_value.items():\n out_writes[k] = v.serializedByteRep.hex() if v.serializedByteRep is not None else None\n if len(out_writes) > 10000:\n self._channel.write(\n ClientToServer.TransactionData(writes=out_writes, set_adds={}, set_removes={},\n key_versions=(),index_versions=(), transaction_guid=transaction_guid)\n )\n self._channel.write(ClientToServer.Heartbeat())\n out_writes = {}\n\n ct = 0\n out_set_adds = {}\n for k,v in set_adds.items():\n out_set_adds[k] = tuple(v)\n ct += len(v)\n\n if len(out_set_adds) > 10000 or ct > 100000:\n self._channel.write(\n ClientToServer.TransactionData(writes={}, set_adds=out_set_adds, set_removes={},\n key_versions=(),index_versions=(), transaction_guid=transaction_guid)\n )\n self._channel.write(ClientToServer.Heartbeat())\n out_set_adds = {}\n ct = 0\n\n ct = 0\n out_set_removes = {}\n for k,v in 
set_removes.items():\n out_set_removes[k] = tuple(v)\n ct += len(v)\n\n if len(out_set_removes) > 10000 or ct > 100000:\n self._channel.write(\n ClientToServer.TransactionData(writes={}, set_adds={}, set_removes=out_set_removes,\n key_versions=(),index_versions=(), transaction_guid=transaction_guid)\n )\n self._channel.write(ClientToServer.Heartbeat())\n out_set_removes = {}\n ct = 0\n\n keys_to_check_versions = list(keys_to_check_versions)\n while len(keys_to_check_versions) > 10000:\n self._channel.write(\n ClientToServer.TransactionData(writes={}, set_adds={}, set_removes={},\n key_versions=keys_to_check_versions[:10000],index_versions=(), transaction_guid=transaction_guid)\n )\n self._channel.write(ClientToServer.Heartbeat())\n keys_to_check_versions = keys_to_check_versions[10000:]\n\n indices_to_check_versions = list(indices_to_check_versions)\n while len(indices_to_check_versions) > 10000:\n self._channel.write(\n ClientToServer.TransactionData(writes={}, set_adds={}, set_removes={},\n key_versions=(),index_versions=indices_to_check_versions[:10000], transaction_guid=transaction_guid)\n )\n indices_to_check_versions = indices_to_check_versions[10000:]\n\n self._channel.write(\n ClientToServer.TransactionData(\n writes=out_writes,\n set_adds=out_set_adds,\n set_removes=out_set_removes,\n key_versions=keys_to_check_versions,\n index_versions=indices_to_check_versions,\n transaction_guid=transaction_guid\n )\n )\n\n self._channel.write(\n ClientToServer.CompleteTransaction(\n as_of_version=as_of_version,\n transaction_guid=transaction_guid\n )\n )\n","sub_path":"object_database/database_connection.py","file_name":"database_connection.py","file_ext":"py","file_size_in_byte":43412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
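# A minimal standalone sketch (illustrative only, not part of the record above) of the
# version-lookup rule in VersionedBase._best_version_offset_for: a reader pinned at
# transaction `version` sees the newest value whose version number is <= version.
import bisect

def value_for_version(version_numbers, values, version):
    # index of the rightmost version number <= version, or None if the key
    # did not exist yet at that transaction
    i = bisect.bisect_right(version_numbers, version) - 1
    return values[i] if i >= 0 else None

assert value_for_version([3, 7, 9], ["a", "b", "c"], 8) == "b"   # still sees the write at 7
assert value_for_version([3, 7, 9], ["a", "b", "c"], 2) is None  # before the first write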
+{"seq_id":"478307782","text":"##########################################################\n# UAT-specific configuration settings for MyLocker.\n###########################################################\nfrom .base import *\n\nALLOWED_HOSTS = ['sdl-uat.stanford.edu']\n\n# MQUINN 04-27-2015: When updating using the update.py script,\n# use the uat, rather than master, branch.\nGIT_BRANCH_TO_UPDATE_FROM = 'uat'\n\n# MQUINN 09-25-2015: There are some libraries (i.e., python-linkedin)\n# that make HTTP(S) calls via the Python requests library,\n# but do not expose the ability to modify\n# the HTTP request mechanism used. The way to get around this is\n# to set environment variables that the Python requests library will read;\n# namely, $HTTP_PROXY and $HTTPS_PROXY. Note that this cannot go in base.py,\n# because local development machines will not have access to this proxy.\nos.environ['HTTP_PROXY'] = STF_PROXY_MAP_REQUESTS_LIB['https']\nos.environ['HTTPS_PROXY'] = STF_PROXY_MAP_REQUESTS_LIB['https']\n\n# MQUINN 10-27-2015 : SCPD DEV URL for verifying POST'ed data.\nSCPD_AUTH_VERIFY_ENDPOINT = 'https://oneceauth.stanford.edu/api/MyLocker/DecryptMyLocker'\n","sub_path":"Mylocker-Upgrade-master/mylocker/mylocker/settings/uat.py","file_name":"uat.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"230138666","text":"# Define blueprint for rsvp view\nfrom flask import Blueprint, request, jsonify, make_response\n\nfrom app.api.v1.models import rsvp\nfrom app.api.v1.utils import QuestionerStorage, validate_request_data, validate_route_param, check_is_empty, parse_request, endpoint_error_response\n\ndb = QuestionerStorage()\n\nrsvp_view_blueprint = Blueprint('rsvp_bp', '__name__')\n\n@rsvp_view_blueprint.route('/meetups//rsvps', methods=['POST'])\ndef create_rsvp(meetup_id):\n response = {}\n data = {}\n\n # check meetup_id in route can be converted to int\n validated_meetup_id = validate_route_param(meetup_id)\n if type(validated_meetup_id) != int:\n return jsonify(validated_meetup_id), validated_meetup_id['status']\n \n # check if meetup_id is for existing meetup \n if is_meetup_id_invalid(validated_meetup_id):\n response = {\n \"status\" : 404,\n \"error\": \"Meetup with id {} not found\".format(validated_meetup_id)\n }\n return make_response(jsonify(response), response['status'])\n \n # Get request data\n data = parse_request(request)\n if type(data) == dict and 'error' in data:\n return make_response(jsonify(data), data['status'])\n\n # perform standard validation checks\n res_valid_data = rsvp_validate_request_data(data, validated_meetup_id)\n\n # process data if valid, else, return validation findings\n if data == res_valid_data:\n # send to storage\n response = save(res_valid_data)\n return make_response(jsonify(response), response['status'])\n else:\n # return error from validation findings\n response = endpoint_error_response(data, res_valid_data)\n return make_response(jsonify(response), response['status'])\n\ndef save(rsvp_record):\n \"\"\"Sends the rsvp to be recorded to storage.\"\"\"\n # convert the meetup and user attributes to int\n if type(rsvp_record['meetup']) != int:\n rsvp_record['meetup'] = int(rsvp_record['meetup'])\n elif type(rsvp_record['user']) != int:\n rsvp_record['user'] = int(rsvp_record['user'])\n\n # send to storage\n db_response = db.save_item('rsvps', rsvp_record, 'add_new')\n\n if all(item in db_response.items() for item in rsvp_record.items()):\n # get the meetup record\n meetup = db.get_record(rsvp_record['meetup'], db.meetup_list)\n \n # update the rsvp record being returned with required return attributes\n status = rsvp_record.pop('response')\n rsvp_record.update({\n 'status': status,\n 'topic': meetup['topic']\n })\n\n return {\n \"status\": 201,\n \"data\": [rsvp_record]\n }\n else:\n return {\n \"status\": 503,\n \"error\": 'An error occurred while saving the record.'\n }\n\ndef is_meetup_id_invalid(meetup_id):\n \"\"\"Checks whether the supplied meetup id exists\\n Returns boolean\"\"\"\n exists = False\n exists = db.check_id_unique(int(meetup_id), db.meetup_list)\n return exists\n\ndef rsvp_validate_request_data(req_data, meetup_id):\n \"\"\"Validates the rsvp data received\"\"\"\n # data = {\n # \"meetup\": 1, required\n # \"user\": 2, required\n # \"response\": \"yes | no | maybe\", required\n # }\n # \n # parse the recevied data to check for empty or none\n received_data = check_is_empty(req_data)\n # exit if indeed data is empty else check that response value is allowed\n if 'error' in received_data:\n return received_data\n # Confirm that the meetup supplied in route matches the id in request data\n elif 'meetup' in received_data:\n # check if meetup id can be parsed as an int\n try:\n parsed_meetup_id = int(received_data['meetup'])\n except:\n response = {\n \"status\": 400,\n \"error\": \"Invalid meetup id: 
{}\".format(received_data['meetup'])\n }\n return response\n else:\n # check if parsed meetup id matches the one in route\n if parsed_meetup_id != meetup_id:\n response = {\n 'status': 400,\n 'error': 'Meetup ID in request route ({}) does not match meetup id in request data ({}). i.e. {} != {} '.format(meetup_id, received_data['meetup'], meetup_id, received_data['meetup'])\n }\n return response\n # Confirm that the supplied value for response is what is expected\n if 'response' in received_data and received_data['response'] not in ['yes', 'no', 'maybe']:\n response = {\n 'status': 400,\n 'error': 'Invalid response. Must be one of: yes | no | maybe'\n }\n return response\n\n # all is ok. Perform standard validation checks\n req_fields = ['meetup', 'user', 'response']\n other_fields = []\n\n dict_req_fields = {}\n dict_other_fields = {}\n\n sanitized_data = []\n\n # get the required fields' data and put in own dictionary\n for field in req_fields:\n if field in req_data:\n dict_req_fields.update({field: req_data[field]})\n # append required fields dictionary to sanitized_data list\n sanitized_data.append(dict_req_fields)\n\n # get the non required fields' data and put in own dictionary\n for field in other_fields:\n if field in req_data:\n dict_other_fields.update({field: req_data[field]})\n # append non required fields dictionary to sanitized_data list\n sanitized_data.append(dict_other_fields)\n\n# send sanitized_data list to actual validation function and return response\n return validate_request_data(sanitized_data, req_fields)\n","sub_path":"app/api/v1/views/rsvpviews.py","file_name":"rsvpviews.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"418691846","text":"import numpy as np\n\nclass SudokuGrid:\n\n def __init__(self, inputFile):\n self.grid = np.zeros([9, 9], dtype=str)\n f = open(inputFile, 'r') \n lines = f.readlines()\n f.close()\n \n if len(lines) < 9:\n raise ValueError(\"The format of input data is not correct\")\n\n for i in range(9):\n columnValues = lines[i].rstrip().split(' ')\n if len(columnValues) != 9:\n raise ValueError(\"The format of input data is not correct\")\n self.grid[i] = columnValues\n \n\n def print(self):\n print('\\n'.join(' '.join(str(cell) for cell in row) for row in self.grid))\n\n def setValue(self, value, row, col):\n self.grid[row-1][col-1] = value\n \n def valueAt(self, row, col):\n return self.grid[row-1, col-1]\n\n def convert(self, outputFormat='list'):\n if outputFormat == 'list':\n return list(self.grid.flatten())\n\n\n\nif __name__ == '__main__':\n sudokuGrid = SudokuGrid('/Users/apple/Documents/git-repos/sudoku/sudoku-as-csp/input-data/18/4.sd')\n\n #Test print() \n sudokuGrid.print()\n\n #Test valueAt() \n print(sudokuGrid.valueAt(3, 4))\n\n #Test setValue() \n sudokuGrid.setValue(99, 3, 4)\n print(sudokuGrid.valueAt(3, 4))\n\n #Test convert()\n print(sudokuGrid.convert()) \n","sub_path":"sudoku_grid.py","file_name":"sudoku_grid.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"646332800","text":"from collections import defaultdict\nimport numpy as np\n\n\nclass PageRank():\n\n def __init__(self, file_name):\n self.links = defaultdict(list)\n with open(file_name, 'r') as file:\n for line in file:\n pages = line.rstrip('\\n').split(' ')\n page, in_links = pages[0], pages[1:]\n for link in in_links:\n self.links[page].append(link)\n self.pages = self.links.keys()\n self.norm = []\n\n def procedure(self, lamb=0.15):\n\n I = {}\n R = {}\n\n\n count = 0\n old_l2_norm = 0\n x = dict()\n\n for page in self.pages:\n x[page] = 0\n\n for page in self.pages:\n I[page] = 1/len(self.pages)\n\n\n while count < 4:\n for page in self.pages:\n R[page] = lamb/len(self.pages)\n\n for page in self.pages:\n Q = set()\n for same_page in self.pages:\n if page is not same_page:\n for q in self.links[same_page]:\n if q is page and same_page in self.pages:\n Q.add(same_page)\n\n if len(Q) > 0:\n for q in Q:\n R[q] += ((1 - lamb) * I[page]) / len(Q)\n else:\n for p in self.pages:\n R[p] += ((1 - lamb) * I[page]) / len(self.pages)\n\n x[page] = (R[page] - I[page])\n\n for j in self.pages:\n I[j] = R[j]\n\n vector = []\n\n for key, val in x.items():\n vector.append(val)\n\n current_l2_norm = np.linalg.norm(vector, 2)\n\n sum1 = 0\n for k, v in R.items():\n sum1 = sum1 + v\n\n self.norm.append(\"norm \" + str(current_l2_norm) + \" sum \" + str(sum1) + '\\n')\n\n change_l2_norm = current_l2_norm - old_l2_norm\n\n if change_l2_norm < 0.0005:\n count = count + 1\n else:\n count = 0\n old_l2_norm = current_l2_norm\n return R\n\n def write_norm(self, file_name):\n with open(file_name, 'w') as file:\n for item in self.norm:\n file.write(item)\n file.close()\n\n\npr0 = PageRank(\"ungrade.txt\")\nprint(\"\\n\" + \"Ungraded results\")\nlist_sort0 = pr0.procedure()\n#list_sort0 = pr0.procedure(lamb=0.25)\n#list_sort0 = pr0.procedure(lamb=0.35)\n#list_sort0 = pr0.procedure(lamb=0.5)\nprint(sorted(list_sort0.items(), reverse=True, key=lambda x: x[1]))\n\n\npr = PageRank(\"g1.txt\")\nprint(\"\\n\" + \"G1 results\")\nlist_sort = pr.procedure()\n#list_sort = pr.procedure(lamb=0.25)\n#list_sort = pr.procedure(lamb=0.35)\n#list_sort = pr.procedure(lamb=0.5)\nprint(sorted(list_sort.items(), reverse=True, key=lambda x: x[1]))\n\nprint(\"\\n\" + \"G2 results\")\npr1 = PageRank(\"g2.txt\")\n\nlist_sort1 = pr1.procedure()\n#list_sort1 = pr1.procedure(lamb=0.25)\n#list_sort1 = pr1.procedure(lamb=0.35)\n#list_sort1 = pr1.procedure(lamb=0.5)\nprint(sorted(list_sort1.items(), reverse=True, key=lambda x: x[1]))\n\n\n\n\n","sub_path":"Assignment_2/Task_3.py","file_name":"Task_3.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"628621562","text":"from matplotlib import *\nimport pylab\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef plot3d(filename):\n fig = pylab.figure()\n ax = Axes3D(fig)\n\n content = []\n f = open('../Pareto/' + filename, 'r')\n for line in f:\n content.append(line.split())\n\n for i in range(0, len(content)):\n content[i] = [float(x) for x in content[i]]\n\n for i in range(0, len(content)):\n content[i] = [int(content[i][0])] + content[i][1:]\n\n x = []\n y = []\n z = []\n labels = []\n\n for i in range(0, len(content)):\n x.append(content[i][1])\n y.append(content[i][2])\n z.append(content[i][3])\n labels.append(str(content[i][0]))\n\n for i in range(len(content)):\n ax.scatter(x[i],y[i],z[i],color='b') \n\n pylab.xlabel('Overall Deviation')\n pylab.ylabel('Edge value')\n ax.set_zlabel('Connectivity')\n pylab.locator_params(nticks=2)\n\n pylab.savefig(\"../Images/plots/all\")\n","sub_path":"Project 3/Python/plotter3d.py","file_name":"plotter3d.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"178828231","text":"import urllib.request,time,platform,os,sys,io\n#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') #改变标准输出的默认编码\ndef clear():\n print(u'内容较多,显示3秒后翻页')\n time.sleep(3)\n OS=platform.system()\n if OS==u'windows':\n os.system('cls')\n else:\n os.system('clear')\n\ndef linkBaidu():\n url='http://www.baidu.com'\n response=''\n try:\n response=urllib.request.urlopen(url,timeout=3)\n except urllib.request.URLError:\n print(u'网络地址错误')\n exit()\n with open('./baidu.txt','w',encoding='utf-8') as fp:#当文本文件里面有中文时,需要进行编码转换\n fp.write(response.read().decode('utf-8'))\n print(u'获取url信息,response.geturl() :\\n%s'%response.geturl())\n print(u'获取返回代码,response.getcode() :\\n%s' % response.getcode())\n print(u'获取返回信息,response.info() :\\n%s' % response.info())\n print(u'获取的网页信息已存入当前目录的baidu.txt中,请自行查看')\n\nif __name__=='__main__':\n linkBaidu()","sub_path":"爬虫实战/testUrllib2.py","file_name":"testUrllib2.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"253248060","text":"# -*- coding:utf-8 -*-\n# 作者 :JunFengG\n# 创建时间 :2018/10/5 0005 9:54 \n# 文件 :RandomForest\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import cross_validation\n\n#读取训练数据与测试数据\ntrain=pd.read_csv('train.csv',dtype={'Age':np.float64})\ntest=pd.read_csv('test.csv',dtype={'Age':np.float64})\n\n#预处理\ndef harmonize_data(titanic):\n # 均值填充\n titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())\n #将sex属性转换为数值\n titanic.loc[titanic['Sex']=='male','Sex']=0\n titanic.loc[titanic['Sex']=='female','Sex']=1\n #填充\n titanic['Embarked'] = titanic['Embarked'].fillna('S')\n #将属性转化为数值\n titanic.loc[titanic['Embarked']=='S','Embarked']=0\n titanic.loc[titanic['Embarked']=='C','Embarked']=1\n titanic.loc[titanic['Embarked']=='Q','Embarked']=2\n # 均值填充\n titanic['Fare'] = titanic['Fare'].fillna(titanic['Fare'].median())\n return titanic\n\n\ntrain_data=harmonize_data(train)\ntest_data=harmonize_data(test)\n\n#确定模型的变量特征\npredictors = ['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked']\n\n\n'''\n随机森林的参数\nn_estimators 子模型的数量\ncriterion 判断节点是否继续分类的计算方法\nmax_features 节点分裂时,参与判断的最大特征数\nmax_depth 树的最大深度\nmin_samples_split 分裂所需的最小样本数\nmin_samples_leaf 叶节点最小样本数\nmax_leaf_nodes 叶节点最大样本数\n'''\n\nalg=RandomForestClassifier(\n random_state=1,\n n_estimators=150,\n min_samples_split=4,\n min_samples_leaf=2\n\n)\n\n#进行交叉验证 3折\nscores = cross_validation.cross_val_score(\n alg,\n train_data[predictors],\n train_data['Survived'],\n cv=3\n)\nprint('\\nscores.mean()=',scores.mean())\nprint('\\nscores.std()=',scores.std())\n\n\n#预测结果输出\ndef create_submission(alg,train,test,predictors,filename):\n alg.fit(train[predictors],train['Survived'])\n predictions = alg.predict(test[predictors])\n submission = pd.DataFrame({\n 'PassengerId':test['PassengerId'],\n 'Survived':predictions\n })\n print(\"\\nsubmission\\n\", submission)\n submission.to_csv(filename,index=False)\n\ncreate_submission(alg,train_data,test_data,predictors,'./run-01.csv')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python3DataAnalysiAndMachineLearning/chapter12/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"359531614","text":"#############################################################################\n##############################################################################\n#Interpolate CTD data vertically onto a regular grid\n#############################################################################\n#############################################################################\n\nfrom aux_funcs import *\n\ndef pivspline_extrap(afield,datevec,prsvec,pdall,moornum):\n panpiv=pd.DataFrame(index=datevec,\n columns=prsvec)\n for ii,dd in enumerate(datevec):\n ind=pdall['date bin']==dd\n x=pdall['pressure'][ind]\n y=pdall[afield][ind]\n Y=array([Y for X, Y in sorted(zip(x, y))])\n if sum(isnan(Y))!=len(Y):\n #first, interpolate between measuements we do have so that I can then select\n #consistent pressures to interpolate from\n prsnonan=sort(x)[~isnan(Y)]\n f1=interpolate.interp1d(prsnonan,Y[~isnan(Y)],kind='linear')\n interprs=prsvec[(prsvec<=max(prsnonan))&(prsvec>=min(prsnonan))]\n pan1=f1(interprs)\n #pressures between which to calculate extrapolation gradient\n #trying top 20db that exist (this can be v. variable!)\n #CF5 is worst(top inst can go down to 250m)\n z1=int((min(prsnonan)+3)/2)*2\n z2=z1+20\n # print(z1,min(prsnonan))\n s1=pan1[interprs==z1]\n s2=pan1[interprs==z2]\n s0=s1-(s2-s1)*z1/(z2-z1)\n f2=interpolate.interp1d(hstack((0,interprs)),hstack((s0,pan1)),kind='linear',fill_value='extrapolate')\n pani=f2(prsvec)\n panpiv.iloc[ii]=pd.to_numeric(pani)\n return panpiv.T\n\ndef Interp_CTD(moornum):\n savename='1810JHIL'\n\n\n #############################################################################\n #### Start by loading all temp and sal data\n #############################################################################\n\n moorname='CF'+str(moornum)\n\n def flattendic(adic):\n flatdic=[]\n for key in adic:\n flatdic=hstack((flatdic,adic[key]))\n return flatdic\n\n # CF8 needs to be loaded from original files, did not do any corrections to it.\n if moorname=='CF8':\n ctdlist=hstack((glob.glob(datadir+'NOC_M1/nocm1_02_2015/microcat/*.microcat'),\n glob.glob(datadir+'NOC_M1/nocm1_01_2014/microcat/*.microcat')))\n time=array([])\n tmp=array([])\n sal=array([])\n prs=array([])\n meanprs=array([])\n datebin=array([])\n aveconst=4*24\n for dd in ctdlist:\n dat=pd.read_csv(dd,header=11,sep=' ')\n prs_hrly=hrly_ave(dat.iloc[:,6],aveconst/4)\n d=unique([datetime.datetime(int(dat.iloc[ii,0]),\n int(dat.iloc[ii,1]),\n int(dat.iloc[ii,2])) for ii in range(len(dat))])[:len(prs_hrly)]\n\n\n time_hrly=array([datetime.datetime.toordinal(adate) for adate in d])\n\n tmpins_hrly=hrly_ave(dat.iloc[:,4],aveconst/4)\n c=hrly_ave(dat.iloc[:,5],aveconst/4)\n sal_hrly=con2sal(c,tmpins_hrly,prs_hrly)\n meanprs_hrly=nanmean(prs_hrly)*ones(len(prs_hrly))\n SA_hrly=gsw.SA_from_SP(sal_hrly,prs_hrly,CFlon[moornum-1],CFlat[moornum-1])\n tmp_hrly=gsw.pt0_from_t(SA_hrly,tmpins_hrly,prs_hrly)\n\n\n datebin=hstack((datebin,d))\n time=hstack((time,time_hrly))\n prs=hstack((prs,prs_hrly))\n meanprs=hstack((meanprs,meanprs_hrly))\n tmp=hstack((tmp,tmp_hrly))\n sal=hstack((sal,sal_hrly))\n\n elif moorname=='CF1':#CF1- load a reconstructed version (will eventually be choosing from a couple different options)\n [cf1date,cf1time,cf1prs,cf1mnprs,cf1sal,cf1tmp]=pd.read_pickle(open('../pickles/CF1recon/CF1_recon_JH1810.pickle', 'rb'))\n tmp=flattendic(cf1tmp)\n sal=flattendic(cf1sal)\n prs=flattendic(cf1prs)\n datebin=flattendic(cf1date)\n meanprs=flattendic(cf1mnprs)\n else:\n 
[date_all,month_all,prs_all,sal_all,tmp_all]=pickle.load(open('../pickles/TSdailydic/TS_daily_dic_wJHIL.pickle','rb'))\n tmp=flattendic(tmp_all[int(moornum)])\n sal=flattendic(sal_all[int(moornum)])\n prs=flattendic(prs_all[int(moornum)])\n datebin=flattendic(date_all[int(moornum)])\n meanprs_dic={}\n for key in prs_all[int(moornum)]:\n meanprs_dic[key]=mean(prs_all[int(moornum)][key])*ones(len(prs_all[int(moornum)][key]))\n meanprs=flattendic(meanprs_dic)\n\n #############################################################################\n #### Create panda of all data, pivot and interpolate\n #############################################################################\n\n pdall=pd.DataFrame({'nominal pressure':meanprs,'temperature':tmp,'salinity':sal,\n 'pressure':prs,'date bin':datebin})\n\n\n dmin=min(pdall['date bin'])\n dmax=max(pdall['date bin'])\n dlen=int(divmod((dmax-dmin).total_seconds(),60*60*24)[0])+1\n datevec = array([dmin + datetime.timedelta(days=float(x)) for x in range(0, dlen)])\n\n prsvec=arange(0,int(max(prs))+1,2)\n\n\n salinterp=pivspline_extrap('salinity',datevec,prsvec,pdall,moornum)\n tmpinterp=pivspline_extrap('temperature',datevec,prsvec,pdall,moornum)\n\n figure()\n plot(salinterp,salinterp.index);\n plot(pdall['salinity'],pdall['pressure'],'k.')\n gca().invert_yaxis()\n ylabel('pressure')\n title(moorname+': Salinity: measured and interpolated')\n savefig('../figures/interpolation/TS/'+moorname+'_sal_measinterp_'+savename+'.png',bbox_inches='tight')\n\n\n figure()\n plot(tmpinterp,tmpinterp.index);\n plot(pdall['temperature'],pdall['pressure'],'k.')\n gca().invert_yaxis()\n ylabel('pressure')\n title(moorname+': Temperature (mcat only): measured and interpolated')\n savefig('../figures/interpolation/TS/'+moorname+'_tmp_mcat_measinterp_'+savename+'.png',bbox_inches='tight')\n\n plotcontour(salinterp,cm.YlGnBu_r,30,35,moorname)\n savefig('../figures/interpolation/TS/'+moorname+'_sal_'+savename+'.png',bbox_inches='tight')\n\n\n plotcontour(tmpinterp,cm.RdBu_r,-2,8,moorname)\n savefig('../figures/interpolation/TS/'+moorname+'tmp_mcat_'+savename+'.png',bbox_inches='tight')\n\n\n salinterp = salinterp.apply(pd.to_numeric, errors='coerce')\n tmpinterp = tmpinterp.apply(pd.to_numeric, errors='coerce')\n\n pden=pd.DataFrame(index=salinterp.index,\n columns=salinterp.columns)\n\n for jj in range(shape(pden)[1]):\n nanind=(~isnan(salinterp.values[:,jj]))\n sal_nonan=salinterp.values[:,jj][nanind]\n tmp_nonan=tmpinterp.values[:,jj][nanind]\n # tmp_wtid_nonan=tmpinterp_wtid.values[:,jj][nanind]\n SA=gsw.SA_from_SP(sal_nonan,salinterp.index[nanind],CFlon[moornum-1],CFlat[moornum-1])\n pden.values[nanind,jj]=pd.to_numeric(gsw.sigma0(SA,gsw.CT_from_pt(SA,tmp_nonan)))\n\n\n figure()\n plot(pden,tmpinterp.index);\n gca().invert_yaxis()\n ylabel('pressure')\n title(moorname+': Potential density')\n savefig('../figures/interpolation/TS/'+moorname+'_pden_mcat_measinterp_'+savename+'.png',bbox_inches='tight')\n ########################################################################################\n #################################### Save fields ####################################\n ########################################################################################\n\n pickle.dump([salinterp,tmpinterp,pden],\n open('../pickles/TSinterp/'+moorname+'_saltmpinterp_'+savename+'_notid.pickle','wb'),protocol=2)\n\n\nfor ii in range(1,9):\n 
Interp_CTD(ii)\n","sub_path":"DataProcessing_2016recovery/Interp_CTD_extrap.py","file_name":"Interp_CTD_extrap.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
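# A small standalone check of the surface-extrapolation step in pivspline_extrap above:
# the value at 0 db is linearly extrapolated from the shallowest level z1 and z2 = z1 + 20,
# i.e. s0 = s1 - (s2 - s1) * z1 / (z2 - z1).
def extrapolate_to_surface(z1, s1, z2, s2):
    # linear extrapolation of the (z1, s1)-(z2, s2) gradient back to zero pressure
    return s1 - (s2 - s1) * z1 / (z2 - z1)

# e.g. salinity 34.0 at 50 db and 34.2 at 70 db extrapolates to 33.5 at the surface
assert abs(extrapolate_to_surface(50, 34.0, 70, 34.2) - 33.5) < 1e-9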
+{"seq_id":"335167054","text":"import sys, paramiko, json, vm, besthypervisor\n\n\ndef create_controller(tenant_name):\n\n with open('/home/vpmaddur/Project/'+tenant_name+'/'+tenant_name+'.json') as json_input:\n tenant_input=json.load(json_input)\n\n network=tenant_input[0][\"Networks\"]\n print(network)\n\n ######################DHCP File #######################################\n for net in network:\n if net[\"Name\"]!=\"mgmt\":\n dhcp_subnet=net[\"Subnet\"]\n print(\"PK1\")\n dhcp_netmask=net[\"netmask\"]\n app_subnet=dhcp_subnet.split('.')\n dhcp_router=\".\".join(app_subnet[0:3])+\".1\"\n dhcp_low_range=\".\".join(app_subnet[0:3])+\".2\"\n dhcp_high_range=\".\".join(app_subnet[0:3])+\".254\"\n dhcp=open('/home/vpmaddur/Project/nokia/dhcpd.conf', 'a')\n dhcp.write('subnet '+dhcp_subnet+' netmask '+dhcp_netmask+' {\\n')\n dhcp.write(' option routers '+dhcp_router+';\\n')\n dhcp.write(' option subnet-mask '+dhcp_netmask+';\\n')\n dhcp.write(' option domain-name-servers '+dhcp_router+';\\n')\n dhcp.write(' range '+dhcp_low_range+' '+dhcp_high_range+';\\n')\n dhcp.write('}\\n\\n')\n dhcp.close()\n print(\"PK2\")\n\n\n for net in network:\n if net[\"Name\"]==\"mgmt\":\n count=0\n bst_hyp=besthypervisor.best_hypervisor(tenant_name)\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(bst_hyp, port=22, username='root', key_filename='/root/.ssh/id_rsa' )\n vm.vm_create(tenant_name, net, count, network,bst_hyp)\n ssh.exec_command('cp /home/vpmaddur/Project/admin/'+tenant_name+'.img /home/vpmaddur/Project/'+tenant_name+'/etc/'+tenant_name+'-'+net[\"Name\"]+'-'+str(count)+'.img')\n vm.vm_start(tenant_name,net,bst_hyp,count,ssh)\n ssh.close()\n","sub_path":"admin/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"198499287","text":"import re\r\n\r\ndef readfile(filename):\r\n with open(filename, 'r', encoding='utf-8') as f:\r\n for line in f:\r\n r = re.search('id=\"P571\".*>([0-9]+)', line) \r\n if r:\r\n date = r.group(1)\r\n return date\r\n \r\ndef main(otherfile, info):\r\n with open(otherfile, 'w', encoding='utf-8') as t:\r\n table = 'Год основания:' + '\\t' + info\r\n t.write(table)\r\n \r\n \r\nif __name__ == \"__main__\":\r\n main('text.txt', readfile('hse.html'))\r\n\r\n","sub_path":"hw10/hse.py","file_name":"hse.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"560337546","text":"# -*- coding:utf-8 -*-\n__author__ = 'TimLee'\n__date__ = '5/13/17 7:27 AM'\n\nfrom django.conf.urls import url\nfrom .views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView\nfrom .views import TeacherListView, TeacherDetailView\n\nurlpatterns = [\n # 课程机构列表页\n url(r'^list/$', OrgView.as_view(), name=\"org_list\"),\n\n # 用户咨询课程 (Ajax)\n url(r'^add_ask/$', AddUserAskView.as_view(), name=\"add_ask\"),\n\n # 机构详情首页\n url(r'^home/(?P.*)/$', OrgHomeView.as_view(), name=\"org_home\"),\n\n # 机构课程列表首页\n url(r'^course/(?P.*)/$', OrgCourseView.as_view(), name=\"org_course\"),\n\n # 机构介绍页\n url(r'^desc/(?P.*)/$', OrgDescView.as_view(), name=\"org_desc\"),\n\n # 机构讲师页\n url(r'^org_teacher/(?P.*)/$', OrgTeacherView.as_view(), name=\"org_teacher\"),\n\n # 机构收藏功能(Ajax)\n url(r'^add_fav/$', AddFavView.as_view(), name=\"add_fav\"),\n\n # 讲师列表页\n url(r'^teacher/list/$', TeacherListView.as_view(), name='teacher_list'),\n\n # 讲师详情页面\n url(r'^teacher/detail/(?P.*)/$', TeacherDetailView.as_view() , name='teacher_detail'),\n\n\n\n\n]\n","sub_path":"apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"455003246","text":"import re\nimport urllib\nimport pandas as pd\nfrom urllib.parse import urljoin\nfrom PIL import ImageFile\n\n\nclass Xpath:\n\n @staticmethod\n def pop_last_no(x, which_match=1):\n p = x.split(\"[\")\n if which_match >= len(p):\n which_match = len(p) - 1\n p[-which_match] = re.sub(\"\\\\d+\", \"*\", p[-which_match], count=1)\n p = \"[\".join(p)\n return p\n\n @staticmethod\n def get_group(xpath_series):\n count = 0\n whichmatch = 0\n for i in range(max(xpath_series.str.count(\"/\"))):\n p = xpath_series.apply(lambda x: Xpath.pop_last_no(x, which_match=(i + 1))).duplicated().sum()\n if count < p:\n count = p\n whichmatch = i + 1\n if whichmatch == 0:\n return None\n s = xpath_series.apply(lambda x: Xpath.pop_last_no(x, whichmatch))\n s = s.loc[s.duplicated()]\n s = s.to_frame(name=0)\n s = s.groupby([0], as_index=False)\n s = s.size()\n s = s.sort_values(ascending=False)\n s = '|'.join(s.loc[s >= 4].index.tolist())\n return s\n\n\nclass Image:\n\n @staticmethod\n def get_image_content_length(image_src):\n # get file size *and* image size (None if not known)\n file = urllib.request.urlopen(image_src)\n p = ImageFile.Parser()\n while True:\n data = file.read(1024)\n if not data:\n break\n p.feed(data)\n if p.image:\n return p.image.width * p.image.height\n break\n file.close()\n return None\n\n @staticmethod\n def get_image_size(attributes, base_url):\n width = ''\n height = ''\n if 'width' in attributes.keys() and 'height' in attributes.keys():\n width = re.findall(r\"\\d+\", attributes[\"width\"])\n if width:\n width = width[0]\n height = re.findall(r\"\\d+\", attributes[\"height\"])\n if height:\n height = height[0]\n\n if 'style' in attributes.keys():\n style = attributes.get('style')\n if bool(re.search(r'^width', style)) and bool(re.search(r'^height', style)):\n style = style.split(\"; \")\n width = [re.findall(r\"\\d+\", x)[0] for x in style if re.search(r'^width', x)][0]\n height = [re.findall(r\"\\d+\", x)[0] for x in style if re.search(r'^height', x)][0]\n\n if width and height:\n width = int(width)\n height = int(height)\n return width * height\n else:\n keys = list(attributes.keys())\n key = pd.Series(keys).loc[pd.Series(keys).str.contains(\"src\")].iloc[0]\n src = attributes[key]\n src = urljoin(base_url, src)\n return Image.get_image_content_length(src)\n","sub_path":"article_curator/utilityfunc.py","file_name":"utilityfunc.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"163269163","text":"class Solution:\n def gridGame(self, grid: List[List[int]]) -> int:\n N = len(grid[0])\n prefix = []\n suffix = []\n sumPre, sumPost = 0, 0\n for i in range(N):\n sumPre += grid[0][i]\n prefix.append(sumPre)\n sumPost += grid[1][N - 1 - i]\n suffix.append(sumPost)\n suffix = suffix[::-1]\n \n row0 = sum(grid[0])\n row1 = sum(grid[1])\n \n res = 0\n rob2 = []\n for i in range(N):\n rob2.append(max(row0 - prefix[i], row1 - suffix[i]))\n return min(rob2)","sub_path":"2017_GridGame.py","file_name":"2017_GridGame.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"194190346","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\n\nTHEME_COLOR = \"#375362\"\n\n\nclass QuizUI:\n\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz_brain = quiz_brain\n self.question = self.quiz_brain.next_question()\n self.window = Tk()\n self.window.title(\"Quizzler\")\n\n self.window.config(bg=THEME_COLOR, padx=20, pady=20)\n\n self.score_label = Label(text=f\"Score : {self.quiz_brain.score}\", bg=THEME_COLOR, fg=\"white\")\n self.score_label.grid(row=0, column=1, padx=20, pady=20)\n\n self.canvas = Canvas(width=300, height=250)\n self.question_text = self.canvas.create_text(150, 125, text=f\"{self.question}\", font=(\"Arial\", 13, \"italic\"),\n width=280)\n self.canvas.grid(row=1, column=0, columnspan=2, padx=20)\n\n true_image = PhotoImage(file='images/true.png')\n self.true_button = Button(image=true_image, command=self.correct_button_handle)\n self.true_button.grid(row=2, column=0, padx=20, pady=20)\n false_image = PhotoImage(file='images/false.png')\n self.false_button = Button(image=false_image, command=self.wrong_button_handle)\n self.false_button.grid(row=2, column=1, padx=20, pady=20)\n\n self.window.mainloop()\n\n def get_next_question(self):\n self.canvas.config(bg='white')\n if self.quiz_brain.still_has_questions():\n self.canvas.itemconfig(self.question_text, text=self.quiz_brain.next_question())\n self.score_label.config(text=f\"Score : {self.quiz_brain.get_score()}\")\n else:\n self.canvas.itemconfig(self.question_text, text=\"You have reached the end of the quiz\")\n self.true_button.config(state=\"disabled\")\n self.false_button.config(state=\"disabled\")\n\n def correct_button_handle(self):\n self.give_feedback(self.quiz_brain.check_answer('True'))\n\n def wrong_button_handle(self):\n self.give_feedback(self.quiz_brain.check_answer('False'))\n\n def give_feedback(self, result):\n if result:\n self.canvas.config(bg='green')\n else:\n self.canvas.config(bg='red')\n self.window.after(1000, self.get_next_question)\n\n def flash(self):\n self.canvas.config(bg=\"white\")\n","sub_path":"34/quizzler-app-start/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"193101667","text":"import pandas as pd\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.utils.data as Data\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport re\r\nfrom torch.nn.utils.rnn import pad_sequence\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score, f1_score\r\nfrom sklearn.model_selection import train_test_split\r\nimport os\r\nimport pickle\r\nfrom model import *\r\nfrom utils import *\r\nimport csv\r\nimport random\r\nimport nltk\r\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= \"1\"\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\nimdb = pd.read_csv('.../IMDB Dataset.csv')\r\nimdb.loc[imdb['sentiment']=='positive', 'sentiment'] = 0\r\nimdb.loc[imdb['sentiment']=='negative', 'sentiment'] = 1\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(imdb['review'], imdb['sentiment'], test_size=0.2)\r\n\r\ntrain_data = preprocess(X_train, max_length=384, device=device)\r\ntest_data = preprocess(X_test, max_length=384, device=device)\r\ny_train = torch.tensor(y_train.values.astype(int), device=device)\r\ny_test = torch.tensor(y_test.values.astype(int), device=device)\r\n\r\n\r\ntrain_dataset = Data.TensorDataset(train_data, y_train)\r\ntest_dataset = Data.TensorDataset(test_data, y_test)\r\n\r\ntrain_dataloader = Data.DataLoader(train_dataset, batch_size=32)\r\ntest_dataloader = Data.DataLoader(test_dataset, batch_size=32)\r\n\r\n\r\nnasmi = NASMI(config, 2, device, training=True).cuda()\r\noptimizer = optim.Adam(nasmi.parameters(), lr=0.001, eps=1e-8)\r\nCE = nn.CrossEntropyLoss()\r\nscheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 5)\r\n\r\n\r\nsave_weight_path = '.../alpha_weight/cola'\r\nsave_model_path = '.../model_weight/cola'\r\nif not os.path.isdir(save_weight_path):\r\n os.mkdir(save_weight_path)\r\nif not os.path.isdir(save_model_path):\r\n os.mkdir(save_model_path)\r\n \r\nepochs = 10\r\nstopping_round = 0\r\ntest_accuracy_list = []\r\nfor epoch in tqdm(range(epochs)):\r\n train_predict_result = []\r\n CE_mean_loss = []\r\n discrete_mean_loss = []\r\n first_mean_loss = []\r\n second_mean_loss = []\r\n nasmi.train()\r\n for X_train_batch, y_train_batch in train_dataloader:\r\n output, total_discrete_z, discrete_loss = nasmi(X_train_batch)\r\n softmax = nn.Softmax(dim=1)\r\n train_pred_prob = softmax(output)\r\n train_pred_prob = train_pred_prob.cpu().detach().numpy()[:,1]\r\n for prob in train_pred_prob:\r\n train_predict_result.append(prob)\r\n\r\n CE_loss = CE(output,torch.tensor(y_train_batch, dtype=torch.long).cuda())\r\n mean_layer_loss, first_loss, second_loss = Mutual_Information(total_discrete_z, alpha=1)\r\n total_loss = 0.5*(CE_loss) + 0.5(discrete_loss + mean_layer_loss)\r\n \r\n CE_mean_loss.append(CE_loss.cpu().detach().numpy())\r\n discrete_mean_loss.append(discrete_loss.cpu().detach().numpy())\r\n first_mean_loss.append(first_loss.cpu().detach().numpy())\r\n second_mean_loss.append(second_loss.cpu().detach().numpy())\r\n \r\n total_loss.backward()\r\n nn.utils.clip_grad_norm_(nasmi.parameters(), 1)\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n \r\n test_predict_result = []\r\n with torch.no_grad():\r\n nasmi.eval()\r\n for X_test_batch, y_test_batch in test_dataloader:\r\n output, _, _ = nasmi(X_test_batch)\r\n softmax = nn.Softmax(dim=1)\r\n test_pred_prob = softmax(output)\r\n test_pred_prob = test_pred_prob.cpu().detach().numpy()[:,1]\r\n for prob in 
test_pred_prob:\r\n test_predict_result.append(prob)\r\n \r\n structure_path = os.path.join(save_weight_path, 'NAS_structure{}.pkl'.format(epoch))\r\n\r\n CE_mean_loss = np.mean(CE_mean_loss)\r\n discrete_mean_loss = np.mean(discrete_mean_loss)\r\n first_mean_loss = np.mean(first_mean_loss)\r\n second_mean_loss = np.mean(second_mean_loss)\r\n \r\n train_predict_result = np.where(np.array(train_predict_result)>0.5,1,0)\r\n train_accuracy = accuracy_score(y_train.cpu(), train_predict_result)\r\n train_micro_f1 = f1_score(y_train.cpu(), train_predict_result, average='binary')\r\n train_macro_f1 = f1_score(y_train.cpu(), train_predict_result, average='binary')\r\n \r\n test_predict_result = np.where(np.array(test_predict_result)>0.5,1,0)\r\n test_accuracy = accuracy_score(y_test.cpu(), test_predict_result)\r\n test_micro_f1 = f1_score(y_test.cpu(), test_predict_result, average='binary')\r\n test_macro_f1 = f1_score(y_test.cpu(), test_predict_result, average='binary')\r\n test_accuracy_list.append(test_accuracy)\r\n \r\n model_weight_path = os.path.join(save_model_path, 'nasmi{}.pkl'.format(epoch))\r\n torch.save(nasmi.state_dict(), model_weight_path)\r\n \r\n print('Epochs:{},CE Loss:{:5f} Discrete Loss:{:5f} First Loss:{:5f} Second Loss:{:5f} train accuracy:{:5f} test accuracy:{:5f} train micro_f1:{:5f} train macro_f1:{:5f} test micro_f1:{:5f} test macro_f1:{:5f}'.format(\r\n epoch,CE_mean_loss,discrete_mean_loss,first_mean_loss, second_mean_loss, train_accuracy,test_accuracy, train_micro_f1, train_macro_f1, test_micro_f1, test_macro_f1))","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"568835408","text":"import subprocess\nimport os\nimport shutil\nimport glob\nfrom operator import itemgetter\n\"\"\"\nMap from file_path -> List of [error row column]\n\"\"\"\nmypy = dict()\npytype = dict()\npylint = dict()\n\ne_mypy = dict()\ne_pytype = dict()\ne_pylint = dict()\n\n\"\"\"\nmypy_pytype = dict()\npytype_mypy = dict()\nmypy_pylint = dict()\npylint_mypy = dict()\n\"\"\"\n\nmypy_read_path = \"/home/bew/Desktop/wp2/FilteredRepoNFiles/mypy/\"\npytype_read_path = \"/home/bew/Desktop/wp2/FilteredRepoNFiles/pytype/\" \npylint_read_path = \"/home/bew/Desktop/wp2/FilteredRepoNFiles/pylint/\"\n\nT = True\nF = False\n\ndo_mypy = T\ndo_pytype = T\ndo_pylint = T\n\n# SINGLE or DUPE\nisSingle = T\n\nL = 22\nR = 24\n\nmypy_noline = 0\n\nif do_mypy:\n\tfor ii in range(L,R):\n\t\tprint(ii)\n\t\tmypy_read_path_mod = mypy_read_path + str(ii)\n\t\twith open(mypy_read_path_mod) as fp:\n\t\t\tfor f in fp:\n\t\t\t\tt = f.strip()\n\t\t\t\tif \"error:\" in t:\n\t\t\t\t\t#print(t)\n\t\t\t\t\tif \"cannot perform relative import\" in t:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"Duplicate module named\" in t:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"Are you missing an __init__.py?\" in t:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tleft = t.rfind(\"[\")\n\t\t\t\t\tright = t.rfind(\"]\")\n\t\t\t\t\terror_type = t[left+1:right]\n\t\t\t\t\tcut = t.find(\" error: \")\n\t\t\t\t\ttt = t[cut:]\n\t\t\t\t\tttt = t[:cut]\n\t\t\t\t\tx = ttt.split(':')\n\t\t\t\t\t#print(\"t = {}\".format(t))\n\t\t\t\t\t#print(\"ttt = {}\".format(ttt))\n\t\t\t\t\t#print(\"x = {}\".format(x))\n\t\t\t\t\tif x[1] == '':\n\t\t\t\t\t\tmypy_noline += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\trow = int(x[1])\n\t\t\t\t\tif len(x) > 2:\n\t\t\t\t\t\tif x[2] == '' or True:\n\t\t\t\t\t\t\tcolumn = -1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcolumn = int(x[2])\n\t\t\t\t\telse:\n\t\t\t\t\t\tcolumn = -1\n\t\t\t\t\ty = [error_type, row, column]\n\n\t\t\t\t\tif False and error_type == \"return-value\":\n\t\t\t\t\t\tprint(x[0],y)\n\t\t\t\t\t#print(x[0])\n\t\t\t\t\t#print(y)\n\n\t\t\t\t\t# Might be able to make this more efficient\n\t\t\t\t\tif x[0] in mypy:\n\t\t\t\t\t\tz = mypy[x[0]]\n\t\t\t\t\t\t\n\t\t\t\t\t\tif y not in z:\n\t\t\t\t\t\t\tz.append(y)\n\t\t\t\t\t\t\tmypy[x[0]] = z\n\n\t\t\t\t\t\t\tif error_type in e_mypy:\n\t\t\t\t\t\t\t\tz = e_mypy[error_type]\n\t\t\t\t\t\t\t\te_mypy[error_type] = z + 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\te_mypy[error_type] = 1\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tz.append(y)\n\t\t\t\t\t\tmypy[x[0]] = z\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tz = [y]\n\t\t\t\t\t\tmypy[x[0]] = z\n\n\t\t\t\t\t\tif error_type in e_mypy:\n\t\t\t\t\t\t\tz = e_mypy[error_type]\n\t\t\t\t\t\t\te_mypy[error_type] = z + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\te_mypy[error_type] = 1\n\n\n\tif 0:\n\t\tprint(\"----------------------------------------------------------------\")\n\t\tfor m in mypy:\n\t\t\tzz = mypy[m]\n\t\t\tprint(m)\n\t\t\tprint(zz)\n\t\t\t#for z in zz:\n\t\t\t#\tprint(z)\n\tif 0:\n\t\tfor m in sorted(e_mypy):\n\t\t\tprint(m,e_mypy[m])\n\nif do_pytype:\n\tfor ii in range(L,R):\n\t\tprint(ii)\n\t\tpytype_read_path_mod = pytype_read_path + str(ii)\n\t\twith open(pytype_read_path_mod) as fp:\n\t\t\tfor f in fp:\n\t\t\t\tt = f.strip()\n\t\t\t\tif \"[\" in t and \"]\" in t and \"line\" in t and \"File\" in t:\n\t\t\t\t\tleft = t.rfind(\"[\")\n\t\t\t\t\tright = t.rfind(\"]\")\n\t\t\t\t\terror_type = t[left+1:right]\n\t\t\t\t\tif \"_K, _V\" in error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"_K, _V\" in 
error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"str]\" in error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"str, Any], tuple\" in error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"Any], int\" in error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"_KT, _VT\" in error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"list\" == error_type or \"str\" == error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif \"-\" not in error_type or \":\" in error_type:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#print(t)\n\t\t\t\t\t#print(error_type)\n\t\t\t\t\tx = t.split(',')\n\t\t\t\t\tname = x[0].replace(\"File \",\"\")\n\t\t\t\t\tname = name.replace(\"\\\"\",\"\")\n\t\t\t\t\tname = name.replace(\"/home/bew/archive/typed_project/\",\"\")\n\t\t\t\t\trow = x[1].replace(\" line \",\"\")\n\t\t\t\t\t#print(\"> {}\".format(t))\n\t\t\t\t\t#print(\">> {}\".format(row))\n\t\t\t\t\tif len(x) <= 2 or \": \" in row:\n\t\t\t\t\t\trow = row.split(':')\n\t\t\t\t\t\trow = row[0]\n\t\t\t\t\tif \") [\" in row:\n\t\t\t\t\t\trow = row.split(')')\n\t\t\t\t\t\trow = row[0]\n\t\t\t\t\trow = int(row)\n\t\t\t\t\t#print(name)\n\t\t\t\t\t#print(row)\n\t\t\t\t\tcolumn = -2\n\t\t\t\t\ty = [error_type, row, column]\n\t\t\t\t\t#print(error_type, row, column)\n\t\t\t\t\t#bew = int(row)\n\n\t\t\t\t\tif name in pytype:\n\t\t\t\t\t\tz = pytype[name]\n\t\t\t\t\t\t\n\t\t\t\t\t\tif y not in z:\n\t\t\t\t\t\t\tz.append(y)\n\t\t\t\t\t\t\tpytype[name] = z\n\n\t\t\t\t\t\t\tif error_type in e_pytype:\n\t\t\t\t\t\t\t\tz = e_pytype[error_type]\n\t\t\t\t\t\t\t\te_pytype[error_type] = z + 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\te_pytype[error_type] = 1\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tz.append(y)\n\t\t\t\t\t\tpytype[name] = z\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tz = [y]\n\t\t\t\t\t\tpytype[name] = z\n\n\t\t\t\t\t\tif error_type in e_pytype:\n\t\t\t\t\t\t\tz = e_pytype[error_type]\n\t\t\t\t\t\t\te_pytype[error_type] = z + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\te_pytype[error_type] = 1\n\tif 0:\n\t\tfor m in pytype:\n\t\t\tprint(m)\n\t\t\tprint(pytype[m])\n\nif do_pylint:\n\tprev = \"\"\n\tfor ii in range(L,R):\n\t\tprint(ii)\n\t\tpylint_read_path_mod = pylint_read_path + str(ii)\n\t\twith open(pylint_read_path_mod) as fp:\n\t\t\tfor f in fp:\n\t\t\t\tt = f.strip()\n\t\t\t\tif t.endswith(\")\"):\n\t\t\t\t\tif \"Your code has been rated\" in t:\n\t\t\t\t\t\tline = fp.readline()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tleft = t.rfind(\"(\")\n\t\t\t\t\tright = t.rfind(\")\")\n\t\t\t\t\terror_type = t[left+1:right]\n\n\t\t\t\t\tx = t.split(':')\n\t\t\t\t\tname = x[0].replace(\"File \",\"\")\n\t\t\t\t\tname = name.replace(\"\\\"\",\"\")\n\t\t\t\t\tname = name.replace(\"/home/bew/archive/typed_project/\",\"\")\n\t\t\t\t\tgood = 1\n\t\t\t\t\tif len(x) < 2:\n\t\t\t\t\t\t#print(prev)\n\t\t\t\t\t\t#print(t)\n\t\t\t\t\t\tgood = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tif \"sys.argv[0]\" not in t:\n\t\t\t\t\t\t\trow = int(x[1])\n\t\t\t\t\t\t\tif True:\n\t\t\t\t\t\t\t\tcolumn = -3\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcolumn = int(x[2])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#print(prev)\n\t\t\t\t\t\t\t#print(t)\n\t\t\t\t\t\t\tgood = 0\n\t\t\t\t\t\t\n\t\t\t\t\t#print(t)\n\t\t\t\t\t#print(name)\n\t\t\t\t\t#print(error_type)\n\t\t\t\t\tif \"-\" not in error_type:\n\t\t\t\t\t\t#print(\"ERRORRRRRRRRRRRRRRRRRRRR {}\".format(error_type))\n\t\t\t\t\t\tgood = 0\n\n\t\t\t\t\tif good == 1:\n\t\t\t\t\t\ty = [error_type, row, column]\n\t\t\t\t\t\tif name in pylint:\n\t\t\t\t\t\t\tz = pylint[name]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif y not in z:\n\t\t\t\t\t\t\t\tz.append(y)\n\t\t\t\t\t\t\t\tpylint[name] = 
z\n\n\t\t\t\t\t\t\t\tif error_type in e_pylint:\n\t\t\t\t\t\t\t\t\tz = e_pylint[error_type]\n\t\t\t\t\t\t\t\t\te_pylint[error_type] = z + 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\te_pylint[error_type] = 1\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\t\tz.append(y)\n\t\t\t\t\t\t\tpylint[name] = z\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tz = [y]\n\t\t\t\t\t\t\tpylint[name] = z\n\n\t\t\t\t\t\t\tif error_type in e_pylint:\n\t\t\t\t\t\t\t\tz = e_pylint[error_type]\n\t\t\t\t\t\t\t\te_pylint[error_type] = z + 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\te_pylint[error_type] = 1\n\t\t\t\tprev = t\n\tif 1:\n\t\tyy = []\n\t\tyy.append([\"(facebook)buck/tools/build/modules/find_duplicate_classes_in_jars.py\", [\"syntax-error\",221,101]])\n\t\tyy.append([\"(facebook)buck/test/com/facebook/buck/android/testdata/android_project/java/com/resourceref/generator.py\", [\"syntax-error\",12,74]])\n\t\tyy.append([\"(ActiveState)code/recipes/Python/82347_pulse/recipe-82347.py\",[\"syntax-error\",14,23]])\n\t\tyy.append([\"(ActiveState)code/recipes/Python/576837_Crop_PDF_File_with_pyPdf/recipe-576837.py\",[\"syntax-error\",32,682]])\n\t\tyy.append([\"(ActiveState)code/recipes/Python/576483_Convert_Subnetmask_CIDR_notatidotdecimal/recipe-576483.py\",[\"syntax-error\",6,32]])\n\t\tyy.append([\"(quarkslab)irma/probe/extras/tools/nsrl/import_nsrl.py\",[\"syntax-error\",24,35]])\n\t\tyy.append([\"(gnachman)iTerm2/tests/ranges.py\",[\"syntax-error\",8,80]])\n\t\tyy.append([\"(oilshell)oil/Python-2.7.13/Tools/unicode/mkstringprep.py\",[\"syntax-error\",117,304]])\n\t\tyy.append([\"(WZQ1397)automatic-repo/python/FileSystem/backupSmallFileToTarToLocal.py\",[\"syntax-error\",42,177]])\n\t\tyy.append([\"(mapsme)omim/data/benchmarks/tk_results_viewer.py\",[\"syntax-error\",251,60]])\n\t\tyy.append([\"(ric2b)Vivaldi-browser/chromium/tools/win/pe_summarize.py\",[\"syntax-error\",69,55]])\n\t\tyy.append([\"(ric2b)Vivaldi-browser/chromium/remoting/tools/extract_android_native_lib.py\",[\"syntax-error\",16,64]])\n\t\tyy.append([\"(ric2b)Vivaldi-browser/chromium/remoting/tools/remove_spaces.py\",[\"syntax-error\",13,30]])\n\t\tyy.append([\"(ric2b)Vivaldi-browser/chromium/third_party/android_platform/development/scripts/stack.py\",[\"syntax-error\",50,19]])\n\t\tyy.append([\"(endlessm)chromium-browser/third_party/angle/third_party/deqp/src/scripts/cppcheck.py\",[\"syntax-error\",155,26]])\n\t\tyy.append([\"(endlessm)chromium-browser/third_party/angle/third_party/deqp/src/scripts/log/log_to_xml.py\",[\"syntax-error\",186,35]])\n\t\tyy.append([\"(endlessm)chromium-browser/third_party/angle/third_party/deqp/src/external/vulkancts/scripts/verify_submission.py\",[\"syntax-error\",79,55]])\n\t\tyy.append([\"(mlperf)training_results_v0.6/Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/mkldnn/scripts/generate_mkldnn_debug.py\",[\"syntax-error\",192,316]])\n\n\t\tfor yyy in yy:\n\t\t\tname = yyy[0]\n\t\t\ty = yyy[1]\n\t\t\tif name in pylint:\n\t\t\t\tz = pylint[name]\n\t\t\t\tz.append(y)\n\t\t\t\tpylint[name] = z\n\t\t\telse:\n\t\t\t\tz = [y]\n\t\t\t\tpylint[name] = z\n\t\t\terror_type = y[0]\n\t\t\tif error_type in e_pylint:\n\t\t\t\tz = e_pylint[error_type]\n\t\t\t\te_pylint[error_type] = z + 1\n\t\t\telse:\n\t\t\t\te_pylint[error_type] = 1\n\n\tif 0:\n\t\tfor m in pylint:\n\t\t\tprint(m)\n\t\t\tprint(pylint[m])\n\n\n\n\n\n\ndef compare(X, Y, id, does):\n\tif does == 0:\n\t\treturn\n\n\tresult = dict()\n\teee = dict()\n\tfor m in X:\n\t\tif m in Y:\n\t\t\txx = X[m]\n\t\t\tyy = Y[m]\n\t\t\t#print(\"> 
{}\".format(m))\n\t\t\t#print(xx)\n\t\t\t#print(yy)\n\n\t\t\t# THIS IS INEFFICIENT (how to spell?)\n\t\t\tfor x in xx:\n\t\t\t\tfor y in yy:\n\t\t\t\t\tif x[1] == y[1]:\n\n\t\t\t\t\t\tif x[0] == \"operator\":\n\t\t\t\t\t\t\tprint(m, x)\n\n\t\t\t\t\t\tif x[0] in result:\n\t\t\t\t\t\t\tz = result[x[0]]\n\t\t\t\t\t\t\tr = []\n\t\t\t\t\t\t\t#print(\">x = {}\".format(x))\n\t\t\t\t\t\t\t#print(\">y = {}\".format(y))\n\t\t\t\t\t\t\t#print(\">Z = {}\".format(z))\n\t\t\t\t\t\t\tincr = 0\n\t\t\t\t\t\t\tfor u in z:\n\t\t\t\t\t\t\t\tif y[0] == u[0]:\n\t\t\t\t\t\t\t\t\tincr = 1\n\t\t\t\t\t\t\t\t\tr.append([y[0], u[1] + 1])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tr.append(u)\n\t\t\t\t\t\t\tif incr == 0:\n\t\t\t\t\t\t\t\tr.append([y[0], 1])\n\t\t\t\t\t\t\t#print(\">R = {}\".format(r))\n\t\t\t\t\t\t\tresult[x[0]] = r\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tz = [[y[0], 1]]\n\t\t\t\t\t\t\tresult[x[0]] = z\n\n\tif id == 1:\n\t\teee = e_mypy\n\t\tprint(\"----- Mypy -> Pytype -----\")\n\telif id == 2:\n\t\teee = e_pytype\n\t\tprint(\"----- Pytype -> Mypy -----\")\n\telif id == 3:\n\t\teee = e_mypy\n\t\tprint(\"----- Mypy -> Pylint -----\")\n\telif id == 4:\n\t\teee = e_pylint\n\t\tprint(\"----- Pylint -> Mypy -----\")\n\telif id == 5:\n\t\teee = e_pytype\n\t\tprint(\"----- Pytype -> Pylint -----\")\n\telif id == 6:\n\t\teee = e_pylint\n\t\tprint(\"----- Pylint -> Pytype -----\")\n\n\tfor c in result:\n\t\ta = result[c]\n\t\ta.sort(key=itemgetter(1,0), reverse=True)\n\t\tresult[c] = a\n\n\tfor c in sorted(result):\n\t\t#print(c, result[c])\n\t\ttotal = -1\n\t\tif id == 1 or id == 3:\n\t\t\ttotal = e_mypy[c]\n\t\telif id == 2 or id == 5:\n\t\t\ttotal = e_pytype[c]\n\t\telif id == 4 or id == 6:\n\t\t\ttotal = e_pylint[c]\n\t\tuse = 0\n\t\tfor r in result[c]:\n\t\t\tuse += r[1]\n\t\tprint(\"{} {}/{}\".format(c,use,total))\n\t\tprint(\" \",result[c])\n\n\tprint(\"Not in any:\")\n\tfor i in eee:\n\t\tif i not in result:\n\t\t\tprint(i)\n\tprint(\"\")\n\n\n\n\ncompare(mypy, pytype, 1, 1)\ncompare(pytype, mypy, 2, 0)\ncompare(mypy, pylint, 3, 1)\ncompare(pylint, mypy, 4, 0)\ncompare(pytype, pylint, 5, 0)\ncompare(pylint, pytype, 6, 0)\n\ndef compareThree(X, Y, Z, id, does):\n\tif does == 0:\n\t\treturn\n\n\tresult = dict()\n\teee = dict()\n\tfor m in X:\n\t\tif m in Y:\n\t\t\tif m in Z:\n\t\t\t\txx = X[m]\n\t\t\t\tyy = Y[m]\n\t\t\t\tvv = Z[m]\n\t\t\t\t#print(\"> {}\".format(m))\n\t\t\t\t#print(xx)\n\t\t\t\t#print(yy)\n\n\t\t\t\t# THIS IS INEFFICIENT (how to spell?)\n\t\t\t\tfor x in xx:\n\t\t\t\t\tfor y in yy:\n\t\t\t\t\t\tfor v in vv:\n\t\t\t\t\t\t\t#if x[0] == \"operator\" and y[0] == \"unsupported-operands\" and v[0] == \"invalid-unary-operand-type\":\n\t\t\t\t\t\t\t#\tprint(m, x, y, v)\n\t\t\t\t\t\t\tif x[1] == y[1] and y[1] == v[1]:\n\t\t\t\t\t\t\t\t#print(\">x = {}\".format(x))\n\t\t\t\t\t\t\t\t#print(\">y = {}\".format(y))\n\t\t\t\t\t\t\t\t#print(\">v = {}\".format(v))\n\t\t\t\t\t\t\t\tif x[0] in result:\n\t\t\t\t\t\t\t\t\tz = result[x[0]]\n\t\t\t\t\t\t\t\t\tr = []\n\t\t\t\t\t\t\t\t\t#print(\">x = {}\".format(x))\n\t\t\t\t\t\t\t\t\t#print(\">y = {}\".format(y))\n\t\t\t\t\t\t\t\t\t#print(\">Z = {}\".format(z))\n\t\t\t\t\t\t\t\t\tincr = 0\n\t\t\t\t\t\t\t\t\tfor u in z:\n\t\t\t\t\t\t\t\t\t\tif y[0] == u[0] and v[0] == u[1]:\n\t\t\t\t\t\t\t\t\t\t\tincr = 1\n\t\t\t\t\t\t\t\t\t\t\tr.append([y[0], v[0], u[2] + 1])\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tr.append(u)\n\t\t\t\t\t\t\t\t\tif incr == 0:\n\t\t\t\t\t\t\t\t\t\tr.append([y[0], v[0], 1])\n\t\t\t\t\t\t\t\t\t#print(\">R = 
{}\".format(r))\n\t\t\t\t\t\t\t\t\tresult[x[0]] = r\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tz = [[y[0],v[0],1]]\n\t\t\t\t\t\t\t\t\tresult[x[0]] = z\n\n\tif id == 1:\n\t\teee = e_mypy\n\t\tprint(\"----- Mypy -> [Pytype, Pylint] -----\")\n\telif id == 2:\n\t\teee = e_pytype\n\t\tprint(\"----- Pytype -> [Pylint, Mypy] -----\")\n\telif id == 3:\n\t\teee = e_pylint\n\t\tprint(\"----- Pylint -> [Mypy, Pyliint] -----\")\n\n\tfor c in result:\n\t\ta = result[c]\n\t\ta.sort(key=itemgetter(2,1,0), reverse=True)\n\t\tresult[c] = a\n\n\tfor c in sorted(result):\n\t\t#print(c, result[c])\n\t\t\"\"\"\n\t\tprint(c)\n\t\tprint(\" \",result[c])\n\t\t\"\"\"\n\t\ttotal = -1\n\t\tif id == 1:\n\t\t\ttotal = e_mypy[c]\n\t\telif id == 2:\n\t\t\ttotal = e_pytype[c]\n\t\telif id == 3:\n\t\t\ttotal = e_pylint[c]\n\t\tuse = 0\n\t\tfor r in result[c]:\n\t\t\tuse += r[2]\n\t\tprint(\"{} {}/{}\".format(c,use,total))\n\t\tprint(\" \",result[c])\n\n\tprint(\"Not in any:\")\n\tfor i in eee:\n\t\tif i not in result:\n\t\t\tprint(i)\n\t\t\n\n\tprint(\"\")\n\ncompareThree(mypy, pytype, pylint, 1, 0)\ncompareThree(pytype, pylint, mypy, 2, 0)\ncompareThree(pylint, mypy, pytype, 3, 0)\n\n\n\n\n\n\n\n\"\"\"\nif do_mp_pt:\n\tfor m in mypy:\n\t\tif m in pytype:\n\t\t\txx = mypy[m]\n\t\t\tyy = pytype[m]\n\t\t\t#print(\"> {}\".format(m))\n\t\t\t#print(xx)\n\t\t\t#print(yy)\n\n\t\t\t# THIS IS INEFFICIENT (how to spell?)\n\t\t\tfor x in xx:\n\t\t\t\tfor y in yy:\n\t\t\t\t\tif x[1] == y[1]:\n\t\t\t\t\t\tif x[0] in mypy_pytype:\n\t\t\t\t\t\t\tz = mypy_pytype[x[0]]\n\t\t\t\t\t\t\tr = []\n\t\t\t\t\t\t\t#print(\">x = {}\".format(x))\n\t\t\t\t\t\t\t#print(\">y = {}\".format(y))\n\t\t\t\t\t\t\t#print(\">Z = {}\".format(z))\n\t\t\t\t\t\t\tincr = 0\n\t\t\t\t\t\t\tfor u in z:\n\t\t\t\t\t\t\t\tif y[0] == u[0]:\n\t\t\t\t\t\t\t\t\tincr = 1\n\t\t\t\t\t\t\t\t\tr.append([y[0], u[1] + 1])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tr.append(u)\n\t\t\t\t\t\t\tif incr == 0:\n\t\t\t\t\t\t\t\tr.append([y[0], 1])\n\t\t\t\t\t\t\t#print(\">R = {}\".format(r))\n\t\t\t\t\t\t\tmypy_pytype[x[0]] = r\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tz = [[y[0], 1]]\n\t\t\t\t\t\t\tmypy_pytype[x[0]] = z\n\n\tprint(\"----- Mypy -> Pytype -----\")\n\tfor c in mypy_pytype:\n\t\ta = mypy_pytype[c]\n\t\ta.sort(key=itemgetter(1,0), reverse=True)\n\t\tmypy_pytype[c] = a\n\t#for c in mypy_pytype:\n\tfor c in sorted(mypy_pytype):\n\t\tprint(c, mypy_pytype[c])\n\tprint(\"\")\n\n\tfor m in pytype:\n\t\tif m in mypy:\n\t\t\txx = pytype[m]\n\t\t\tyy = mypy[m]\n\t\t\tfor x in xx:\n\t\t\t\tfor y in yy:\n\t\t\t\t\tif x[1] == y[1]:\n\t\t\t\t\t\tif x[0] in pytype_mypy:\n\t\t\t\t\t\t\tz = pytype_mypy[x[0]]\n\t\t\t\t\t\t\tr = []\n\t\t\t\t\t\t\tincr = 0\n\t\t\t\t\t\t\tfor u in z:\n\t\t\t\t\t\t\t\tif y[0] == u[0]:\n\t\t\t\t\t\t\t\t\tincr = 1\n\t\t\t\t\t\t\t\t\tr.append([y[0], u[1] + 1])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tr.append(u)\n\t\t\t\t\t\t\tif incr == 0:\n\t\t\t\t\t\t\t\tr.append([y[0], 1])\n\t\t\t\t\t\t\tpytype_mypy[x[0]] = r\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tz = [[y[0], 1]]\n\t\t\t\t\t\t\tpytype_mypy[x[0]] = z\n\n\tprint(\"----- Pytype -> Mypy -----\")\n\tfor c in pytype_mypy:\n\t\ta = pytype_mypy[c]\n\t\ta.sort(key=itemgetter(1,0), reverse=True)\n\t\tpytype_mypy[c] = a\n\t#for c in pytype_mypy:\n\tfor c in sorted(pytype_mypy):\n\t\tprint(c, pytype_mypy[c])\n\t#print(com[c])\n\"\"\"\n\n\n\n\n#print(\"Mypy: Error messages with no line number = {}\".format(mypy_noline))","sub_path":"type 
_tools/CompareErrors.py","file_name":"CompareErrors.py","file_ext":"py","file_size_in_byte":14323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"83603678","text":"import numpy as np\nimport torch\nfrom opytimizer import Opytimizer\nfrom opytimizer.core.function import Function\nfrom opytimizer.optimizers.pso import PSO\nfrom opytimizer.spaces.search import SearchSpace\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split\nfrom torch import optim\nfrom torch.autograd import Variable\n\n# Loading digits dataset\ndigits = load_digits()\n\n# Gathering samples and targets\nX = digits.data\nY = digits.target\n\n# Splitting the data\nX_train, X_val, Y_train, Y_val = train_test_split(\n X, Y, test_size=0.5, random_state=42)\n\n# Reshaping the data\nX_train = X_train.reshape(-1, 8, 8)\nX_val = X_val.reshape(-1, 8, 8)\n\n# Converting to sequence shape\nX_train = np.swapaxes(X_train, 0, 1)\nX_val = np.swapaxes(X_val, 0, 1)\n\n# Converting from numpy array to torch tensors\nX_train = torch.from_numpy(X_train).float()\nX_val = torch.from_numpy(X_val).float()\nY_train = torch.from_numpy(Y_train).long()\n\n\nclass LSTM(torch.nn.Module):\n def __init__(self, n_features, n_hidden, n_classes):\n # Overriding initial class\n super(LSTM, self).__init__()\n\n # Saving number of hidden units as a property\n self.n_hidden = n_hidden\n\n # Creating LSTM cell\n self.lstm = torch.nn.LSTM(n_features, n_hidden)\n\n # Creating linear layer\n self.linear = torch.nn.Linear(n_hidden, n_classes, bias=False)\n\n def forward(self, x):\n # Gathering batch size\n batch_size = x.size()[1]\n\n # Variable to hold hidden state\n h0 = Variable(torch.zeros(\n [1, batch_size, self.n_hidden]), requires_grad=False)\n\n # Variable to hold cell state\n c0 = Variable(torch.zeros(\n [1, batch_size, self.n_hidden]), requires_grad=False)\n\n # Performing forward pass\n fx, _ = self.lstm.forward(x, (h0, c0))\n\n return self.linear.forward(fx[-1])\n\n\ndef fit(model, loss, opt, x, y):\n # Declaring initial variables\n x = Variable(x, requires_grad=False)\n y = Variable(y, requires_grad=False)\n\n # Resetting the gradient\n opt.zero_grad()\n\n # Performing the foward pass\n fw_x = model.forward(x)\n output = loss.forward(fw_x, y)\n\n # Performing backward pass\n output.backward()\n\n # Updating parameters\n opt.step()\n\n return output.item()\n\n\ndef predict(model, x_val):\n # Declaring validation variable\n x = Variable(x_val, requires_grad=False)\n\n # Performing backward pass with this variable\n output = model.forward(x)\n\n # Getting the index of the prediction\n y_val = output.data.numpy().argmax(axis=1)\n\n return y_val\n\n\ndef lstm(opytimizer):\n # Some model parameters\n n_features = 8\n n_hidden = 128\n n_classes = 10\n\n # Instanciating the model\n model = LSTM(n_features, n_hidden, n_classes)\n\n # Input variables\n batch_size = 100\n epochs = 5\n\n # Gathering parameters from Opytimizer\n # Pay extremely attention to their order when declaring due to their bounds\n learning_rate = opytimizer[0][0]\n momentum = opytimizer[1][0]\n\n # Declaring the loss function\n loss = torch.nn.CrossEntropyLoss(reduction='mean')\n\n # Declaring the optimization algorithm\n opt = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)\n\n # Performing training loop\n for _ in range(epochs):\n # Initial cost as 0.0\n cost = 0.0\n\n # Calculating the number of batches\n num_batches = len(Y_train) // batch_size\n\n # For every batch\n for k in range(num_batches):\n # Declaring initial and ending for each batch\n start, end = k * batch_size, (k + 1) * batch_size\n\n # Cost will be the loss accumulated from model's fitting\n cost 
+= fit(model, loss, opt,\n X_train[:, start:end, :], Y_train[start:end])\n\n # Predicting samples from evaluating set\n preds = predict(model, X_val)\n\n # Calculating accuracy\n acc = np.mean(preds == Y_val)\n\n return 1 - acc\n\n\n# Creating Function's object\nf = Function(pointer=lstm)\n\n# Number of agents\nn_agents = 10\n\n# Number of decision variables\nn_variables = 2\n\n# Number of running iterations\nn_iterations = 100\n\n# Lower and upper bounds (has to be the same size as n_variables)\nlower_bound = [0, 0]\nupper_bound = [1, 1]\n\n# Creating the SearchSpace class\ns = SearchSpace(n_agents=n_agents, n_iterations=n_iterations,\n n_variables=n_variables, lower_bound=lower_bound,\n upper_bound=upper_bound)\n\n# Hyperparameters for the optimizer\nhyperparams = {\n 'w': 0.7,\n 'c1': 1.7,\n 'c2': 1.7\n}\n\n# Creating PSO's optimizer\np = PSO(hyperparams=hyperparams)\n\n# Finally, we can create an Opytimizer class\no = Opytimizer(space=s, optimizer=p, function=f)\n\n# Running the optimization task\nhistory = o.start()\n","sub_path":"examples/integrations/pytorch/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"550211300","text":"\"\"\"Add craeted at column to liked videos\n\nRevision ID: 289a1dbe60a5\nRevises: c8b2be2c0ece\nCreate Date: 2020-05-12 12:30:49.587670\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"289a1dbe60a5\"\ndown_revision = \"c8b2be2c0ece\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"liked_videos\",\n sa.Column(\n \"created_at\",\n sa.DateTime(),\n server_default=sa.text(\"now()\"),\n nullable=True,\n ),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"liked_videos\", \"created_at\")\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/289a1dbe60a5_add_craeted_at_column_to_liked_videos.py","file_name":"289a1dbe60a5_add_craeted_at_column_to_liked_videos.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"164723916","text":"#função que treina a rede neural\n\nfrom dasxlib.tensor import Tensor\nfrom dasxlib.nn import NeuralNet\nfrom dasxlib.loss import Loss, MSE\nfrom dasxlib.optim import Optimizer, SGD\nfrom dasxlib.data import DataIterator, BatchIterator\n\ndef train(net: NeuralNet,\n inputs: Tensor,\n targets: Tensor,\n num_epochs: int = 5000,\n iterator: DataIterator = BatchIterator(),\n loss: Loss = MSE(),\n optimizer: Optimizer = SGD()) -> None:\n \n for epoch in range(num_epochs):\n epoch_loss = 0.0\n for batch in iterator(inputs, targets):\n predicted = net.forward(batch.inputs)\n epoch_loss += loss.loss(predicted, batch.targets)\n grad = loss.grad(predicted, batch.targets)\n net.backward(grad)\n optimizer.step(net)\n print(epoch, epoch_loss)","sub_path":"dasxlib/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"256871237","text":"\"\"\" ModelConnectionTask module.\n\"\"\"\nimport copy\n\nimport numpy as np\n\nfrom multiml import logger\nfrom multiml.task.basic import MLBaseTask\n\n\nclass ModelConnectionTask(MLBaseTask):\n \"\"\" Build a single task connecting with multiple tasks.\n\n ``ModelConnectionTask`` connects multiple ML tasks considering the\n input/output variables and dependencies of the tasks, then builds a single\n task. ML model of component tasks are trained diferentially, thus\n each ML model must be implemented by the same deep\n learning library, i.e. Keras or Pytorch. Each subtask must contain\n * ``input_var_names``, `output_var_names`` and `true_var_names`,\n * loss function,\n to compile subtask dependencies and data I/O formats. The following\n examples shows a workflow and its attributes, which are automatically\n compiled:\n\n Examples:\n >>> (input0, input1, input2)\n >>> | | |\n >>> [subtask0] |\n >>> | |\n >>> (output0) |\n >>> | |\n >>> [subtask1]------+\n >>> |\n >>> (output1)\n >>> \n >>> input_var_names = ['input0', 'input1', 'input2']\n >>> output_var_names = ['output0', 'output1']\n >>> input_var_index = [[0, 1], [2]]\n >>> output_var_index = [[0], [1]]\n >>> cavhe_var_index = [[], [0]]\n \n Examples:\n >>> task = ModelConnectionTask(subtasks=[your_subtask0, your_subtask2],\n >>> optimizer='SGD')\n >>> task.execute()\n \"\"\"\n def __init__(self,\n subtasks,\n use_multi_loss=False,\n variable_mapping=None,\n **kwargs):\n \"\"\" Constructor of ModelConnectionTask.\n\n Args:\n subtasks (list): list must contains ordered instrance objects\n inherited from ``MLBaseTask``.\n use_multi_loss (bool): If False, intermediate losses are not\n considered in training steps.\n variable_mapping (list(str, str)): Input variables are replaced\n following this list. Used for the case that the input varialbes\n change from pre-training to main-training (with model connecting).\n **kwargs: Arbitrary keyword arguments passed to ``MLBaseTask``.\n \"\"\"\n super().__init__(**kwargs)\n\n self._subtasks = subtasks\n self._use_multi_loss = use_multi_loss\n self._variable_mapping = variable_mapping\n\n self._input_var_index = None\n self._output_var_index = None\n\n if self._input_var_names is not None:\n logger.warn(\n 'input_var_names is geiven but it will be set automatically ')\n self._input_var_names = None\n\n if self._output_var_names is not None:\n logger.warn(\n 'output_var_names is geiven but it will be set automatically ')\n self._output_var_names = None\n\n def compile(self):\n \"\"\" Compile subtasks and this task.\n \"\"\"\n for subtask in self._subtasks:\n subtask.compile()\n\n self.compile_index()\n super().compile()\n\n def compile_loss(self):\n \"\"\" Compile loss.\n\n Loss functions are retrieved from subtasks, thus each subtask must\n contain ``loss``. 
``loss_weights`` are set according to options:\n * If ``use_multi_loss`` is False, only loss of the last subtask is\n considered with weight = 1.0,\n * If ``use_multi_loss`` is True and ``loss_weights`` is given\n explicitly, given ``loss_weights`` is used.\n * If ``use_multi_loss`` is True and ``loss_weights`` is None,\n ``loss_weights`` is retrieved from each subtask.\n \"\"\"\n if self._use_multi_loss:\n self.ml.loss = []\n\n for index, subtask in enumerate(self._subtasks):\n self.ml.loss.append(subtask.ml.loss)\n\n if self._loss_weights is None:\n self.ml.loss_weights = []\n\n for index, subtask in enumerate(self._subtasks):\n loss_weights = subtask.ml.loss_weights\n\n if loss_weights is None:\n self.ml.loss_weights.append(1.0)\n elif isinstance(loss_weights, (int, float)):\n self.ml.loss_weights.append(loss_weights)\n\n else:\n raise ValueError(\n f'loss_weights {loss_weights} is not supported')\n\n elif isinstance(self._loss_weights, list):\n self.ml.loss_weights = self._loss_weights\n\n elif isinstance(self._loss_weights, dict):\n self.ml.loss_weights = []\n\n for index, subtask in enumerate(self._subtasks):\n loss_weights = self._loss_weights[subtask.task_id]\n self.ml.loss_weights.append(loss_weights)\n\n else:\n self.ml.loss = []\n self.ml.loss_weights = []\n\n num_subtasks = len(self._subtasks)\n self.ml.loss = [None] * num_subtasks\n self.ml.loss_weights = [0.0] * num_subtasks\n\n self.ml.loss[-1] = self._subtasks[-1].ml.loss\n self.ml.loss_weights[-1] = 1.0\n\n # TODO: special treatment for ensemble tasks\n new_loss = []\n new_loss_weights = []\n\n for index, loss in enumerate(self.ml.loss):\n if isinstance(loss, list):\n new_loss += loss\n new_loss_weights += self._subtasks[index].ml.loss_weights\n\n else:\n new_loss.append(loss)\n new_loss_weights.append(self.ml.loss_weights[index])\n\n if len(new_loss) == len(new_loss_weights):\n self.ml.loss = new_loss\n self.ml.loss_weights = new_loss_weights\n else:\n raise ValueError(\n 'Length of loss and loss_weights is not conssitent.')\n\n def compile_index(self):\n \"\"\" Compile subtask dependencies and I/O variables.\n \"\"\"\n self.set_output_var_index()\n self.set_input_var_index()\n\n self.set_ordered_subtasks()\n\n def predict_update(self, data=None):\n \"\"\" Predict and update results to storegate.\n \"\"\"\n y_pred = self.predict(data)\n\n offset = 0\n for index, subtask in enumerate(self._subtasks):\n output_var_names = subtask.output_var_names\n\n data = self._squeeze(y_pred[index + offset], 2)\n\n # TODO: special treatment for ensemble tasks\n if isinstance(subtask.ml.loss, list):\n offset += len(subtask.ml.loss) - 1\n\n self._storegate.update_data(data=data,\n var_names=output_var_names,\n phase='auto')\n\n def get_input_true_data(self, phase=None):\n \"\"\" Returns input and true data retrieved from storegate.\n\n The list of input data contains data for each *variable*, thus the\n length is equal to the length of ``input_var_names``. 
The list of\n true data contains data for each *subtask*, thus the length is equal to\n the length of ``subtasks``.\n\n Returns:\n (list, list): list of input data, and list of true data.\n\n \"\"\"\n input_ret = []\n true_ret = []\n\n for var_name in self.input_var_names:\n input_data = self.storegate.get_data(var_names=var_name,\n phase=phase)\n input_ret.append(input_data)\n\n for subtask in self._subtasks:\n true_var_names = subtask.true_var_names\n true_data = self.storegate.get_data(var_names=true_var_names,\n phase=phase)\n\n # TODO: special treatment for ensemble tasks\n if isinstance(subtask.ml.loss, list):\n true_ret += [true_data] * len(subtask.ml.loss)\n else:\n true_ret.append(true_data)\n\n return input_ret, true_ret\n\n def set_output_var_index(self):\n \"\"\" Set output_var_names and output_var_index.\n \"\"\"\n self._output_var_index = []\n self._output_var_names = []\n\n for subtask in self._subtasks:\n\n output_index = []\n output_var_names = subtask.output_var_names\n\n if output_var_names is None:\n continue\n\n if isinstance(output_var_names, str):\n output_var_names = [output_var_names]\n\n for output_var_name in output_var_names:\n if output_var_name in self.output_var_names:\n output_index.append(\n self.output_var_names.index(output_var_name))\n\n else:\n self.output_var_names.append(output_var_name)\n output_index.append(\n self.output_var_names.index(output_var_name))\n\n self._output_var_index.append(output_index)\n\n def set_input_var_index(self):\n \"\"\" Set input_var_names and input_var_index.\n \"\"\"\n self._input_var_index = []\n self._input_var_names = []\n\n for subtask in self._subtasks:\n input_index = []\n cache_index = []\n input_var_names = subtask.input_var_names\n\n if input_var_names is None:\n continue\n\n if isinstance(input_var_names, str):\n input_var_names = [input_var_names]\n\n input_var_names = self._apply_variable_mapping(input_var_names)\n\n for input_var_name in input_var_names:\n if input_var_name in self.input_var_names:\n input_index.append(\n self.input_var_names.index(input_var_name))\n\n elif input_var_name in self.output_var_names:\n index = self.output_var_names.index(input_var_name)\n index = (index + 1) * -1\n input_index.append(index)\n\n else:\n self.input_var_names.append(input_var_name)\n input_index.append(\n self.input_var_names.index(input_var_name))\n\n self._input_var_index.append(input_index)\n\n def set_ordered_subtasks(self):\n \"\"\" Order subtasks based on input_var_names and output_var_names.\n \"\"\"\n import networkx as nx\n\n def _flatten(data):\n from collections.abc import Iterable\n for v in data:\n if isinstance(v, Iterable) and not isinstance(v, (str, bytes)):\n yield from _flatten(v)\n else:\n yield v\n\n # dag built with subtasks and input/output names\n dag_sub = nx.DiGraph()\n for i_subtask, subtask in enumerate(self._subtasks):\n # Add subtask name\n dag_sub.add_node(i_subtask)\n\n # Add variable name\n input_var_names = subtask.input_var_names\n if isinstance(input_var_names, str):\n input_var_names = [input_var_names]\n\n input_var_names = self._apply_variable_mapping(input_var_names)\n\n for var in _flatten(input_var_names):\n if not dag_sub.has_node('var_' + var):\n dag_sub.add_node('var_' + var)\n dag_sub.add_edge('var_' + var, i_subtask)\n\n output_var_names = subtask.output_var_names\n if isinstance(output_var_names, str):\n output_var_names = [output_var_names]\n for var in output_var_names:\n if not dag_sub.has_node('var_' + var):\n dag_sub.add_node('var_' + var)\n 
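# edge from the producing subtask to its output variable node\n                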
dag_sub.add_edge(i_subtask, 'var_' + var)\n\n # dag built with subtasks\n dag = nx.DiGraph()\n for i_subtask, subtask in enumerate(self._subtasks):\n dag.add_node(i_subtask)\n\n for node in nx.topological_sort(dag_sub):\n if isinstance(node, int):\n predecessors = set([\n u for v in dag_sub.predecessors(node)\n for u in dag_sub.predecessors(v)\n ])\n for v in predecessors:\n dag.add_edge(v, node)\n\n successors = set([\n u for v in dag_sub.successors(node)\n for u in dag_sub.successors(v)\n ])\n for v in successors:\n dag.add_edge(node, v)\n\n new_subtasks = []\n new_input_var_index = []\n new_output_var_index = []\n\n for i_subtask in nx.topological_sort(dag):\n new_subtasks.append(self._subtasks[i_subtask])\n new_input_var_index.append(self._input_var_index[i_subtask])\n new_output_var_index.append(self._output_var_index[i_subtask])\n\n self._subtasks = new_subtasks\n self._input_var_index = new_input_var_index\n self._output_var_index = new_output_var_index\n\n def _apply_variable_mapping(self, input_vars):\n \"\"\" Convert variable name by given mapping.\n \"\"\"\n if self._variable_mapping is None:\n return input_vars\n\n ret = []\n if isinstance(input_vars[0], list):\n for v in input_vars:\n ret.append(self._apply_variable_mapping(v))\n return ret\n\n for input_var in input_vars:\n for (v_from, v_to) in self._variable_mapping:\n if v_from == input_var:\n input_var = v_to\n break\n ret.append(input_var)\n\n return ret\n\n @staticmethod\n def _squeeze(numpy_outputs, dim=0):\n \"\"\" Squeeze ndarray.\n \"\"\"\n offset = 0\n for index, shape in enumerate(numpy_outputs.shape):\n if shape == 1 and index >= dim:\n axis = index - offset\n numpy_outputs = np.squeeze(numpy_outputs, axis=axis)\n offset += 1\n return numpy_outputs\n","sub_path":"multiml/task/basic/ml_model_connection.py","file_name":"ml_model_connection.py","file_ext":"py","file_size_in_byte":14069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"220962855","text":"#!/bin/python\n\nimport sys\n\ndef solve(n, s, d, m):\n sum_counts = 0\n for i in range(0, n):\n sum = 0\n for j in range(i, m+i):\n if j >= len(s):\n sum += 0\n else:\n sum += s[j]\n if sum == d:\n sum_counts += 1\n return sum_counts\n\nn = int(raw_input().strip())\ns = map(int, raw_input().strip().split(' '))\nd, m = raw_input().strip().split(' ')\nd, m = [int(d), int(m)]\nresult = solve(n, s, d, m)\nprint(result)\n","sub_path":"src/python/birthday-chocolate.py","file_name":"birthday-chocolate.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"493841425","text":"import itertools\r\n\r\nfile_name = \"A-small-attempt2\"\r\n\r\n\r\ndef read_file():\r\n with open(file_name+\".in\", \"r\") as f:\r\n return map(str.strip, f.readlines(int(f.readline())))\r\n\r\n\r\ndef write(lines):\r\n with open(file_name+\".out\", \"w\") as out:\r\n for index, answer in enumerate(lines):\r\n out.write(\"Case #\"+str(index+1)+\": \"+str(answer)+\"\\n\")\r\n\r\n\r\ndef order_string(string):\r\n current_char = string[0]\r\n builder = \"\"\r\n for char in string:\r\n if char >= current_char:\r\n current_char = char\r\n builder = char + builder\r\n else:\r\n builder += char\r\n return builder\r\n\r\n\r\nwrite(map(order_string, read_file()))","sub_path":"codes/CodeJamCrawler/16_1_1/NathanMerrill/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"324822194","text":"from flask import Flask, escape, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n name = request.args.get(\"name\", \"World\")\n return f'Hello, {escape(name)}!
heeh'\nif __name__ == \"__main__\":\n app.debug=True\n app.run(host=\"0.0.0.0\", port=4863)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"268332743","text":"#from __future__ import print_function\n#import pandas as pd\ndict_3 = {\"aaa\":{\"A\":[1,0,1,0],\"B\":[1,1,1,1],\"C\":[1,1,1,0]},\"bbb\":{\"w\":[1,0,1,0],\"D\":[1,1,1,1],\"F\":[1,1,1,0]}}\n\n#print pd.DataFrame(dict_3)\n\n\n\nfrom itertools import izip_longest\nimport csv\n\nwith open('Protein.csv','wb') as file:\n writer = csv.writer(file, delimiter='\\t')\n for protein_complex in dict_3:\n #print file,protein_complex\n print >> file , protein_complex\n writer.writerow(dict_3[protein_complex].keys())\n for row in izip_longest(*dict_3[protein_complex].values(), fillvalue=''):\n writer.writerow(list(row))","sub_path":"dict_3.py","file_name":"dict_3.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"4592594","text":"\"\"\"\nWorkflow:\n1. Take filename as input (i.e., `scriptname filename`)\n2. Read header and save as array\n3. Output header and first three lines as example text\n4. Prompt for header choice and values to look for\n5. Display one sample line for each value prompted for\n6. Sort the file by the header choice\n7. Output single CSV based off of x header values\n\"\"\"\n\nimport csv\nimport sys\nimport io\nimport operator\nfrom operator import itemgetter\n\n# Accept filename as input and assign to variable\nfilename = sys.argv[1]\n\n# Open file\nwith io.open(filename, newline='') as csv_file:\n\tdialect = csv.Sniffer().sniff(csv_file.read(4096))\n\tcsv_file.seek(0)\n\tcsv_reader = csv.reader(csv_file, dialect)\n\t\n\theader = []\n\tline_count = 0\n\n\tfor row in csv_reader:\n\t\tif line_count == 0:\n\t\t\t# Read header and save as array\t\n\t\t\tfor column in row:\n\t\t\t\theader.append(column)\n\t\t\t# Output header and first three lines as example text\n\t\t\tline_count += 1\n\t\t\tblank_column = [None] * len(header)\n\t\telse:\n\t\t\tfor i,column in enumerate(row):\n\t\t\t\tif column != \"\":\n\t\t\t\t\tblank_column[i] = 1\n\t\t\n\t# Check for empty columns and remove them to prevent later index errors also delete the column from the headers.\n\tfor i,column in enumerate(blank_column):\n\t\tif column == None:\n\t\t\theader[i]=None\n\t\t\t\n\n\t# Ask for header choice\n\tprint('\\nWHICH HEADER WOULD YOU LIKE TO USE TO SPLIT? \\n')\n\t\n\tfor i,column in enumerate(header):\n\t\tif column != None:\n\t\t\tprintValue = str(i)\n\t\t\tprintValue += \". \"\n\t\t\tprintValue += header[i]\n\t\t\tprint(printValue)\n\n\t# fix the count here\n\theader_choice = int(input('Please enter a value between 0 and {}: '.format(len(header))))\n\t\n\nwith io.open(filename, newline='') as csv_file:\n\tcsv_reader = csv.reader(csv_file, dialect)\n\t\n\t# Sort the file based off of header choice\n\tsortedlist = sorted(csv_reader, key=lambda row: row[header_choice], reverse=True)\n\t\n\tchoices = []\n\n\tline_count = 0\n\tfor row in sortedlist:\n\t\t# capture each value and discard if non-unique\n\t\t# print remaining values\n\t\tchoices.append(row[header_choice])\n\t\tline_count += 1\n\t\tif line_count == len(sortedlist):\n\t\t\tbreak\n\n\t# Display list of possible values and prompt for choices\n\tprint('\\nWHICH VALUE(S) DO YOU WANT? \\n')\n\t\n\t# Remove the header value from this list.\n\tdel choices[0]\n\t#choices.pop(0)\n\t\n\t# Make the list unique using set\n\tunique_choices = set(choices)\n\tunique_value = []\n\tchoices = list(unique_choices)\n\t\n\t# Set the count to 0\n\tcount = 0\n\t\n\t# Loop through the options menu until the user has picked all values they want or there are none remaining\n\twhile count < len(unique_choices):\n\t\t# Set the choice_count to 0\n\t\tchoice_count = 0\n\t\tfor value in unique_choices:\n\t\t\tprintValue = str(choice_count)\n\t\t\tprintValue += \". \"\n\t\t\tprintValue += value\n\t\t\tprint(printValue)\n\t\t\tchoice_count += 1\n\t\tprint ('{}. ALL'.format(choice_count))\n\t\tif count > 0:\n\t\t\tchoice_count += 1\n\t\t\tprint ('{}. 
No more'.format(choice_count))\n\t\n\t\t# Extend this to allow selection of all values\n\t\tunique_value.append(int(input('Please enter a value between 0 and {}: '.format(choice_count))))\n\t\n\t\tif unique_value[count] == len(unique_choices):\n\t\t\tprint(\"\")\n\t\t\tprint(\"you selected all\")\n\t\t\tcount = len(unique_choices)\n\t\t\t# Make unique_value contain all options.\n\t\t\tunique_value = []\n\t\t\tfor choice in choices:\n\t\t\t\tunique_value.append(choices.index(choice))\n\t\telif unique_value[-1] == (len(unique_choices)+1):\n\t\t\tprint(\"\")\n\t\t\tprint(\"You are done selecting\")\n\t\t\tdel unique_value[-1]\n\t\t\tcount = len(unique_choices)\n\t\telse:\n\t\t\t# print a single matching row\n\t\t\tprint('You selected: {}'.format(choices[unique_value[-1]]))\n\t\t\tcount += 1\n\t\n\t\t\n\t# Display a sample line that matches each value to verify it's working\n\tprint(header)\n\tfor value in unique_value:\n\t\t# Use the header_choice variable to find the row that needs to be sorted by.\n\t\t# For example if the message_type is chosen, return the header, then return each record that has the matching value.\n\t\tfor row in sortedlist:\n\t\t\tif row[header_choice] == choices[value]:\n\t\t\t\tprint(row)\n\t\t\t\tbreak\n\t\n\t# Split into a single CSV based off of the values chosen\n\twith open('split.csv', 'w') as newfile:\n\t\tfor value in unique_value:\t\n\t\t\twr = csv.writer(newfile, quoting=csv.QUOTE_ALL)\n\t\t\t# Output the header to said new file\n\t\t\twr.writerow(header)\n\t\t\t# then output every matching row to the same file\n\t\t\tfor row in sortedlist:\n\t\t\t\tif row[header_choice] == choices[value]:\n\t\t\t\t\twr.writerow(row)\n","sub_path":"csv_singlesplit.py","file_name":"csv_singlesplit.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"548054265","text":"# Copyright (c) 2020 Cristi Rusu \n# Copyright (c) 2020 Paul Irofti \n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n# import matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.util.shape import view_as_blocks\n# from skimage.io import imread\nfrom scipy.fftpack import dct\nimport timeit\nimport multiprocessing\n# import glob\nimport sys\nimport pickle\nimport tifffile\n#############################################################################\nn_components = 8 # number of atoms (n)\nn_features = n_components # signal dimension (m)\nn_nonzeros = 16 # sparsity (s)\nn_iterations = 100 # number of dictionary learning iterations (K)\n# tiffs = ['tiffs/19920612_AVIRIS_IndianPine_NS-line.tif']\ntiffs = ['tiffs/19920612_AVIRIS_IndianPine_EW-line_R.tif']\n# tiffs = glob.glob('tiffs/*.tif')\n\nPROC_NUM = 8 # how many processes to spawn\n#############################################################################\n\n\ndef process_data(queue1, queue2, barrier, images, n_features, n_nonzeros,\n n_iterations):\n # Data\n total_samples = []\n for i in range(images.shape[0]):\n samples = images[i, :, :]\n size_x = samples.shape[0]\n size_y = samples.shape[1]\n pad_x = (0, (n_features - size_x % n_features) % n_features)\n pad_y = (0, (n_features - size_y % n_features) % n_features)\n samples = np.pad(samples, (pad_x, pad_y),\n 'constant', constant_values=(0, 0))\n size_x = samples.shape[0]\n size_y = samples.shape[1]\n samples = view_as_blocks(samples, block_shape=(n_features, n_features))\n # (n_samples, n_features, n_features)\n samples = samples.reshape(int(size_x * size_y / n_features ** 2),\n n_features, n_features)\n # print(samples.max())\n for m in range(samples.shape[0]):\n samples[m] = samples[m] - samples[m].mean()\n if i == 0:\n total_samples = samples\n else:\n total_samples = np.concatenate((total_samples, samples), axis=0)\n\n del (samples)\n # print(total_samples.shape)\n\n n_samples = total_samples.shape[0]\n Q1 = dct(np.eye(n_components), norm='ortho', axis=0)\n Q2 = dct(np.eye(n_components), norm='ortho', axis=0)\n codes = np.zeros((total_samples.shape[0], Q1.shape[0], Q2.shape[0]))\n for m in range(n_samples):\n aux = Q1.T @ total_samples[m] @ Q2\n aux = aux.reshape(aux.size)\n ind_sort = np.argsort(np.abs(aux))[::-1]\n aux[ind_sort[n_nonzeros + 1:]] = 0\n codes[m] = aux.reshape(codes.shape[1], codes.shape[2])\n\n # errs = np.zeros(n_iterations + 1, dtype=float)\n err = 0.0\n for m in range(n_samples):\n err += np.linalg.norm(total_samples[m] - Q1 @ codes[m] @ Q2.T, 'fro')\n # err = err / total_samples.shape[0]\n # send the error to master\n queue1.put(err)\n\n # get confirmation to continue\n valuee = queue2.get()\n # barrier.wait()\n\n # main loop\n for iter in range(n_iterations):\n # update useful matrices\n XXT = np.zeros((Q1.shape[0], Q1.shape[0]))\n XYT = 
np.zeros((Q1.shape[0], total_samples.shape[1]))\n for m in range(n_samples):\n XXT = XXT + codes[m] @ codes[m].T\n XYT = XYT + codes[m] @ Q2.T @ total_samples[m].T\n Z = XYT.T @ XXT\n # send partial results for the calculation of Q1\n # barrier.wait()\n queue1.put(Z)\n\n # get new Q1\n Q1 = queue2.get()\n\n # update useful matrices\n XTX = np.zeros((Q2.shape[0], Q2.shape[0]))\n XTY = np.zeros((Q2.shape[0], total_samples.shape[2]))\n for m in range(n_samples):\n XTX = XTX + codes[m].T @ codes[m]\n XTY = XTY + codes[m].T @ Q1.T @ total_samples[m]\n Z = XTY.T @ XTX\n # send partial results for the calculation of Q2\n queue1.put(Z)\n # barrier.wait()\n\n # get new Q2\n Q2 = queue2.get()\n\n # update representations\n for m in range(n_samples):\n aux = Q1.T @ total_samples[m] @ Q2\n aux = aux.reshape(aux.size)\n ind_sort = np.argsort(np.abs(aux))[::-1]\n aux[ind_sort[n_nonzeros + 1:]] = 0\n codes[m] = aux.reshape(codes.shape[1], codes.shape[2])\n\n # compute error\n err = 0.0\n for m in range(n_samples):\n err += np.linalg.norm(total_samples[m] - Q1@codes[m]@Q2.T, 'fro')\n # err = err / total_samples.shape[0]\n # send the error to master\n # print('')\n # print(n_samples)\n # print(type(err))\n # print(err)\n # print('')\n queue1.put(err)\n\n # get confirmation to continue\n valuee = queue2.get()\n # barrier.wait()\n\n\ndef save_test(n_components, n_procs, errs):\n fname = 'sepsum'\n with open('data/{0}-n{1}-p{2}.dat'.format(\n fname, n_components, n_procs), 'wb') as fp:\n pickle.dump(errs, fp)\n\n\nif __name__ == '__main__':\n # print(\"Number of cpus:\", multiprocessing.cpu_count())\n start_time = timeit.default_timer()\n # send data from master to workers\n queue1 = multiprocessing.Queue()\n # send data from workers to master\n queue2 = multiprocessing.Queue()\n\n # user input?\n if len(sys.argv) > 1:\n PROC_NUM = int(sys.argv[1])\n n_components = int(sys.argv[2])\n n_features = int(sys.argv[3])\n\n I = tifffile.imread(tiffs[0])\n images = I.shape[0]\n # we cannot have more processes than images\n PROC_NUM = np.min([PROC_NUM, images])\n barrier = multiprocessing.Barrier(PROC_NUM)\n\n # distribute images among processes as equitable as possible\n images_per_process = (np.ones(PROC_NUM, dtype=int) *\n int(np.ceil(images / PROC_NUM)))\n diff = PROC_NUM * int(np.ceil(images / PROC_NUM)) - images\n index = len(images_per_process) - 1\n while diff > 0:\n images_per_process[index] -= 1\n diff -= 1\n index -= 1\n grid = [0]\n for val in images_per_process:\n grid.append(grid[-1] + val)\n # print(grid)\n\n processes = []\n for i in range(len(grid) - 1):\n print(grid[i], grid[i + 1])\n t = multiprocessing.Process(target=process_data, args=(\n queue1, queue2, barrier, I[grid[i]:grid[i + 1], :, :],\n n_features, n_nonzeros, n_iterations))\n t.start()\n processes.append(t)\n\n errs = np.zeros(n_iterations + 1, dtype=float)\n # collect initialization errors\n values = []\n for i in range(PROC_NUM):\n values.append(queue1.get())\n # print(values)\n errs[0] = np.sum(values)\n\n # and then send confirmation to continue\n for i in range(PROC_NUM):\n queue2.put(1)\n\n # main loop\n for iter in range(n_iterations):\n Z = np.zeros((n_components, n_components))\n for i in range(PROC_NUM):\n aux = queue1.get()\n Z += aux\n\n # update Q1\n U, _, V = np.linalg.svd(Z)\n Q1 = U @ V\n # send new Q1 to everyone\n for i in range(PROC_NUM):\n queue2.put(Q1)\n\n Z = np.zeros((n_components, n_components))\n for i in range(PROC_NUM):\n aux = queue1.get()\n Z += aux\n\n # update Q2\n U, _, V = np.linalg.svd(Z)\n Q2 = U @ V\n 
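# polar/Procrustes update: numpy returns V already transposed, so U @ V is the orthogonal matrix nearest to Z\n        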
# send new Q2 to everyone\n for i in range(PROC_NUM):\n queue2.put(Q2)\n\n # collect the reconstruction errors for this iteration\n values.clear()\n for i in range(PROC_NUM):\n values.append(queue1.get())\n errs[it + 1] = np.sum(values)\n\n # and then send confirmation to continue\n for i in range(PROC_NUM):\n queue2.put(1)\n\n # wrap up processes\n for proc in processes:\n proc.join()\n\n elapsed = timeit.default_timer() - start_time\n print(\"elapsed\", elapsed)\n\n # Results\n # plt.title(\"RMSE evolution\")\n # plt.plot(range(n_iterations + 1), errs)\n # plt.show()\n save_test(n_components, PROC_NUM, errs)\n","sub_path":"Python/test-sepsum-parallel-hyperspectral.py","file_name":"test-sepsum-parallel-hyperspectral.py","file_ext":"py","file_size_in_byte":8845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"109190250","text":"# Imports\nimport sqlite3\nimport os\nimport pandas as pd\nimport numpy as np\n\n# Create filepath to csv file\nDATABASE_FILEPATH = os.path.join(os.path.dirname(__file__), \"buddymove_holidayiq.csv\")\n\n# Read in csv file to dataframe\nplace_review_df = pd.read_csv(DATABASE_FILEPATH)\n\n# Basic dataframe exploration\nprint(place_review_df.sample(5)) # Get sample of 5 random rows from dataframe\nprint(\"\\n\", place_review_df.shape) # Check shape of data\nprint(\"\\n\", place_review_df.isnull().sum()) # Check for null values\n\n# Rename User Id column to something more easily useable\nplace_review_df = place_review_df.rename(columns={'User Id' : \"UserId\"})\n\n# Convert dataframe to SQL\nplace_review_df.to_sql('buddymove_holidayiq.sqlite3', con=sqlite3.connect('buddymove_holidayiq.sqlite3'), if_exists='replace')\n\n# Instantiate connection\nconnection = sqlite3.connect('buddymove_holidayiq.sqlite3')\nconnection.row_factory = sqlite3.Row\nprint(\"\\n\", type(connection)) # Check connection type\n\n# Instantiate cursor\ncursor = connection.cursor()\nprint(type(cursor)) # Check cursor type\n\n\n\"\"\"--------------------------------------- SQL COMMAND CODE ---------------------------------------\"\"\"\n\n\n# SQL command code to select number of rows in data\nrow_count = \"\"\"\n SELECT\n\t count(UserId) as row_count\n\n FROM\n\t \"buddymove_holidayiq.sqlite3\"\n \"\"\"\n\n# SQL command code to select number of users who had Nature category review of at least 100 and Shopping category review of at least 100\nnature_shop_count = \"\"\"\n SELECT\n count(UserId) as high_nature_shop_count\n\n FROM\n \"buddymove_holidayiq.sqlite3\"\n\n WHERE\n Nature >= 100 AND\n Shopping >= 100\n \"\"\"\n\n# SQL command code to select the average number of reviews for each category\naverage_reviews = \"\"\"\n SELECT\n AVG(Sports) as avg_sports_review,\n AVG(Religious) as avg_religious_review,\n AVG(Nature) as avg_nature_review,\n AVG(Theatre) as avg_theatre_review,\n AVG(Shopping) as avg_shopping_review,\n AVG(Picnic) as avg_picnic_review\n\n FROM\n \"buddymove_holidayiq.sqlite3\"\n \"\"\"\n\n\n\"\"\"--------------------------------------- SQL EXECUTION CODE ---------------------------------------\"\"\"\n\n\n# Return row_count result from execution\ncount_result = cursor.execute(row_count).fetchall() # Execute row_count\n\n# Return number of users who had Nature category review of at least 100 and Shopping category review of at least 100\nnature_shop_result = cursor.execute(nature_shop_count).fetchall() # Execute nature_shop_count\n\n# Return average number of reviews for each category\naverage_reviews_result = cursor.execute(average_reviews).fetchall() # Execute average_reviews\n\n\n\"\"\"--------------------------------------- SQL RELAY CODE ---------------------------------------\"\"\"\n\n\n# Relay statement with number of rows in data\nfor row in count_result:\n print(f\"\\nNumber of rows: {row['row_count']}\")\n\n# Relay statement with number of people who had Nature category review of at least 100 and Shopping category review of at least 100\nfor row in nature_shop_result:\n print(f\"\\nThe Number of People Who Reviewed At Least 100 in Nature and Shopping categories: {row['high_nature_shop_count']}\")\n\n# Relay statement with average reviews for each category\nfor row in average_reviews_result:\n print(f\"\\nAverage Sports Review: {row['avg_sports_review']:.2f}\") # Average reviews for Sports\n print(f\"Average Religious Review: {row['avg_religious_review']:.2f}\") # Average 
\n\"\"\"--------------------------------------- SQL EXECUTION CODE ---------------------------------------\"\"\"\n\n\n# Return row_count result from execution\ncount_result = cursor.execute(row_count).fetchall() # Execute row_count\n\n# Return the number of users with at least 100 Nature reviews and at least 100 Shopping reviews\nnature_shop_result = cursor.execute(nature_shop_count).fetchall() # Execute nature_shop_count\n\n# Return the average number of reviews for each category\naverage_reviews_result = cursor.execute(average_reviews).fetchall() # Execute average_reviews\n\n\n\"\"\"--------------------------------------- SQL RELAY CODE ---------------------------------------\"\"\"\n\n\n# Relay statement with the number of rows in the data\nfor row in count_result:\n print(f\"\\nNumber of rows: {row['row_count']}\")\n\n# Relay statement with the number of users who reviewed at least 100 in both the Nature and Shopping categories\nfor row in nature_shop_result:\n print(f\"\\nNumber of users who reviewed at least 100 in the Nature and Shopping categories: {row['high_nature_shop_count']}\")\n\n# Relay statement with the average reviews for each category\nfor row in average_reviews_result:\n print(f\"\\nAverage Sports Review: {row['avg_sports_review']:.2f}\") # Average reviews for Sports\n print(f\"Average Religious Review: {row['avg_religious_review']:.2f}\") # Average reviews for Religious\n print(f\"Average Nature Review: {row['avg_nature_review']:.2f}\") # Average reviews for Nature\n print(f\"Average Theatre Review: {row['avg_theatre_review']:.2f}\") # Average reviews for Theatre\n print(f\"Average Shopping Review: {row['avg_shopping_review']:.2f}\") # Average reviews for Shopping\n print(f\"Average Picnic Review: {row['avg_picnic_review']:.2f}\") # Average reviews for Picnic\n","sub_path":"module1-introduction-to-sql/buddymove_holidayiq.py","file_name":"buddymove_holidayiq.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"55293593","text":"# -*- coding: utf-8 -*-\r\nfrom unittest import mock\r\nimport unittest\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport parsers\r\nfrom parsers.echo.nowadzielnica import NowaDzielnicaParser\r\nfrom parsers.vparsers import StatusParser\r\n\r\nfrom tests.parsers import ParserTestCaseMixin\r\n\r\n\r\n@mock.patch.object(parsers.echo.nowadzielnica.SingleRequestLoaderMixin, \"load\")\r\nclass NowaDzielnicaParserTest(ParserTestCaseMixin, unittest.TestCase):\r\n \r\n content = [\r\n BeautifulSoup(\r\n \"\"\"\r\n \r\n
\r\n \r\n \r\n NR | \r\n Nazwa | \r\n Piętro | \r\n Powierzchnia | \r\n Pokoje | \r\n Status | \r\n | \r\n | \r\n
\r\n \r\n \r\n \r\n | 1 | \r\n \r\n A.0.1\r\n | \r\n Parter | \r\n \r\n 07300\r\n 73.00 m²\r\n | \r\n 3 | \r\n \r\n Dostępne\r\n | \r\n Karta PDF\r\n Szczegóły\r\n | \r\n \r\n \r\n | \r\n
\r\n \r\n | 2 | \r\n \r\n A.0.2\r\n | \r\n Parter | \r\n \r\n 04000\r\n 40.00 m²\r\n | \r\n 2 | \r\n \r\n Dostępne\r\n | \r\n Karta PDF\r\n Szczegóły\r\n | \r\n \r\n \r\n | \r\n
\r\n \r\n
\r\n
\r\n \"\"\", \"lxml\")\r\n ]\r\n content_empty = [ BeautifulSoup(\"\", \"lxml\") ]\r\n \r\n parser = NowaDzielnicaParser\r\n records_count = 2\r\n test_record_index = 0\r\n test_record = {\r\n \"number\": \"A.0.1\",\r\n \"floor\": 0,\r\n \"area\": 73.00,\r\n \"rooms\": 3,\r\n \"status\": StatusParser.AVAILABLE,\r\n \"plan\": \"http://www.nowadzielnica.pl/pobierz,karta,karta_mieszkania.A.0.1.pdf\"\r\n }\r\n","sub_path":"tests/parsers/test_nowadzielnica.py","file_name":"test_nowadzielnica.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"584242366","text":"#!/usr/bin/python3\n\nimport argparse\nimport default_nemesis_proto\nimport logging\nimport nemesis_pb2\nimport os\nimport sandbox\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\n\nclass Runner(object):\n def __init__(self,\n logger,\n exe_path,\n conf,\n src_path = None,\n check_path = None,\n custom = False,\n sandbox_path = None,\n generator = False):\n\n self.exe_path = exe_path\n self.conf = conf\n self.src_path = src_path\n self.check_path = check_path\n self.custom = custom\n self.logger = logger\n self.generator = generator\n\n self.run_dir = None\n self.run_exe = None\n self.run_conf = None\n self.run_in = None\n self.run_src = None\n self.run_out = None\n self.run_check = None\n self.run_log = None\n self.run_ans = None\n\n self.sandbox_path = '/usr/local/sbin/sandbox'\n\n if sandbox_path:\n self.sandbox_path = sandbox_path\n\n def __init_self_var__(self):\n self.logger.info('Runner.__init_self_var__()')\n self.run_exe = os.path.join(self.run_dir, 'bin')\n self.run_in = os.path.join(self.run_dir, 'in')\n self.run_src = os.path.join(self.run_dir, 'src')\n self.run_out = os.path.join(self.run_dir, 'out')\n self.run_check = os.path.join(self.run_dir, 'check')\n self.run_log = os.path.join(self.run_dir, 'log')\n self.run_conf = os.path.join(self.run_dir, 'conf')\n self.run_ans = os.path.join(self.run_dir, 'ans')\n\n def __run_custom__(self):\n self.logger.info('Runner.__run_custom__()')\n status = None\n time = None\n memory = None\n out = None\n\n try:\n self.run_dir = tempfile.mkdtemp()\n self.logger.info('Runner.__run_custom__(): create {}'.format(self.run_dir))\n shutil.chown(self.run_dir, user='nobody', group='nogroup')\n\n self.__init_self_var__()\n\n self.logger.info('Runner.__run_custom__(): copy {} => {}'.format(self.exe_path, self.run_exe))\n\n shutil.copyfile(self.exe_path, self.run_exe)\n shutil.chown(self.run_exe, user='nobody', group='nogroup')\n os.chmod(self.run_exe, 0o744)\n\n self.logger.info('Runner.__run_custom__(): write input {}'.format(self.run_in))\n\n try:\n with open(self.run_in, \"wb\") as run_input:\n run_input.write(self.conf.input)\n except:\n raise RuntimeError('write input error')\n\n shutil.chown(self.run_in, user='nobody', group='nogroup')\n except:\n self.logger.error('Runner.__run_custom__(): shutil exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('shutil exception')\n\n try:\n self.logger.info('Runner.__run_custom__(): subprocess run sandbox')\n self.logger.info('{}'.format([\n self.sandbox_path, '--exe', self.run_exe, '--input',\n self.run_in, '--output', self.run_ans, '--time_limit',\n str(self.conf.time_limit), '--memory_limit',\n str(self.conf.memory_limit), '--id',\n str(self.conf.id), '--log', self.run_log\n ]))\n proc = subprocess.run([\n self.sandbox_path, '--exe', self.run_exe, '--input',\n self.run_in, '--output', self.run_ans, '--time_limit',\n str(self.conf.time_limit), '--memory_limit',\n str(self.conf.memory_limit), '--id',\n str(self.conf.id), '--log', self.run_log\n ])\n except:\n self.logger.error('Runner.__run_custom__(): sandbox subprocess exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('sandbox subprocess exception')\n\n if proc.returncode != 0:\n self.logger.error('Runner.__run_custom__(): sandbox returncode: {}'.format(proc.returncode))\n shutil.rmtree(self.run_dir)\n raise RuntimeError('sandobx returncode != 0')\n\n result = default_nemesis_proto.default_Status_Group_Test()\n #result = nemesis_pb2.Status.Group.Test()\n\n try:\n with 
try:\n with open(self.run_log, \"rb\") as result_file:\n result.ParseFromString(result_file.read())\n except Exception:\n self.logger.error('Runner.__run_custom__(): parse log file exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('parse log file exception')\n\n status = result.status\n time = result.time\n memory = result.memory\n\n try:\n with open(self.run_ans, \"rb\") as answer_file:\n if self.generator:\n out = answer_file.read()\n else:\n # non-generator runs only keep a 256-byte preview of the output\n out = answer_file.read(256)\n except Exception:\n self.logger.error('Runner.__run_custom__(): read answer file error')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('read answer file error')\n\n shutil.rmtree(self.run_dir)\n\n return (status, time, memory, out)\n\n def __run_submit__(self):\n self.logger.info('Runner.__run_submit__()')\n try:\n self.run_dir = tempfile.mkdtemp()\n self.logger.info('Runner.__run_submit__(): create {}'.format(self.run_dir))\n shutil.chown(self.run_dir, user='nobody', group='nogroup')\n\n self.__init_self_var__()\n\n self.logger.info('Runner.__run_submit__(): copy {} => {}'.format(self.exe_path, self.run_exe))\n\n shutil.copyfile(self.exe_path, self.run_exe)\n shutil.chown(self.run_exe, user='nobody', group='nogroup')\n os.chmod(self.run_exe, 0o744)\n\n self.logger.info('Runner.__run_submit__(): write input: {}'.format(self.run_in))\n\n try:\n with open(self.run_in, 'wb') as run_input:\n run_input.write(self.conf.input)\n except Exception:\n raise RuntimeError('write input error')\n\n shutil.chown(self.run_in, user='nobody', group='nogroup')\n\n self.logger.info('Runner.__run_submit__(): copy {} => {}'.format(self.src_path, self.run_src))\n\n shutil.copyfile(self.src_path, self.run_src)\n shutil.chown(self.run_src, user='nobody', group='nogroup')\n except Exception:\n self.logger.error('Runner.__run_submit__(): shutil exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('shutil exception')\n\n try:\n self.logger.info('Runner.__run_submit__(): subprocess run sandbox')\n proc = subprocess.run([\n self.sandbox_path, '--exe', self.run_exe, '--input',\n self.run_in, '--output', self.run_ans, '--time_limit',\n str(self.conf.time_limit), '--memory_limit',\n str(self.conf.memory_limit), '--id',\n str(self.conf.id), '--log', self.run_log\n ])\n except Exception:\n self.logger.error('Runner.__run_submit__(): sandbox subprocess exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('sandbox subprocess exception')\n\n self.logger.info('Runner.__run_submit__(): sandbox returncode: {}'.format(proc.returncode))\n if proc.returncode != 0:\n self.logger.error('Runner.__run_submit__(): sandbox returncode error: {}'.format(proc.returncode))\n shutil.rmtree(self.run_dir)\n raise RuntimeError('sandbox returncode != 0')\n\n result = default_nemesis_proto.default_Status_Group_Test()\n #result = nemesis_pb2.Status.Group.Test()\n\n try:\n with open(self.run_log, \"rb\") as result_file:\n result.ParseFromString(result_file.read())\n except Exception:\n self.logger.error('Runner.__run_submit__(): parse log file exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('parse log file exception')\n\n try:\n self.logger.info('Runner.__run_submit__(): write output => {}'.format(self.run_out))\n\n try:\n with open(self.run_out, \"wb\") as run_output:\n run_output.write(self.conf.output)\n except Exception:\n raise RuntimeError('write output error')\n\n shutil.chown(self.run_out, user='nobody', group='nogroup')\n\n self.logger.info('Runner.__run_submit__(): copy {} => {}'.format(self.check_path, self.run_check))\n\n shutil.copyfile(self.check_path, self.run_check)\n 
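# the checker runs as the unprivileged 'nobody' user inside the sandbox,\n # so it must be owned by that user and be marked executable\n 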
shutil.chown(self.run_check, user='nobody', group='nogroup')\n os.chmod(self.run_check, 0o744)\n except Exception:\n self.logger.error('Runner.__run_submit__(): shutil exception')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('shutil exception')\n\n check_exit_code = None\n check_stdout = None\n check_stderr = None\n\n try:\n self.logger.info('Runner.__run_submit__(): sandbox run check')\n proc = sandbox.Sandbox(\n self.run_check, [\n '--src', self.run_src, '--input', self.run_in, '--output',\n self.run_out, '--answer', self.run_ans\n ],\n memory_limit = 1024 * 1024 * 1024,\n time_limit = 120,\n nobody = True,\n logger = self.logger)\n check_exit_code, check_stdout, check_stderr = proc.run()\n except Exception:\n self.logger.error('Runner.__run_submit__(): sandbox run check error')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('sandbox error')\n\n # checker protocol: exit code 0 means accepted, 1 means wrong answer,\n # anything else is an internal checker error\n if check_exit_code == 0:\n result.verdict = True\n elif check_exit_code == 1:\n result.verdict = False\n else:\n self.logger.error('Runner.__run_submit__(): checker internal error')\n shutil.rmtree(self.run_dir)\n raise RuntimeError('checker error')\n\n shutil.rmtree(self.run_dir)\n result.id = self.conf.id\n return result\n\n def run(self):\n self.logger.info('Runner.run()')\n if not self.conf.IsInitialized():\n self.logger.error('Runner.run(): conf is not fully initialized')\n raise RuntimeError('conf is not fully initialized')\n if self.custom:\n return self.__run_custom__()\n return self.__run_submit__()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Nemesis code runner\")\n parser.add_argument('--exe', dest=\"exe_path\", default=\"/dev/null\", help=\"exe file\")\n parser.add_argument('--input', dest=\"input_path\", default=\"/dev/null\", help=\"input file\")\n parser.add_argument('--memory', dest=\"memory_limit\", type=int, default=1024 * 32, help=\"memory limit\")\n parser.add_argument('--time', dest=\"time_limit\", type=int, default=1, help=\"time limit\")\n parser.add_argument('--src', dest=\"src_path\", default=None, help=\"source path\")\n parser.add_argument('--output', dest=\"output_path\", default=None, help=\"output path\")\n parser.add_argument('--check', dest=\"check_path\", default=None, help=\"check path\")\n parser.add_argument('--custom', dest=\"custom\", default=False, help=\"custom invocation\", action='store_true')\n parser.add_argument('--sandbox', dest=\"sandbox_path\", default=None, help=\"sandbox path\")\n\n args = parser.parse_args()\n\n conf = default_nemesis_proto.default_Task_Group_Test()\n conf.id = 1\n conf.time_limit = args.time_limit\n conf.memory_limit = args.memory_limit\n\n try:\n with open(args.input_path, 'rb') as input_file:\n conf.input = input_file.read()\n except Exception:\n raise RuntimeError('read input error')\n\n if args.output_path:\n try:\n with open(args.output_path, 'rb') as output_file:\n conf.output = output_file.read()\n except Exception:\n raise RuntimeError('read correct output error')\n\n logging.basicConfig(level = logging.INFO)\n\n logger = logging.getLogger('runner_logger')\n\n runner = Runner(\n exe_path = args.exe_path,\n conf = conf,\n src_path = args.src_path,\n check_path = args.check_path,\n custom = args.custom,\n sandbox_path = args.sandbox_path,\n logger = logger)\n\n print(runner.run())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/python/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":12442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"297223371","text":"import requests as rq\nimport urllib.request\nimport urllib.parse as parse\n\nurl = \"http://apis.data.go.kr/B552584/RfidFoodWasteServiceNew/getTotalTimeList\"\nservice_key = \"6CA3mYDBH4MjNuGnfZf6cbLOODeOsdEEu5ufYjgPu8a18B%2FDoN9lifD3Q9XUOV33U8G5d%2FKlBpOGRIf1DptR7Q%3D%3D\"\nqueryParams = '?' + parse.urlencode({ parse.quote_plus('ServiceKey') : '서비스키' })\n\nres = rq.get(url, queryParams)\nprint(res)\nprint(res.status_code)\nif res.status_code == 200:\n print(\"[성공]\")\nelse:\n print(\"에러\")\n","sub_path":"calltest.py","file_name":"calltest.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"188129667","text":"#!/usr/bin/env python\n\n## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.\n## Use of this source code is governed by a BSD-style license that can be\n## found in the COPYING file.\n\n\"\"\" Contains a class that holds diagram informations\n.. module:: node\n\"\"\"\n\nimport converter.node as node\nimport converter.xar_types as xar_types\n\n\nclass ActuatorKey(node.Node):\n \"\"\" Stores informations about ActuatorKey in the xar format\n \"\"\"\n\n def __init__(self, attrs):\n super(ActuatorKey, self).__init__(\"ActuatorKey\")\n\n # Attributes\n self.frame = attrs.getValue('frame')\n self.value = attrs.getValue('value')\n self.smooth = attrs.getValue('smooth')\n self.symmetrical = attrs.getValue('symmetrical')\n\n # Elements\n self.tangents = []\n\n self._function_map = {'Tangent': ActuatorKey.attach_tangent}\n\n def attach_tangent(self, attrs):\n self.tangents.append(xar_types.tangent(attrs.getValue(\"side\"),\n attrs.getValue(\"interpType\"),\n attrs.getValue(\"abscissaParam\"),\n attrs.getValue(\"ordinateParam\")))\n\n def beacon(self):\n return \"Key\"\n","sub_path":"xarconverter/converter/node/actuator_key.py","file_name":"actuator_key.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}