diff --git "a/3532.jsonl" "b/3532.jsonl" new file mode 100644--- /dev/null +++ "b/3532.jsonl" @@ -0,0 +1,634 @@ +{"seq_id":"644406453","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 12 16:48:43 2020\n\n@author: Espen\n\"\"\"\n\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"]=(7,4)\n\n\nfile = open(\"C:/Users/Espen/OneDrive – NTNU/TFY4235 Numfys/Assignment 2/Results/Task 9/MetadataMultisys0.01-5.0-100avgs.txt\",\"r\")\n\ndtMult = []\ntauMult = []\ndeltaUMult = []\nkBTMult = []\nendPosMult = []\ndriftVelsMult = []\ndegeneracy = int(file.readline().split()[-1])\n\nfor line in file.readlines():\n ll = line.split()\n dtMult.append(float(ll[1]))\n tauMult.append(float(ll[2]))\n deltaUMult.append(float(ll[6]))\n kBTMult.append(float(ll[7]))\n endPosMult.append(float(ll[8]))\n driftVelsMult.append(float(ll[9]))\n\nfile.close()\n\navgDriftVel = sum(driftVelsMult)/len(driftVelsMult)\n\nprint(avgDriftVel, \"um/s\")\n##For plotting the average drift velocities as a function of tau\n\n##Average over the degeneracy\nfig = plt.figure()\ndriftVelAvg = []\n#degeneracy = 100 #How many trials per choice of tau? \ntempSum = driftVelsMult[0]\nfor i in range(1, len(tauMult)+1):\n if i%degeneracy != 0:\n tempSum += driftVelsMult[i]\n elif i%degeneracy ==0:\n driftVelAvg.append((tempSum/degeneracy))\n if i < len(tauMult):\n tempSum = driftVelsMult[i]\n\nz = tauMult[::degeneracy]\n\n\nprint(f\"Max velocity is {max(driftVelAvg)} µm/s, which occurs at tau = {z[driftVelAvg.index(max(driftVelAvg))]}\")\n\nplt.plot(z,driftVelAvg)\nplt.title(\"Drift velocity vs. time periodicity of ratchet\")\nplt.xlabel(r\"$\\tau$ [s]\")\nplt.ylabel(r\"$\\langle v_{drift} \\rangle$ [µm/s]\")\n\nplt.show()\n","sub_path":"Assignment 2/MultisysPlot.py","file_name":"MultisysPlot.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"433441876","text":"import signal\nfrom argparse import ArgumentParser\n\nfrom wom.corpgen import ConfigMap, DEFAULT_SETTINGS, CorpusGenerator\nfrom wom.util import basic_logger as log\n\n\ndef main():\n\n # handle interrupts\n def sigint_handler(input_signal, frame):\n log.info(\"received stop signal SIGINT\")\n cg.stop()\n\n # register sigint handler\n signal.signal(signal.SIGINT, sigint_handler)\n\n # set up an command line option parser\n parser = ArgumentParser()\n parser.add_argument(\n \"--config\",\n required=True,\n action=\"store\",\n dest=\"config_file\",\n metavar=\"FILE\",\n help=\"specify a path to a configuration\",\n )\n\n # parse command line options\n args = parser.parse_args()\n\n # create a central configuration source\n config = ConfigMap()\n # apply default configurations\n config.apply_map(DEFAULT_SETTINGS)\n # apply input configuration\n config.apply_file(args.config_file)\n\n # initialize and run the corpus generator\n cg = CorpusGenerator(config)\n cg.start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"wom/corpgen/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"543264794","text":"#https://www.jianshu.com/p/d9f1a5d15de8\nimport requests\nimport json\nimport time\n\nwith open(r'C:\\Users\\Administrator\\Desktop\\xag\\xag.csv','w+',encoding='utf-8') as f:\n for a in range(3):\n url = \"https://movie.douban.com/j/new_search_subjects?sort=T&range=0,10&tags=%E5%8A%B1%E5%BF%97&start={}\".format(20*3)\n #这里跟之前的不一样,因为返回的是 
json 文件\n #之前我们用的 .text 是需要网页返回文本的信息,而这里返回的是 json文件所以用 .json()\n file = requests.get(url).json()\n time.sleep(2)\n # print(len(file['data']))\n for index in range(20):\n #取出字典中 'data' 下第 [i] 部电影的信息;取出字典中的值,需要在方括号中指明值对应的键\n dict = file['data'][index]\n #电影链接\n href = dict['url']\n #标题\n title = dict['title']\n #评分\n rate=dict['rate']\n #主演\n cast=dict['casts']\n #因为有多名演员,这里用了 join() 函数,在字符串中间加入空格分隔。\n# print('{} {} {} {}\\n'.format(title,rate,' '.join(cast),url))\n# f.write('{} {} {} {}\\n'.format(title,rate,' '.join(cast),url))\n f.write('{},{},{},{}\\n'.format(title,rate,' '.join(cast),url))\n \nprint('写入完成')\n \n ","sub_path":"Python/爬虫/DEMO/爬虫基本7课【xpath】/example7 爬JSON数据网站并保存到本地.py","file_name":"example7 爬JSON数据网站并保存到本地.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"25653525","text":"\"\"\"\nThis file is part of Buttonwood.\n\nButtonwood is a python software package created to help quickly create, (re)build, or\nanalyze markets, market structures, and market participants. \n\nMIT License\n\nCopyright (c) 2016-2020 Peter F. Nabicht\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nFAR = 1\nFOK = 2\nFAK = 3\n\nTIME_IN_FORCE_STRINGS = {FAR: \"FAR\",\n FOK: \"FOK\",\n FAK: \"FAK\",\n }\n\nTIME_IN_FORCE_STR_TO_INT = {v: k for k, v in TIME_IN_FORCE_STRINGS.items()}\n\ndef time_in_force_str(int_id):\n assert isinstance(int_id, int)\n if int_id not in TIME_IN_FORCE_STRINGS:\n raise Exception(\"%d is an unknown time in force identifier\")\n return TIME_IN_FORCE_STRINGS[int_id]\n\nLIMIT = 100\nMARKET = 101\n\nORDER_TYPE_STRINGS = {LIMIT: \"Limit\",\n MARKET: \"Market\",\n }","sub_path":"buttonwood/MarketObjects/Events/OrderEventConstants.py","file_name":"OrderEventConstants.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"532641639","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\n# functions \r\ndef get_title_from_index(index):\r\n\treturn df[df.index == index][\"title\"].values[0]\r\n\r\ndef get_index_from_title(title):\r\n\treturn df[df.title == title][\"index\"].values[0]\r\n\r\n\r\n#Step 1: Read File\r\ndf=pd.read_csv(\"movie_dataset.csv\") \r\n\r\n\r\n#Step 2: Select Features\r\nfeatures = ['keywords','cast','genres','director']\r\nfor feature in features:\r\n\tdf[feature] = df[feature].fillna('')\r\n\r\n\r\n#Step 3:combining all features\r\ndef combine_features(row): \r\n\t\treturn row[\"keywords\"] +\" \"+row[\"cast\"] +\" \"+row[\"genres\"] +\" \"+row[\"director\"] \r\ndf[\"combined_features\"] = df.apply(combine_features,axis=1)\r\n\r\n\r\n#Step 4: extracting features from dataset\r\ncv=CountVectorizer()\r\ncount_matrix=cv.fit_transform(df[\"combined_features\"])\r\n\r\n#Step 5: using cosine similarity\r\ncosine_sim = cosine_similarity(count_matrix)\r\n\r\n\r\n# Step 6: Get index of this movie from its title\r\nmovie_user_likes = \"The Wood\"\r\nmovie_index = get_index_from_title(movie_user_likes)\r\n\r\n\r\n#Step 7: generate the similar movie matrix and sorted in descending order(similarity score)\r\nsimilar_movies = list(enumerate(cosine_sim[movie_index]))\r\nsorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True)\r\n\r\n\r\n#Step 8: printing the similar movies\r\ni=0\r\nprint(\"Top 10 similar movies to \"+movie_user_likes+\" are:\\n\")\r\nfor movie in sorted_similar_movies:\r\n\tprint(get_title_from_index(movie[0]))\r\n\ti=i+1\r\n\tif i>10:\r\n\t\tbreak\r\n\r\nprint(\"\\n\")\t\r\n","sub_path":"movie_recommendation.py","file_name":"movie_recommendation.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"95303198","text":"# -*- coding: utf-8 -*-\nimport httplib, urllib, json\nHOST_NAME = \"front-end\"\nPORT = 8079\ntest_count = 0\ntable_content = \"\"\n\ndef testResult(count,description,expected_value,actual_value):\n if(expected_value == actual_value):\n result = \"Passed\"\n class_result = \"table-success\"\n else:\n result = \"Failed\"\n class_result = \"table-danger\"\n content = \"%i%s%s%s%s\"%(class_result,count,description,expected_value,actual_value,result)\n return content\n\n# Test case 1 - Testing version URL\ntest_count = 
test_count+1\ndescription = \"/cart url should return 200 status code\"\nconn = httplib.HTTPConnection(HOST_NAME,PORT)\nconn.request(\"GET\",\"/cart\")\nresponse = conn.getresponse()\nresult_content = testResult(test_count,description,200,response.status)\ntable_content = table_content + result_content\nconn.close()\n\n# Test case 2 - Testing version URL\ntest_count = test_count+1\ndescription = \"/ url should return 200 status code\"\nconn = httplib.HTTPConnection(HOST_NAME,PORT)\nconn.request(\"GET\",\"/\")\nresponse = conn.getresponse()\nresult_content = testResult(test_count,description,200,response.status)\ntable_content = table_content + result_content\nconn.close()\n\n# Test case 2 - Testing version URL\n'''\ntest_count = test_count+1\ndescription = \"/api url should return name & version info\"\nconn = httplib.HTTPConnection(HOST_NAME,PORT)\nconn.request(\"GET\",\"/api\")\n\nexpected_value = [\"Version\",\"name\"]\n\nresponse = conn.getresponse()\nresponse_content = response.read()\njson_response = json.loads(response_content)\n\nresult_content = testResult(test_count,description,set(expected_value),set(json_response.keys()))\ntable_content = table_content + result_content\nconn.close()\n'''\n\n# Test case 3 - Testing version URL\ntest_count = test_count+1\ndescription = \"/catalogue?size=5 url should return 200 status code\"\nconn = httplib.HTTPConnection(HOST_NAME,PORT)\nconn.request(\"GET\",\"/catalogue?size=5\")\nresponse = conn.getresponse()\nresult_content = testResult(test_count,description,200,response.status)\ntable_content = table_content + result_content\nconn.close()\n\n\n\ncontent = '
\\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n '+table_content+'\\\n \\\n#DescriptionExpectedActualResult\\\n
\\\n \\\n\\\n'\n\nf = open(\"./functional-test-result/header.html\",\"r\")\nheader_content = f.read()\nf.close()\n\nf = open(\"./functional-test-result/index.html\", \"w+\")\nf.write(header_content)\nf.write(content)\nf.close()\n","sub_path":"functionalTest.py","file_name":"functionalTest.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"283692470","text":"import os\nimport asyncio\n\nfrom aiodns import DNSResolver\nfrom aiodns.error import DNSError\n\nloop = asyncio.get_event_loop()\nresolver = DNSResolver(loop=loop)\n\ndef get_entries(file):\n with open(file, \"r\") as entries:\n return entries.readlines()\n\ndef sort_entries(file):\n \"\"\"\n Sort the blacklist alphabetically. Keeps things nice and ordered.\n \"\"\"\n\n entries = get_entries(file)\n entries.sort()\n\n with open(file, \"w\") as new:\n new.writelines([\"{}\".format(entry) for entry in entries])\n\nasync def remove_dead_entries(entries):\n \"\"\"\n Remove any domains which don't have A (IPv4) or AAAA (IPv6) records.\n Could be written better...\n \"\"\"\n alive = []\n\n for entry in set(entries):\n ipv4 = True\n ipv6 = True\n\n try:\n ips = await resolver.query(entry.strip(), \"A\")\n except DNSError:\n ipv4 = False\n\n try:\n ips = await resolver.query(entry.strip(), \"AAAA\")\n except DNSError:\n ipv6 = False\n\n if ipv4 or ipv6:\n alive.append(entry)\n\n return alive\n\ndef remove_duplicates(file):\n \"\"\"\n Remove duplicate entries from a file.\n \"\"\"\n\n entries = get_entries(file)\n \n with open(file, \"w\") as new:\n for entry in set(entries):\n new.write(entry)\n\nasync def generate_ip_blacklist(entries):\n with open(\"release/ip-blacklist\", \"w\") as ip_blacklist:\n for entry in entries:\n try:\n ips = await resolver.query(entry.strip(), \"A\")\n ip_blacklist.writelines([\"{}\\n\".format(ip.host) for ip in ips])\n except Exception:\n pass\n\n try:\n ips = await resolver.query(entry.strip(), \"AAAA\")\n ip_blacklist.writelines([\"{}\\n\".format(ip.host) for ip in ips])\n except Exception:\n pass\n\ndef generate_cloaking_rules(entries):\n with open(\"release/cloaking-rules\", \"w\") as cloaking:\n cloaking.writelines([\"{} 0.0.0.0\\n\".format(entry.strip()) for entry in entries])\n\nasync def main():\n if not os.path.exists(\"release/\"):\n os.makedirs(\"release/\")\n\n entries = await remove_dead_entries(get_entries(\"blacklist\"))\n\n with open(\"blacklist\", \"w\") as blacklist:\n blacklist.writelines(entries)\n\n print(\"Generating IP blacklist.\")\n await generate_ip_blacklist(entries)\n\n print(\"Generating cloaking rules.\")\n generate_cloaking_rules(entries)\n\n print(\"Removing duplicates from blacklist.\")\n remove_duplicates(\"blacklist\")\n\n print(\"Removing duplicates from IP blacklist.\")\n remove_duplicates(\"release/ip-blacklist\")\n\n print(\"Removing duplicates from cloaking rules.\")\n remove_duplicates(\"release/cloaking-rules\")\n\n print(\"Sorting blacklist.\")\n sort_entries(\"blacklist\")\n\n print(\"Sorting IP blacklist.\")\n sort_entries(\"release/ip-blacklist\")\n\n print(\"Sorting cloaking rules.\")\n sort_entries(\"release/cloaking-rules\")\n\nloop.run_until_complete(main())","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"308066704","text":"from django.conf.urls import url\nfrom web import views\n\nurlpatterns = [\n url(r'^signin/', 
views.signin),\n url(r'^signout/', views.signout),\n url(r'^index/', views.index),\n url(r'^base/', views.base),\n url(r'^ajax/', views.ajax),\n url(r'^addfavor/', views.addfavor),\n url(r'^getreply', views.getreply),\n url(r'^submit_reply', views.submit_reply),\n url(r'^send_message/', views.send_message),\n ]\n","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"213472824","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"ANA\")\n\nprocess.load(\"SimGeneral.HepPDTESSource.pythiapdt_cfi\")\nprocess.load(\"Configuration.StandardSequences.Services_cff\")\nprocess.load(\"GeneratorInterface.HydjetInterface.hydjetDefault_cfi\")\nprocess.load('Configuration.StandardSequences.Generator_cff')\n\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100000)\n )\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.load(\"GeneratorInterface.PyquenInterface.pyquenDefault_cfi\")\n\nprocess.generator.doQuench = True\nprocess.generator.doRadiativeEnLoss = True\nprocess.generator.doCollisionalEnLoss = True\nprocess.generator.qgpInitialTemperature = 1\n\nprocess.generator.doIsospin = cms.bool(False)\nprocess.generator.comEnergy =cms.double(2760)\nprocess.generator.PythiaParameters.parameterSets = cms.vstring('pythiaUESettings','ppJets','kinematics')\nprocess.generator.PythiaParameters.kinematics = cms.vstring('CKIN(3) = 100','CKIN(4) = 9999')\n\nprocess.RandomNumberGeneratorService.generator.initialSeed = 5\n\nprocess.SimpleMemoryCheck = cms.Service('SimpleMemoryCheck',\n ignoreTotal=cms.untracked.int32(0),\n oncePerEventMode = cms.untracked.bool(False)\n )\n\nprocess.ana = cms.EDAnalyzer('HydjetAnalyzer'\n )\n\nprocess.dijet = cms.EDAnalyzer('DijetNtupleProducer')\n\nprocess.TFileService = cms.Service('TFileService',\n fileName = cms.string('Pyquen.root')\n )\n\n\nprocess.p1 = cms.Path(process.generator*process.hiGenParticles*process.hiGenJets*process.dijet*process.ana)\n\n\n\n\n","sub_path":"cmssw/QPythiaInterface/test/testPyquen.py","file_name":"testPyquen.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"466597646","text":"# ----------------------------------\n# Multiclass Prediction via Logistic Regression\n#\n# This class uses gradient descent to solve\n# a multiclass logistic regression problem\n#\n# Author: Gabe Mancino\n# ----------------------------------\n\n# Import necessary packages\n\nimport numpy as np\n\n\nclass MulticlassLogReg:\n '''\n INPUTS:\n x = Data matrix with ith data point as ith row\n y = Label vector with ith label in ith row\n w0 = Initial guess for weight matrix\n b0 = Initial guess for shift vector\n alpha = Constant stepsize (should be less than 0.25, I think?)\n T = Max iterations\n tol = Stopping tolerance (difference in previous loss to new loss)\n [lam1, lam2] = Regularization parameters for w and b, respectively\n num_classes = Number of classes for prediction\n\n OUTPUTS:\n losses = Vector of losses from each iteration\n w = \"Optimal\" w\n b = \"Optimal\" b\n '''\n\n # Initialize class with appropriate data and starting parameters\n def __init__(self, x, y, w0, b0, alpha, T, tol, lam1, lam2, num_classes):\n self.x = x\n self.y = y\n self.w0 = w0\n self.b0 = b0\n self.w = self.w0.copy()\n self.b = self.b0.copy()\n self.stepsize = alpha\n self.max_iters = T\n self.tol = tol\n self.lambdas 
= [lam1, lam2]\n self.num_classes = num_classes\n self.losses = []\n\n # Define softmax function for computing class probabilities\n def softmax(self, z):\n # Subtract max for numerical stability\n z -= np.max(z)\n sm = (np.exp(z).T / np.sum(np.exp(z), axis=1)).T\n return sm\n\n # Define prediction function to make life easier\n def predict(self, x, w, b):\n probability = self.softmax(np.dot(x, w) + b)\n prediction = np.argmax(probability, axis=1)\n return [prediction, probability]\n\n # One hot encoding to turn label vector in label matrix\n def one_hot_encoding(self, y, num_classes):\n ln = len(y)\n y = y.astype(int)\n Y = np.zeros((ln, num_classes))\n for i in range(ln):\n Y[i, y[i]] = 1\n return Y\n\n # Cross entropy loss function\n def Loss(self, probs, yhat, w, b, lam1, lam2):\n N = probs.shape[0]\n\n loss = (-1 / N) * np.sum(yhat * np.log(probs)) + (lam1 / 2) * np.linalg.norm(w, ord='fro') ** 2 + (\n lam2 / 2) * np.linalg.norm(b, ord=2) ** 2\n return loss\n\n # Define gradient with respect to w and b\n def grad(self, x, y, w, b, lam1, lam2, num_classes):\n\n # Encode data into matrix\n yhat = self.one_hot_encoding(y, num_classes)\n\n N = x.shape[0]\n\n scores = np.dot(x, w) + b\n probs = self.softmax(scores)\n\n # Frobenius norm loss\n loss = self.Loss(probs, yhat, w, b, lam1, lam2)\n\n gradw = (-1/N) * np.dot(x.T,(yhat - probs)) + lam1*w\n gradb = (-1/N) * (yhat - probs) + lam2*b\n\n return [gradw, gradb, loss]\n\n # Perform gradient descent with fixed stepsize\n def grad_descent(self):\n\n loss = 1\n\n for i in range(self.max_iters):\n\n lossOld = loss\n\n [gradw, gradb, loss] = self.grad(self.x, self.y, self.w, self.b, self.lambdas[0], self.lambdas[1], self.num_classes)\n\n self.losses.append(loss)\n\n self.w = self.w - self.stepsize * gradw\n self.b = self.b - self.stepsize * gradb\n\n # Check termination criteria based on function output\n if abs(lossOld - loss) <= self.tol:\n print('[INFO] Convergence criteria met. 
', str(i), ' iterations were performed.')\n break\n\n self.b = self.b[0, :]\n\n return [self.losses, self.w, self.b]\n","sub_path":"Framework1/NumericalExperiments/MulticlassLogReg.py","file_name":"MulticlassLogReg.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"234625366","text":"xgb_params = {\n 'regressor__min_child_weight':[2,4], \n 'regressor__gamma':[i/10.0 for i in range(2,7,2)], \n 'regressor__subsample':[i/10.0 for i in range(6,11,2)],\n 'regressor__colsample_bytree':[i/10.0 for i in range(6,11,2)], \n 'regressor__max_depth': [3, 5, 10],\n 'regressor__learning_rate': [0.01, 0.05, 0.1, 0.2]\n}\n\n\nlgb_params = {\n 'regressor__objective': ['regression'],\n \"regressor__boosting_type\": ['gbdt'],\n 'regressor__n_estimators': [100],\n \"regressor__max_depth\": [3, 7],\n 'regressor__subsample':[i/10.0 for i in range(6,11,2)],\n 'regressor__colsample_bytree':[i/10.0 for i in range(6,11,2)],\n \"regressor__learning_rate\": [0.01, 0.05, 0.1],\n \"regressor__num_leaves\": [5, 10],\n \"regressor__reg_alpha\": [0.01, 0.1, 0.5, 1.3],\n \"regressor__reg_lambda\": [0.01, 0.1, 0.5]\n}\n\n\nann_params = {\n 'regressor__activation': ['relu'],\n 'regressor__learning_rate' : ['adaptive'],\n 'regressor__solver': ['adam'],\n 'regressor__max_iter': [200, 1000],\n 'regressor__hidden_layer_sizes': [(20,5),(10,5),(5,5),(10,10),(10,5,5)],\n 'regressor__learning_rate_init': [0.01,0.05,0.1,0.2],\n 'regressor__alpha': [0.001, 0.01,0.1,0.3],\n}\n","sub_path":"code_new/models/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"566159518","text":"# -*- coding:utf8 -*-\nimport os\nimport pickle\nimport sys\nimport time\n\nfrom selenium import webdriver\n\n\ndef scraping(url, s_list):\n # wait設定\n pause = 3\n\n # Webドライバー作成\n browser = webdriver.PhantomJS(service_log_path=os.path.devnull)\n browser.implicitly_wait(pause)\n\n print(\"*\" * 20 + \" アクセス開始 \" + \"*\" * 20 + \"\\n\")\n browser.get(url)\n\n outupt_list = []\n for index, asin in enumerate(s_list):\n s_input = browser.find_element_by_id(\"_item_search_inp\")\n s_input.data_reset()\n s_input.send_keys(asin)\n\n # フォーム送信\n frm = browser.find_element_by_css_selector(\"#_graph_search_btn\")\n frm.click()\n time.sleep(pause)\n try:\n jan_str = browser.find_elements_by_xpath('//*[@id=\"main_contents\"]/div/ul[1]/li[2]/div[5]/span[2]')[0].text\n except IndexError as index_e:\n print(index_e)\n sys.exit()\n jan = jan_str[4:]\n print(\"{} : {} {}\".format(index, asin, jan))\n outupt_list.append([str(asin), jan])\n\n # 終了\n browser.close()\n\n # 念のため、シリアライズ\n with open('get_asin.pickle', mode='wb') as f:\n pickle.dump(outupt_list, f)\n\n # デシアライズ\n # with open('get_asin.pickle', mode='rb') as f:\n # load_data = pickle.load(f)\n # print(load_data)\n\n # 結果表示\n print(\"\\n\" + \"*\" * 20 + \" 結果表示 \" + \"*\" * 20 + \"\\n\")\n for ret in outupt_list:\n print(\"\\t\".join(ret))\n\n\nif __name__ == '__main__':\n URL = r\"https://mnrate.com/\"\n\n search_list = [\n 'B002BSHR6Y',\n ]\n\n scraping(URL, search_list)\n","sub_path":"asin_to_jan/get_jan.py","file_name":"get_jan.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"562503300","text":"from django.shortcuts import render\nfrom .models import Post, Contact\n\n\ndef hello_blog(request):\n 
list_post = Post.objects.all()\n data = {'posts': list_post} \n return render(request, 'index.html', data)\n\ndef post_detail(request, id):\n post = Post.objects.get(id=id)\n return render(request, 'post_detail.html', {'post': post})\n\ndef save_form(request):\n name=request.POST['name']\n Contact.objects.create(\n name=name,\n email=request.POST['email'],\n message=request.POST['message']\n )\n return render(request, 'contact_sucess.html', {'name_contact':name})","sub_path":"blog/website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"415974659","text":"from flask import Flask, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template, request, url_for,redirect,send_from_directory\nfrom sqlalchemy import *\nfrom models import *\nfrom app import *\nimport json\nimport os\nfrom werkzeug.security import generate_password_hash, check_password_hash\nimport sys, flask\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n# app = Flask(__name__)\n\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:walakokahibaw@localhost/db'\n# app.config['SECRET_KEY'] = 'hard to guess string'\n# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n# db = SQLAlchemy(app)\n#retrieve data for parent,child and teacher\n\n@app.route('/api/user/', methods=['GET'])\ndef getoneuser(acc_id):\n user = Account.query.filter_by(acc_id=acc_id).first()\n if not user:\n return jsonify({'message': \"no user found\"})\n user_data = {}\n user_data['acc_id'] = user.acc_id\n user_data['username'] = user.username\n user_data['email'] = user.email\n user_data['acc_type'] = user.acc_type\n return jsonify({'user': user_data})\n\n@app.route('/api/teacher/', methods=['GET'])\ndef getinfoteacher(acc_id):\n user = Teacher.query.filter_by(acc_id=acc_id).first()\n if not user:\n return jsonify({'message': \"no user found\"})\n user_data = {}\n user_data['fname_t'] = user.fname_t\n user_data['lname_t'] = user.lname_t\n user_data['bday_t'] = user.bday_t\n user_data['specialty'] = user.specialty\n user_data['tel_num'] = user.tel_num\n user_data['add_t'] = user.add_t\n return jsonify({'user': user_data})\n\n@app.route('/api/parent/', methods=['GET'])\ndef getinfoparent(acc_id):\n user = Parent.query.filter_by(acc_id=acc_id).first()\n if not user:\n return jsonify({'message': \"no user found\"})\n user_data = {}\n user_data['fname_p'] = user.fname_p\n user_data['lname_p'] = user.lname_p\n user_data['bday_p'] = user.bday_p\n user_data['add_p'] = user.add_p\n return jsonify({'user': user_data})\n\n@app.route('/api/child/', methods=['GET'])\ndef getinfochild(c_id):\n user = Child.query.filter_by(c_id=c_id).first()\n if not user:\n return jsonify({'message': \"no user found\"})\n user_data = {}\n user_data['fname_c'] = user.fname_c\n user_data['lname_c'] = user.lname_c\n user_data['bday_c'] = user.bday_c\n user_data['diagnosis'] = user.diagnosis\n return jsonify({'user': user_data})\n\ndef token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token=None\n if 'x-access-token' in request.headers:\n token= request.headers['x-access-token']\n if not token:\n return jsonify({'message' : 'Token is missing'}),\n try:\n data = jwt.decode(token), app.config['SECRET_KEY']\n current_user = Account.query.filter_by(username=data['username']).first()\n except:\n return jsonify({'message': 'Token is invalid'}), 401\n return f(current_user, *args,**kwargs)\n return decorated\n\n 
#edit profile -----------------------------\n\n@app.route('/api/parent/editprofile/', methods=['POST']) #this api is for editing parent's profile \ndef update_parentinfo(acc_id):\n# @token_required\n Parent.query.filter_by(acc_id=int(acc_id)).first()\n data = request.get_json()\n\n output = Parent(fname_p = data['fname_p'], lname_p = data['lname_p'], bday_p = data['bday_p'], add_p = data['add_p'])\n\n output = db.session.merge(output)\n db.session.add(output)\n db.session.commit()\n return jsonify({'message' : 'success!'})\n\n\n@app.route('/api/child/editprofile/', methods=['POST']) #this api is for editing child's profile\ndef update_childinfo(c_id):\n# @token_required\n Child.query.filter_by(acc_id=int(c_id)).first()\n data = request.get_json()\n\n output = Child(fname_c = data['fname_c'], lname_c = data['lname_c'], bday_c = data['bday_c'], diagnosis = data['diagnosis'])\n\n output = db.session.merge(output)\n db.session.add(output)\n db.session.commit()\n return jsonify({'message' : 'success!'})\n\n@app.route('/api/teacher/editprofile/', methods=['POST']) #this api is for editing teacher's profile\ndef update_teacherinfo(c_id):\n# @token_required\n Teacher.query.filter_by(acc_id=int(c_id)).first()\n data = request.get_json()\n\n output = Teacher(fname_t = data['fname_t'], lname_t = data['lname_t'], bday_t = data['bday_c'], specialty = data['specialty'],tel_num = data['tel_num'], add_t = data['add_t'])\n\n output = db.session.merge(output)\n db.session.add(output)\n db.session.commit()\n return jsonify({'message' : 'success!'})\n\n@app.route('/api/signup', methods=['POST'])\ndef createuser():\n data = request.get_json()\n hashed_password = generate_password_hash(data['password'], method='sha256')\n new_acc = Account(acc_type=data['acc_type'], username = data['username'],email=data['email'], password = hashed_password)\n print(hashed_password)\n\n db.session.add(new_acc)\n db.session.commit()\n return jsonify({'message' : 'New user created.'})\n\n@app.route('/api/login', methods=['POST'])\ndef login_api():\n\n auth = request.authorization\n if not auth or not auth.username or not auth.password:\n return make_response('un authenticated', 401, {'WWW-Authenticate' : 'Login required'})\n user = Account.query.filter_by(username=auth.username).first()\n\n if not user:\n return jsonify('User not found', 401, {'WWW-Authenticate' : 'Login required'})\n\n if check_password_hash(user.password,auth.password):\n token = jwt.encode({'account_id': Account.acc_id, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])\n return jsonify({'status': 'ok', 'token': token.decode('UTF-8')})\n return make_response('Could not verify', {'WWW-Authenticate' : 'Login required'})\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', 'http://127.0.0.1:5000')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n return response","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"486405017","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 10:29:59 2018\n\n@author: tcarroll\n\"\"\"\n\nnumbers = [26,54,93,17,77,0,31,44,55,20,80,3,20.7,-8]\nhigh = 0\ncount = 0\nlength=len(numbers)\n\nwhile count <= len(numbers)-1:\n \n \n for t in range (0, 
length-1):\n if t == 0:\n high = numbers[t]\n position = t\n elif numbers[t] > high:\n high = numbers[t]\n position = t\n \n temp = numbers[length-1]\n numbers[length-1] = high\n numbers[position] = temp\n \n count = count + 1\n length = length - 1\n \nprint(numbers)\n\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"529465214","text":"from flask import Flask,render_template,request,url_for,session,redirect\nimport os\nimport MySQLdb\nfrom DB.database import Database\n\napp = Flask(__name__)\n\napp.secret_key = os.urandom(24)\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef index():\n\treturn render_template('index.html')\n\n@app.route(\"/projects\")\ndef projects():\n\treturn render_template('projects.html')\n\n@app.route(\"/signin\",methods=['GET',\"POST\"])\ndef signin():\n\tif request.method == 'GET':\n\t\t#show the sign in form \n\t\treturn render_template('signin.html',error=False)\n\telif request.method == 'POST':\n\t\t#user has already submitted the form , validate and redirect accordingly.\n\t\tusername = request.form['username']\n\t\tpassword = request.form['password']\n\t\t\n\t\tif username == \"diljit\" and password == \"diljit123\":\n\t\t\tsession['username'] = username\n\t\t\treturn redirect(url_for('welcome'))\n\t\telse:\n\t\t return render_template('signin.html',error=True)\n\telse:\n\t\treturn render_template('signin.html')\t \n\t\t\n\t\t\n\n@app.route(\"/gallery\")\ndef gallery():\t\n\treturn render_template('gallery.html')\n\n@app.route(\"/gallery/createAlbum\",methods=['GET','POST'])\ndef createAlbum():\n\tif request.method == 'GET':\n\t\treturn render_template('createAlbum.html')\n\t#else:\n\t\t#create an album in database corresponding to the data\n\t\t#get the album id\n\t\t#return redirect(url_for('uploadAlbumPhotos',albumId=albumId)\n\n@app.route(\"/gallery/createAlbum/uploadAlbumPhotos\")\ndef uploadAlbumPhotos():\n\t#code for uploading album photos\n\treturn \"uploading photos\"\n\n@app.route(\"/contact\")\ndef contact():\t\n\treturn render_template('contact.html')\n\n@app.route(\"/welcome\")\ndef welcome():\n\tif 'username' in session:\n\t\treturn render_template('welcome.html')\n\telse:\n\t\treturn redirect(url_for('signin'))\n\treturn\t\n\n@app.route(\"/logout\")\ndef logout():\n\tsession.pop('username', None)\n\tredirect(url_for('index'))\n\n@app.route(\"/test\")\ndef test():\n\tconn = Database()\n\tqueryResult = conn.read(\"SELECT * FROM sample\")\n\treturn str(queryResult)\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"380495606","text":"import numpy as np\nimport cv2\nimport os\nimport yaml\n\n\ndef get_image_paths(directory):\n return [x.path for x in os.scandir(directory) if x.name.endswith(\".jpg\") or x.name.endswith(\".png\")]\n\n\ndef load_images(image_paths, convert=None):\n iter_all_images = (cv2.resize(cv2.imread(fn), (256,256)) for fn in image_paths)\n if convert:\n iter_all_images = (convert(img) for img in iter_all_images)\n for i,image in enumerate( iter_all_images ):\n if i == 0:\n all_images = np.empty((len(image_paths),) + image.shape, dtype=image.dtype)\n all_images[i] = image\n return all_images\n\n\ndef get_transpose_axes( n ):\n if n % 2 == 0:\n y_axes = list(range(1, n-1, 2))\n x_axes = 
list(range(0, n-1, 2))\n else:\n y_axes = list(range(0, n-1, 2))\n x_axes = list(range(1, n-1, 2))\n return y_axes, x_axes, [n-1]\n\n\ndef stack_images(images):\n images_shape = np.array(images.shape)\n new_axes = get_transpose_axes(len(images_shape))\n new_shape = [np.prod(images_shape[x]) for x in new_axes]\n return np.transpose(\n images,\n axes = np.concatenate(new_axes)\n ).reshape(new_shape)\n\n\ndef get_G(test_A, test_B, path_A, path_B, batchSize):\n figure_A = np.stack([\n test_A,\n np.squeeze(np.array([path_A([test_A[i:i+1]]) for i in range(test_A.shape[0])])),\n np.squeeze(np.array([path_B([test_A[i:i+1]]) for i in range(test_A.shape[0])])),\n ], axis=1 )\n figure_B = np.stack([\n test_B,\n np.squeeze(np.array([path_B([test_B[i:i+1]]) for i in range(test_B.shape[0])])),\n np.squeeze(np.array([path_A([test_B[i:i+1]]) for i in range(test_B.shape[0])])),\n ], axis=1 )\n\n figure = np.concatenate([figure_A, figure_B], axis=0)\n figure = figure.reshape((4,batchSize//2) + figure.shape[1:])\n figure = stack_images(figure)\n figure = np.clip((figure + 1) * 255 / 2, 0, 255).astype('uint8')\n return figure\n\n\ndef get_G_mask(test_A, test_B, path_A, path_B, batchSize):\n figure_A = np.stack([\n test_A,\n (np.squeeze(np.array([path_A([test_A[i:i+1]]) for i in range(test_A.shape[0])])))*2-1,\n (np.squeeze(np.array([path_B([test_A[i:i+1]]) for i in range(test_A.shape[0])])))*2-1,\n ], axis=1 )\n figure_B = np.stack([\n test_B,\n (np.squeeze(np.array([path_B([test_B[i:i+1]]) for i in range(test_B.shape[0])])))*2-1,\n (np.squeeze(np.array([path_A([test_B[i:i+1]]) for i in range(test_B.shape[0])])))*2-1,\n ], axis=1 )\n\n figure = np.concatenate([figure_A, figure_B], axis=0)\n figure = figure.reshape((4,batchSize//2) + figure.shape[1:])\n figure = stack_images(figure)\n figure = np.clip((figure + 1) * 255 / 2, 0, 255).astype('uint8')\n return figure\n\n\ndef show_loss_config(loss_config):\n \"\"\"\n Print out loss configuration. Called in loss function automation.\n \n Argument:\n loss_config: A dictionary. 
Configuration regarding the optimization.\n \"\"\"\n for config, value in loss_config.items():\n print(f\"{config} = {value}\")\n\n\ndef make_html(filesets,img_dir,step_count):\n # this function makes a HTML file showing all the generated images in a webpage\n index_path = os.path.join(img_dir, \"index.html\")\n if os.path.exists(index_path):\n index = open(index_path, \"a\")\n else:\n index = open(index_path, \"w\")\n index.write(\"\")\n index.write(\"\")\n\n index.write(\"\")\n index.write(\"\" % step_count)\n for kind in range(3):\n index.write(f\"\")\n index.write(\"\")\n return index_path\n\n\ndef save_preview_image(test_A, test_B, \n path_A, path_B, \n path_bgr_A, path_bgr_B,\n path_mask_A, path_mask_B, \n batchSize, save_fn=\"preview.jpg\"):\n figure_A = np.stack([\n test_A,\n np.squeeze(np.array([path_bgr_B([test_A[i:i+1]]) for i in range(test_A.shape[0])])),\n (np.squeeze(np.array([path_mask_B([test_A[i:i+1]]) for i in range(test_A.shape[0])])))*2-1,\n np.squeeze(np.array([path_B([test_A[i:i+1]]) for i in range(test_A.shape[0])])),\n ], axis=1 )\n figure_B = np.stack([\n test_B,\n np.squeeze(np.array([path_bgr_A([test_B[i:i+1]]) for i in range(test_B.shape[0])])),\n (np.squeeze(np.array([path_mask_A([test_B[i:i+1]]) for i in range(test_B.shape[0])])))*2-1,\n np.squeeze(np.array([path_A([test_B[i:i+1]]) for i in range(test_B.shape[0])])),\n ], axis=1 )\n\n figure = np.concatenate([figure_A, figure_B], axis=0)\n figure = figure.reshape((4,batchSize//2) + figure.shape[1:])\n figure = stack_images(figure)\n figure = np.clip((figure + 1) * 255 / 2, 0, 255).astype('uint8')\n cv2.imwrite(save_fn, figure) \n\n\ndef load_yaml(path_configs):\n with open(path_configs, 'r') as f:\n return yaml.load(f) \n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"164811065","text":"from django.http import HttpResponseRedirect, HttpResponseNotFound, HttpResponse\nfrom django.shortcuts import render\nfrom .models import *\nfrom .forms import *\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import permission_required,login_required,user_passes_test\nfrom django.db.models import Count, Case, When, IntegerField, FloatField, F, Sum, DecimalField, Subquery, OuterRef \nfrom django.db.models.functions import Cast, Coalesce\n\n@user_passes_test(lambda u: u.is_superuser)\ndef document_add(request, typeDoc, id):\n #check if this is a valid document\n if typeDoc not in('in','out'):\n return HttpResponseNotFound(\"page not found 404\")\n if id != 0:\n #check if this document exits \n if not Documents.objects.filter(pk = id).exists():\n return HttpResponseNotFound(\"page not found 404\")\n else:\n #check if this document is in the right type of document\n if not Documents.objects.filter(type = typeDoc).exists():\n return HttpResponseNotFound(\"page not found 404\")\n if request.method == 'POST':\n if int(id) == 0:\n form = DocumentsForm(request.POST)\n else:\n d = Documents.objects.get(pk=id)\n form = DocumentsForm(request.POST,instance=d)\n strType = typeDoc\n if 'documentsForm' in request.POST:\n ftype = form.save(commit=False)\n intID = 0\n strDescription = form.data['description']\n if strDescription.strip() ==\"\":\n messages.warning(request, 'add description of document')\n else:\n ftype.type = strType\n ftype.subType = 'inventory'\n 
ftype.user = request.user\n ftype.save()\n intID = ftype.pk\n messages.success(request, 'your document is saves')\n \n\n return HttpResponseRedirect(reverse('store:document_add', kwargs={'typeDoc' : typeDoc, 'id' : intID }))\n if 'documentsDForm' in request.POST:\n formD = DocumentsDetailsForm(request.POST)\n if int(id) == 0:\n messages.warning(request, 'save the document description before add a products!')\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n else:\n ddExtra = formD.save(commit=False)\n ddExtra.type = strType\n d = Documents.objects.get(pk=id)\n ddExtra.documents = d\n ddExtra.user = request.user\n ddExtra.save()\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n else:\n #form = DocumentsForm()\n form = None\n if int(id) != 0:\n d = Documents.objects.get(pk=id)\n form = DocumentsForm(instance=d)\n else:\n form = DocumentsForm() \n formD = DocumentsDetailsForm()\n #details\n documentd_list = DocumentsDetails.objects.filter(documents__pk = id)\n page = request.GET.get('page', 1)\n paginator = Paginator(documentd_list, 10)\n try:\n documentsd = paginator.page(page)\n except PageNotAnInteger:\n documentsd = paginator.page(1)\n except EmptyPage:\n documentsd = paginator.page(paginator.num_pages)\n\n return render(request, 'store/documentsadd.html', {'form': form, 'formD': formD, \"documentsd\": documentsd})\n@user_passes_test(lambda u: u.is_superuser)\ndef document(request, typeDoc):\n if typeDoc not in('in','out'):\n return HttpResponseNotFound(\"page not found 404\")\n document_list = Documents.objects.filter(type = typeDoc)\n page = request.GET.get('page', 1)\n paginator = Paginator(document_list, 10)\n try:\n documents = paginator.page(page)\n except PageNotAnInteger:\n documents = paginator.page(1)\n except EmptyPage:\n documents = paginator.page(paginator.num_pages)\n\n return render(request, 'store/documents.html', { 'documents': documents })\n \n#@permission_required()\n#@login_required\n@user_passes_test(lambda u: u.is_superuser)\ndef products_add(request, id):\n if request.method == 'POST':\n if int(id) == 0:\n form = ProductsForm(request.POST)\n else:\n d = Products.objects.get(pk=id)\n form = ProductsForm(request.POST,instance=d)\n if form.is_valid():\n p = Products.objects.filter(pk=id)\n if p.exists():\n for pp in p:\n logPrice = LogsPriceChange(user = request.user, price = pp.price,name = pp.name, products = pp)\n logPrice.save()\n\n ftype = form.save(commit=False)\n ftype.save()\n return HttpResponseRedirect(reverse('store:products_add', kwargs = {'id': ftype.pk }))\n else:\n form = None\n if int(id) != 0:\n d = Products.objects.get(pk=id)\n form = ProductsForm(instance=d)\n else:\n form = ProductsForm() \n\n logDocuments_list = LogsPriceChange.objects.filter(products__pk = id)\n page = request.GET.get('page', 1)\n paginator = Paginator(logDocuments_list, 10)\n try:\n logDocuments = paginator.page(page)\n except PageNotAnInteger:\n logDocuments = paginator.page(1)\n except EmptyPage:\n logDocuments = paginator.page(paginator.num_pages)\n\n return render(request, 'store/productsadd.html', {'form': form, 'logDocuments': logDocuments })\ndef products(request):\n product_list = None\n if request.method == 'POST':\n if 'btnLikes' in request.POST:\n if request.user.is_authenticated:\n __user =request.user\n __strID = request.POST.get(\"id\",'0')\n __product = Products.objects.get(pk= int(__strID))\n pl = ProductsLikes(user=__user, products = __product)\n pl.save()\n else:\n messages.info(request, 'logging before you like any of our products')\n if 
'btnAddtoBasket' in request.POST:\n if request.user.is_authenticated:\n __user =request.user\n __strID = request.POST.get(\"id\",'0')\n __product = Products.objects.get(pk= int(__strID))\n __quantity = request.POST.get(\"quantity\",'0')\n #print(\"quantity: \"+__quantity)\n if __quantity.strip() == \"\":\n __quantity = \"0\"\n if not __quantity == \"0\":\n _tmpc = tempCar.objects.filter(user=__user, products = __product)\n if not _tmpc.exists():\n pl = tempCar(user=__user, products = __product, quantity = __quantity)\n pl.save()\n else:\n for t in _tmpc:\n t.quantity = t.quantity + float(__quantity)\n t.save()\n else:\n messages.info(request, 'add quantity')\n\n else:\n messages.info(request, 'logging before add to basket')\n\n\n # product_list = Products.objects.filter(description__contains = __strFilter)\n #else:\n # product_list = Products.objects.all()\n __strFilter = request.GET.get('search', '')\n __strsortby = request.GET.get('sortby', '')\n __strdir = request.GET.get('dir', '')\n product_list = Products.objects.annotate(likes = Count('productslikes')).filter(name__contains = __strFilter)\n #product_list = product_list.annotate(stock = Sum(F(\"documentsdetails__quantity\")))\n #product_list = product_list\n if __strsortby == 'name':\n if __strdir == \"desc\":\n __strsortby = \"-\" + __strsortby\n product_list = Products.objects.annotate(likes = Count('productslikes')).filter(name__contains = __strFilter).order_by(__strsortby)\n if __strsortby == 'likes':\n if __strdir == \"desc\":\n __strsortby = \"-\" + __strsortby\n\n product_list = Products.objects.annotate(likes = Count('productslikes')).filter(name__contains = __strFilter).order_by(__strsortby)\n\n page = request.GET.get('page', 1)\n \n paginator = Paginator(product_list, 10)\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n return render(request, 'store/products.html', { 'products': products })\n\n@login_required\ndef my_basket(request):\n __user = request.user\n __strFilter = request.GET.get('search', '')\n __strsortby = request.GET.get('sortby', '')\n __strdir = request.GET.get('dir', '')\n if request.method == 'POST':\n if 'btnLikes' in request.POST:\n __strID = request.POST.get(\"id\",'0')\n __product = Products.objects.get(pk= int(__strID))\n pl = ProductsLikes(user=__user, products = __product)\n pl.save()\n elif 'btnDelete' in request.POST:\n __strID = request.POST.get(\"id\",'0')\n tmpc = tempCar.objects.get(pk = int(__strID))\n tmpc.delete()\n elif 'btnProcessCar' in request.POST:\n tcs = tempCar.objects.filter(user = __user)\n if tcs.exists():\n doc = Documents(user = __user, description = \"online sell\", type = 'out', subType = \"sell\" )\n doc.save()\n for tc in tcs:\n dd = DocumentsDetails()\n dd.type = \"out\"\n dd.price = tc.products.price\n dd.quantity = tc.quantity\n dd.documents = doc\n dd.user = request.user\n dd.products = tc.products\n dd.save()\n tmpc = tempCar.objects.filter(user = __user)\n for t in tmpc:\n tt = tempCar.objects.get(pk = t.pk)\n tt.delete()\n messages.success(request, 'Thanks for you purchase :)')\n else:\n messages.info(request, 'add products to your basket firt')\n \n car_list = tempCar.objects.annotate(likes = Count('products__productslikes'), cost = Cast(F('quantity') * F('products__price'), FloatField())).filter(user= __user, products__name__contains = __strFilter)\n\n if __strsortby == 'name':\n __strsortby = 'products__name'\n if __strdir == \"desc\":\n 
__strsortby = \"-\" + __strsortby\n car_list = tempCar.objects.annotate(likes = Count('products__productslikes'), cost = Cast(F('quantity') * F('products__price'), FloatField())).filter(user= __user,products__name__contains = __strFilter).order_by(__strsortby)\n if __strsortby == 'likes':\n if __strdir == \"desc\":\n __strsortby = \"-\" + __strsortby\n\n car_list = tempCar.objects.annotate(likes = Count('products__productslikes'), cost = Cast(F('quantity') * F('products__price'), FloatField())).filter(user= __user, products__name__contains = __strFilter).order_by(__strsortby)\n\n\n page = request.GET.get('page', 1)\n total = car_list.aggregate(Sum('cost'))['cost__sum']\n if total is None:\n total = 0.0\n paginator = Paginator(car_list, 10)\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n return render(request, 'store/mybasket.html', { 'products': products , 'total': total })\n\n@login_required\ndef purchase_history(request):\n documentd_list = DocumentsDetails.objects.filter(user = request.user, type=\"out\")\n page = request.GET.get('page', 1)\n paginator = Paginator(documentd_list, 10)\n try:\n documentsd = paginator.page(page)\n except PageNotAnInteger:\n documentsd = paginator.page(1)\n except EmptyPage:\n documentsd = paginator.page(paginator.num_pages)\n return render(request, 'store/purchase_history.html', { \"documentsd\": documentsd})\ndef stock(request):\n product_list = None\n __strFilter = request.GET.get('search', '')\n __strsortby = request.GET.get('sortby', '')\n __strdir = request.GET.get('dir', '')\n product_list = Products.objects.filter(name__contains = __strFilter)\n product_list = product_list.annotate(\n stock_in_sum = Sum(Case(When(documentsdetails__type='in', then=F('documentsdetails__quantity')), output_field=DecimalField(), default=0)),\n stock_out_sum = Sum(Case(When(documentsdetails__type='out', then=F('documentsdetails__quantity')), output_field=DecimalField(), default=0))\n ).annotate(stock = F('stock_in_sum') - F('stock_out_sum'))\n _likes = ProductsLikes.objects.filter(products=OuterRef('pk')).values('products').annotate(count=Count('pk')).values('count')\n product_list = product_list.annotate(likes = Coalesce(Subquery(_likes),0)).filter(stock__gt = 0)\n if __strsortby == 'name':\n if __strdir == \"desc\":\n __strsortby = \"-\" + __strsortby\n product_list = Products.objects.filter(name__contains = __strFilter).order_by(__strsortby)\n product_list = product_list.annotate(\n stock_in_sum = Sum(Case(When(documentsdetails__type='in', then=F('documentsdetails__quantity')), output_field=DecimalField(), default=0)),\n stock_out_sum = Sum(Case(When(documentsdetails__type='out', then=F('documentsdetails__quantity')), output_field=DecimalField(), default=0))\n ).annotate(stock = F('stock_in_sum') - F('stock_out_sum'))\n _likes = ProductsLikes.objects.filter(products=OuterRef('pk')).values('products').annotate(count=Count('pk')).values('count')\n product_list = product_list.annotate(likes = Coalesce(Subquery(_likes),0)).filter(stock__gt = 0)\n if __strsortby == 'likes':\n if __strdir == \"desc\":\n __strsortby = \"-\" + __strsortby\n\n product_list = Products.objects.filter(name__contains = __strFilter).order_by(__strsortby)\n product_list = product_list.annotate(\n stock_in_sum = Sum(Case(When(documentsdetails__type='in', then=F('documentsdetails__quantity')), output_field=DecimalField(), default=0)),\n stock_out_sum = 
Sum(Case(When(documentsdetails__type='out', then=F('documentsdetails__quantity')), output_field=DecimalField(), default=0))\n ).annotate(stock = F('stock_in_sum') - F('stock_out_sum'))\n _likes = ProductsLikes.objects.filter(products=OuterRef('pk')).values('products').annotate(count=Count('pk')).values('count')\n product_list = product_list.annotate(likes = Coalesce(Subquery(_likes),0)).filter(stock__gt = 0)\n\n page = request.GET.get('page', 1)\n \n paginator = Paginator(product_list, 10)\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n return render(request, 'store/stock.html', { 'products': products })","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"611634532","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/4 4:55 PM\n# @Author : zhongch4g\n# @Site : \n# @File : wildcard_matching.py\n# @Software: IntelliJ IDEA\n\nclass Solution():\n def is_match(self, source, parttern):\n \"\"\"\n :param source:\n :param parttern:\n :return: True or False\n \"\"\"\n return self.is_correct_match_with_memo(source, 0, parttern, 0, {})\n\n # 返回source 的索引i之后的字符串是否和parttern 的索引j之后的模式相匹配\n def is_correct_match(self, source, i, parttern, j):\n # 匹配失败的情况\n # 如果source匹配完了而parttern还有\n if len(source) == i:\n for index in range(j, len(parttern)):\n if parttern[index] != \"*\":\n return False\n return True\n\n # 如果parttern匹配完了而source还有\n if len(parttern) == j:\n return False\n\n # 判断*和?两种情况\n if parttern[j] != \"*\":\n return self.is_match_single(source[i], parttern[j]) \\\n and self.is_correct_match(source, i + 1, parttern, j + 1)\n\n # * 匹配串 或者 *匹配空\n return self.is_correct_match(source, i + 1, parttern, j) \\\n or self.is_correct_match(source, i, parttern, j + 1)\n\n def is_correct_match_with_memo(self, source, i, parttern, j, memo):\n if (i, j) in memo:\n return memo[(i, j)]\n\n if len(source) == i:\n for index in range(j, len(parttern)):\n if parttern[index] != \"*\":\n return False\n return True\n\n if len(parttern) == j:\n return False\n\n if parttern[j] != \"*\":\n matched = self.is_match_single(source[i], parttern[j]) \\\n and self.is_correct_match(source, i + 1, parttern, j + 1)\n\n else:\n matched = self.is_correct_match(source, i + 1, parttern, j) \\\n or self.is_correct_match(source, i, parttern, j + 1)\n\n memo[(i, j)] = matched\n return matched\n\n def is_match_single(self, s, p):\n return s == p or p == \"?\"\n\nsolution = Solution()\nresult = solution.is_match(\"aabbbbbbb\", \"a*\")\nprint(result)","sub_path":"extra/wildcard_matching.py","file_name":"wildcard_matching.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"560692652","text":"from functools import reduce\nfrom operator import or_\n\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.db.models import Q\nfrom django.shortcuts import reverse\nfrom django.views.generic import DetailView, ListView, TemplateView\nfrom django.views.generic.edit import FormView\n\nfrom grandchallenge.products.forms import ImportForm\nfrom grandchallenge.products.models import Company, Product, Status\nfrom grandchallenge.products.utils import DataImporter\n\n\nclass ProductList(ListView):\n model = Product\n context_object_name = \"products\"\n 
queryset = Product.objects.filter(ce_status=Status.CERTIFIED).order_by(\n \"-verified\", \"-ce_verified\", \"product_name\"\n )\n\n def get_queryset(self):\n queryset = super().get_queryset().select_related(\"company\")\n subspeciality_query = self.request.GET.get(\"subspeciality\")\n modality_query = self.request.GET.get(\"modality\")\n ce_class_query = self.request.GET.get(\"ce_class\")\n fda_class_query = self.request.GET.get(\"fda_class\")\n search_query = self.request.GET.get(\"search\")\n self.product_total_all = queryset.count()\n\n if search_query:\n search_fields = [\n \"product_name\",\n \"subspeciality\",\n \"modality\",\n \"description\",\n \"key_features\",\n \"diseases\",\n \"distribution\",\n \"company__company_name\",\n ]\n q = reduce(\n or_,\n [\n Q(**{f\"{f}__icontains\": search_query})\n for f in search_fields\n ],\n Q(),\n )\n queryset = queryset.filter(q)\n\n if subspeciality_query and subspeciality_query != \"All\":\n queryset = queryset.filter(\n Q(subspeciality__icontains=subspeciality_query)\n )\n if modality_query and modality_query != \"All\":\n queryset = queryset.filter(Q(modality__icontains=modality_query))\n\n if ce_class_query and ce_class_query != \"All\":\n queryset = queryset.filter(Q(ce_class=ce_class_query))\n\n if (\n fda_class_query\n and fda_class_query != \"All\"\n and fda_class_query != \"No FDA\"\n ):\n queryset = queryset.filter(Q(fda_class=fda_class_query))\n elif fda_class_query == \"No FDA\":\n queryset = queryset.filter(Q(fda_class=\"\"))\n\n return queryset\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n subspeciality_query = self.request.GET.get(\"subspeciality\", \"All\")\n modality_query = self.request.GET.get(\"modality\", \"All\")\n ce_class_query = self.request.GET.get(\"ce_class\", \"All\")\n fda_class_query = self.request.GET.get(\"fda_class\", \"All\")\n search_query = self.request.GET.get(\"search\", \"\")\n subspecialities = [\n \"All\",\n \"Abdomen\",\n \"Breast\",\n \"Cardiac\",\n \"Chest\",\n \"MSK\",\n \"Neuro\",\n \"Other\",\n ]\n\n modalities = [\n \"All\",\n \"X-ray\",\n \"CT\",\n \"MR\",\n \"Ultrasound\",\n \"Mammography\",\n \"PET\",\n \"Other\",\n ]\n\n ce_classes = [\n \"All\",\n \"Class I\",\n \"Class IIa\",\n \"Class IIb\",\n \"Class III\",\n ]\n\n fda_classes = [\"All\", \"Class I\", \"Class II\", \"Class III\", \"No FDA\"]\n\n context.update(\n {\n \"q_search\": search_query,\n \"subspecialities\": subspecialities,\n \"modalities\": modalities,\n \"ce_classes\": ce_classes,\n \"fda_classes\": fda_classes,\n \"selected_subspeciality\": subspeciality_query,\n \"selected_modality\": modality_query,\n \"selected_ce_class\": ce_class_query,\n \"selected_fda_class\": fda_class_query,\n \"products_selected_page\": True,\n \"product_total\": context[\"object_list\"].count(),\n \"product_total_all\": self.product_total_all,\n }\n )\n return context\n\n\nclass ProductDetail(DetailView):\n model = Product\n\n\nclass CompanyList(ListView):\n model = Company\n context_object_name = \"companies\"\n queryset = Company.objects.order_by(\"company_name\")\n\n def get_queryset(self):\n queryset = super().get_queryset()\n search_query = self.request.GET.get(\"search\")\n\n if search_query:\n search_fields = [\"company_name\", \"description\", \"hq\"]\n q = reduce(\n or_,\n [\n Q(**{f\"{f}__icontains\": search_query})\n for f in search_fields\n ],\n Q(),\n )\n queryset = queryset.filter(q)\n return queryset\n\n def get_context_data(self, *args, **kwargs):\n context = 
super().get_context_data(*args, **kwargs)\n        search_query = self.request.GET.get(\"search\", \"\")\n\n        context.update(\n            {\n                \"q_search\": search_query,\n                \"companies_selected_page\": True,\n                \"company_total\": context[\"object_list\"].count(),\n            }\n        )\n        return context\n\n\nclass CompanyDetail(DetailView):\n    model = Company\n    context_object_name = \"company\"\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        products_by_company = self.object.product_set.order_by(\n            \"ce_status\", \"product_name\"\n        )\n        context.update({\"products_by_company\": products_by_company})\n\n        return context\n\n\nclass AboutPage(TemplateView):\n    template_name = \"products/about.html\"\n\n\nclass ContactPage(TemplateView):\n    template_name = \"products/contact.html\"\n\n\nclass ImportDataView(PermissionRequiredMixin, FormView):\n    template_name = \"products/import_data.html\"\n    form_class = ImportForm\n    permission_required = (\n        f\"{Product._meta.app_label}.add_{Product._meta.model_name}\"\n    )\n\n    def get_form_kwargs(self):\n        kwargs = super().get_form_kwargs()\n        kwargs.update({\"user\": self.request.user})\n        return kwargs\n\n    def form_valid(self, *args, **kwargs):\n        response = super().form_valid(*args, **kwargs)\n        form = self.get_form()\n        if form.is_valid():\n            di = DataImporter()\n            di.import_data(\n                product_data=form.cleaned_data[\"products_file\"],\n                company_data=form.cleaned_data[\"companies_file\"],\n                images_zip=form.cleaned_data[\"images_zip\"][0].open(),\n            )\n        return response\n\n    def get_success_url(self):\n        return reverse(\"products:product-list\")\n","sub_path":"app/grandchallenge/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"636655759","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom threading import Thread\n\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.views.decorators.cache import never_cache\n\n\n# This module holds utility functions shared across the whole site\n\n# Render a txt file directly; referenced from the URLconf\n@never_cache\ndef textrender(request, textfile):\n    return TemplateResponse(request, textfile, content_type=\"text/plain\")\n\n\n# Redirect to another URL; referenced from the URLconf\ndef wlredirect(request, to_url):\n    return HttpResponseRedirect(to_url)\n\n\n# Return the theme the system is currently using\ndef get_theme():\n    theme = \"wltheme\"\n    try:\n        from mainsys.config import WL_THEME\n        if len(WL_THEME.strip()):\n            theme = WL_THEME\n    except ImportError:\n        pass\n    return theme\n\n\n# Convert a value to a string according to its type\n# Note: the strings 'true'/'false' cannot be converted to '1'/'0'\ndef fieldtostr(field):\n    if field is None:\n        field = ''\n    elif isinstance(field, bool): # check bool before int (bool is an int subclass); convert to '0'/'1'\n        field = str(int(field))\n    elif isinstance(field, int) or isinstance(field, float):\n        field = str(field)\n    elif isinstance(field, str):\n        field = field.strip()\n    return field\n\n\n# Convert None to an empty string and strip whitespace from strings; used when exporting JSON\ndef kongtostr(field):\n    if field is None:\n        return ''\n    elif isinstance(field, str):\n        return field.strip()\n    return field\n\n\n# Convert a numeric value such as 1601 into a year/month string\ndef tmonthtostr(field):\n    if field is None:\n        return ''\n    try:\n        field = str(field)\n        return \"%s年%s月\" % (field[:2], field[2:])\n    except Exception:\n        return field\n\n\n# Decorator that limits a function's execution time\nclass TimeoutException(Exception):\n    pass\n\n\nThreadStop = Thread._stop # grab the private stop method\n\n\ndef timelimited(timeout):\n    def decorator(function):\n        def decorator2(*args, **kwargs):\n            class TimeLimited(Thread):\n                def __init__(self, _error=None, ):\n                    Thread.__init__(self)\n                    self._error = _error\n\n                
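# run() executes the wrapped function on this worker thread and records\n                # either its return value or the exception it raised.\n                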
def run(self):\n                    try:\n                        self.result = function(*args, **kwargs)\n                    except Exception as e:\n                        self._error = e\n\n                def _stop(self):\n                    if self.is_alive(): # is_alive(): the isAlive() alias was removed in Python 3.9\n                        ThreadStop(self)\n\n            t = TimeLimited()\n            t.start()\n            t.join(timeout)\n            if isinstance(t._error, TimeoutException):\n                t._stop()\n                raise TimeoutException('timeout for %s' % (repr(function)))\n            if t.is_alive():\n                t._stop()\n                raise TimeoutException('timeout for %s' % (repr(function)))\n            if t._error is None:\n                return t.result\n\n        return decorator2\n\n    return decorator\n","sub_path":"xyapps/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"575821382","text":"import gym\nimport math\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom itertools import count\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\n\nfrom torch.nn import init, Parameter\nfrom torch.autograd import Variable\n\n\nTransition = namedtuple('Transition',('state', 'action', 'next_state', 'reward'))\n\nclass ReplayMemory(object):\n\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.memory = []\n        self.position = 0\n\n    def push(self, *args):\n        \"\"\"Saves a transition.\"\"\"\n        if len(self.memory) < self.capacity:\n            self.memory.append(None)\n        self.memory[self.position] = Transition(*args)\n        self.position = (self.position + 1) % self.capacity\n\n    def sample(self, batch_size):\n        return random.sample(self.memory, batch_size)\n\n    def __len__(self):\n        return len(self.memory)\n\n\nclass DQN(nn.Module):\n    def __init__(self, noisy, nodes, input_size=25, output_size=5):\n        super(DQN, self).__init__()\n        self.relu = nn.ReLU()\n        if noisy == True:\n            self.fc1 = NoisyLinear(input_size, nodes)\n            self.fc_value = NoisyLinear(nodes, 16)\n            self.fc_adv = NoisyLinear(nodes, 16)\n            self.value = NoisyLinear(16, 1)\n            self.adv = NoisyLinear(16, output_size)\n        else:\n            self.fc1 = nn.Linear(input_size, nodes)\n            self.fc_value = nn.Linear(nodes, 16)\n            self.fc_adv = nn.Linear(nodes, 16)\n            self.value = nn.Linear(16, 1)\n            self.adv = nn.Linear(16, output_size)\n\n    def forward(self, state):\n        y = self.relu(self.fc1(state))\n        value = self.relu(self.fc_value(y))\n        adv = self.relu(self.fc_adv(y))\n\n        value = self.value(value)\n        adv = self.adv(adv)\n\n        # print('adv: ', adv)\n        # print('value: ', value)\n\n        advAverage = torch.mean(adv, dim=1, keepdim=True)\n        Q = value + adv - advAverage\n\n        #print('Q: ', Q)\n        return Q\n\n    def select_action(self, state):\n        with torch.no_grad():\n            Q = self.forward(state)\n            action_index = torch.argmax(Q, dim=1)\n        return action_index\n\n    def sample_noise(self):\n        self.fc1.sample_noise()\n        self.fc_value.sample_noise()\n        self.fc_adv.sample_noise()\n        self.value.sample_noise()\n        self.adv.sample_noise()\n\n\n\n# Noisy linear layer with independent Gaussian noise\nclass NoisyLinear(nn.Linear):\n    def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):\n        super(NoisyLinear, self).__init__(in_features, out_features, bias=True)\n        # µ^w and µ^b reuse self.weight and self.bias\n        self.sigma_init = sigma_init\n        self.sigma_weight = Parameter(torch.Tensor(out_features, in_features)) # σ^w\n        self.sigma_bias = Parameter(torch.Tensor(out_features)) # σ^b\n        self.register_buffer('epsilon_weight', torch.zeros(out_features, in_features))\n        self.register_buffer('epsilon_bias', 
torch.zeros(out_features))\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        if hasattr(self, 'sigma_weight'): # Only init after all params added (otherwise super().__init__() fails)\n            init.uniform_(self.weight, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))\n            init.uniform_(self.bias, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))\n            init.constant_(self.sigma_weight, self.sigma_init)\n            init.constant_(self.sigma_bias, self.sigma_init)\n\n    def forward(self, input):\n        return F.linear(input, self.weight + self.sigma_weight * Variable(self.epsilon_weight), self.bias + self.sigma_bias * Variable(self.epsilon_bias))\n\n    def sample_noise(self):\n        self.epsilon_weight = torch.randn(self.out_features, self.in_features)\n        self.epsilon_bias = torch.randn(self.out_features)\n\n    def remove_noise(self):\n        self.epsilon_weight = torch.zeros(self.out_features, self.in_features)\n        self.epsilon_bias = torch.zeros(self.out_features)\n","sub_path":"DayAhead/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"626693939","text":"\n\n# Project Euler 4: largest palindrome made from the product of two 3-digit numbers\ncurrenthighest = 0\nfor i in range(100, 1000):\n    for j in range(100, 1000):\n        tempval = j*i\n        string = str(tempval)\n        # a palindrome reads the same forwards and backwards\n        if string == string[::-1] and tempval > currenthighest:\n            currenthighest = tempval\nprint(currenthighest)\n","sub_path":"python3stuff/eulers/euler4.py","file_name":"euler4.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"387167014","text":"from csv import DictReader\nfrom sys import argv, exit\n\n\ndef main():\n\n    # Handle command line arguments\n    if len(argv) != 3:\n        print(\"Usage: python dna.py data.csv sequence.txt\")\n        exit(1)\n\n    db_path = argv[1]\n    seq_path = argv[2]\n\n    # Open csv file and convert to a list of dicts\n    with open(db_path, \"r\") as csvfile:\n        reader = DictReader(csvfile)\n        dict_list = list(reader)\n\n    # Open sequences file and append to list\n    with open(seq_path, \"r\") as file:\n        sequence = file.read()\n        seq_list = [char for char in sequence]\n\n    # For each STR, compute longest run of consecutive repeats in sequence\n\n    # Loop through each STR\n    max_counts = []\n\n    for i in range(1, len(reader.fieldnames)):\n        STR = reader.fieldnames[i]\n        max_counts.append(0)\n\n        # Loop through sequence to find STR\n        for j in range(len(sequence)):\n            STR_count = 0\n\n            # If match found, start counting repeats\n            if sequence[j:(j + len(STR))] == STR:\n                k = 0\n                while sequence[(j + k):(j + k + len(STR))] == STR:\n                    STR_count += 1\n                    k += len(STR)\n            # If new maximum of repeats, update max_counts\n            if STR_count > max_counts[i - 1]:\n                max_counts[i - 1] = STR_count\n\n    # Compare against data\n    for i in range(len(dict_list)):\n        matches = 0\n        for j in range(1, len(reader.fieldnames)):\n\n            if int(max_counts[j - 1]) == int(dict_list[i][reader.fieldnames[j]]):\n                matches += 1\n        if matches == (len(reader.fieldnames) - 1):\n            print(dict_list[i]['name'])\n            exit(0)\n\n    print(\"No match\")\n\n\nmain()","sub_path":"pset6/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"252226067","text":"from tx2.UserReg.models import RegisterUser\nfrom tx2.UserReg.DBFunctions import 
DBRegUserUpdate,DBRegUserInsert\nfrom tx2.CONFIG import LOGGER_UserReg\nimport logging\n\nclass UserRegFnx():\n\tdef __init__(self):\n\t\tself.Logger = logging.getLogger(LOGGER_UserReg)\n\t\t\n\tdef Create(self, MetaInfo, Desc, Users, Record, ContentType, Operation, by, ip):\n\t\t# sketch: the original body referenced these names without defining them,\n\t\t# so they are taken as parameters here\n\t\ttry:\n\t\t\tdetails = {\n\t\t\t\t\t'MetaInfo':MetaInfo,\n\t\t\t\t\t'Desc':Desc,\n\t\t\t\t\t'Users':Users,\n\t\t\t\t\t'Record':Record,\n\t\t\t\t\t'ContentType':ContentType,\n\t\t\t\t\t'Operation':Operation,\n\t\t\t\t\t'by':by,\n\t\t\t\t\t'ip':ip\n\t\t\t\t}\n\t\t\treturn (0, details)\n\t\texcept Exception:\n\t\t\tself.Logger.exception('[%s] == Exception =='%('Create'))\n\t\t\treturn (-1,'Error at business level functions while creating group')\n","sub_path":"tx2/UserReg/BusinessFunctions.py/UserRegFunctions.py","file_name":"UserRegFunctions.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"639460549","text":"#!/usr/bin/env python\n#title : rishicontest.py\n#description : The code below was written for the intention of utilizing FBOOK Graph API to monitor a facebook picture contest.\n#author : Anurag Singhal\n#date : 20150407\n#=======================================================================================================================\n\nimport urllib2\nimport json\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\nstyle.use('ggplot')\n# %matplotlib inline #use for Python Notebook \n\nucla=urllib2.urlopen('https://graph.facebook.com/10153269446483338/likes?summary=1') #input graph api code\nucb= urllib2.urlopen('https://graph.facebook.com/10153269445858338/likes?summary=1')\nucsd= urllib2.urlopen('https://graph.facebook.com/10153269446748338/likes?summary=1')\nucr=urllib2.urlopen('https://graph.facebook.com/10153269446023338/likes?summary=1')\nuci= urllib2.urlopen('https://graph.facebook.com/10153269446223338/likes?summary=1')\nucd=urllib2.urlopen('https://graph.facebook.com/10153269446218338/likes?summary=1')\nnorthw=urllib2.urlopen('https://graph.facebook.com/10153269445868338/likes?summary=1')\ncpp=urllib2.urlopen('https://graph.facebook.com/10153269445908338/likes?summary=1')\n\nchapters = ['UCLA', 'UCB', 'UCR', 'UCSD', 'UCI', 'NW', 'CPP', 'UCD'] #list of x axis labels\n\n\nchapters_url=[ucla, ucb, ucr, ucsd, uci, northw, cpp, ucd] \nchapters_dat=[]\nchapters_jsondec=[]\ncomp_summary=[]\n\n\ndef get_json(photo_url):\n\t\"\"\"Function takes the url and reads it like a \n\tJSON \"\"\"\n\treturn photo_url.read()\n\ndef dec_json(dat_json):\n\t\"\"\"Function decodes the JSON so it can be accessed \n\tlike a dictionary\"\"\"\n\treturn json.loads(dat_json)\n\ndef get_summary(dat):\n\t\"\"\"Function calls on the summary of the picture in \n\tthis case the Total Number of Likes the picture has\"\"\"\n\treturn dat['summary']['total_count']\n\nfor chapter in chapters_url:\n\tchapters_dat.append(get_json(chapter)) \n#print (chapters_dat)\n\nfor data in chapters_dat:\n\tchapters_jsondec.append(dec_json(data))\n#print (chapters_jsondec)\n\nfor data in chapters_jsondec:\n\tcomp_summary.append(get_summary(data))\n#print(chapters_jsondec)\n\ncomp_update=dict(zip(chapters, comp_summary))\nplt.bar(range(len(comp_update)), comp_update.values(), color='#ff6701', align='center')\nplt.title('RISHI Competition Update')\nplt.ylabel('# of Likes')\nplt.xticks(range(len(comp_update)), comp_update.keys())\nplt.xlabel('Chapter')\n\nplt.show()\nprint 
(comp_update)","sub_path":"rishicontest.py","file_name":"rishicontest.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"344746530","text":"import numpy as np\r\nimport torchvision.models as models\r\nimport torchvision.datasets as dset\r\nimport os\r\nimport torch\r\nimport argparse\r\nimport random\r\nimport torchvision.transforms as transforms\r\nimport os, sys\r\nif sys.version_info[0] == 2:\r\n\timport cPickle as pickle\r\nelse:\r\n\timport pickle\r\nfrom PIL import Image\r\n\r\nparser = argparse.ArgumentParser(\"sota\")\r\nparser.add_argument('--gpu', type=str, default='0', help='set visible gpus')\r\nparser.add_argument('--data-path', type=str, default='data', help='the path of save directory')\r\nparser.add_argument('--dataset', type=str, default='cifar10', help='choose dataset')\r\nparser.add_argument('--seed', type=int, default=-1, help='random seed')\r\nargs = parser.parse_args()\r\n\r\nif args.seed is None or args.seed < 0: args.seed = random.randint(1, 100000)\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\r\nnp.random.seed(args.seed)\r\nrandom.seed(args.seed)\r\n\r\n# remove last fully-connected layer\r\nmodel = models.resnet18(pretrained=True).eval()\r\nfeature_extractor = torch.nn.Sequential(*list(model.children())[:-1])\r\n\r\n\r\ndef get_transform(dataset):\r\n\tif args.dataset == 'mnist':\r\n\t\tmean, std = [0.1307, 0.1307, 0.1307], [0.3081, 0.3081, 0.3081]\r\n\telif args.dataset == 'svhn':\r\n\t\tmean, std = [0.4376821, 0.4437697, 0.47280442], [0.19803012, 0.20101562, 0.19703614]\r\n\telif args.dataset == 'cifar10':\r\n\t\tmean = [x / 255 for x in [125.3, 123.0, 113.9]]\r\n\t\tstd = [x / 255 for x in [63.0, 62.1, 66.7]]\r\n\telif args.dataset == 'cifar100':\r\n\t\tmean = [x / 255 for x in [129.3, 124.1, 112.4]]\r\n\t\tstd = [x / 255 for x in [68.2, 65.4, 70.4]]\r\n\telif args.dataset == 'imagenet32':\r\n\t\tmean = [x / 255 for x in [122.68, 116.66, 104.01]]\r\n\t\tstd = [x / 255 for x in [66.22, 64.20, 67.86]]\r\n\r\n\ttransform = transforms.Compose([\r\n\t\ttransforms.Resize((32, 32)),\r\n\t\ttransforms.ToTensor(),\r\n\t\ttransforms.Normalize(mean, std),\r\n\t])\r\n\tif dataset == 'mnist':\r\n\t\ttransform.transforms.append(transforms.Lambda(lambda x: x.repeat(3, 1, 1)))\r\n\treturn transform\r\n\r\n\r\ndef process(dataset, n_classes):\r\n\tdata_label = {i: [] for i in range(n_classes)}\r\n\tfor x, y in dataset:\r\n\t\tdata_label[y].append(x)\r\n\tfor i in range(n_classes):\r\n\t\tdata_label[i] = torch.stack(data_label[i])\r\n\t\r\n\tholder = {i: [] for i in range(n_classes)}\r\n\tfor i in range(n_classes):\r\n\t\twith torch.no_grad():\r\n\t\t\tdata = feature_extractor(data_label[i])\r\n\t\t\tholder[i].append(data.squeeze())\r\n\treturn holder\r\n\r\n\r\n\r\nclass ImageNet32(object):\r\n\ttrain_list = [\r\n\t\t['train_data_batch_1', '27846dcaa50de8e21a7d1a35f30f0e91'],\r\n\t\t['train_data_batch_2', 'c7254a054e0e795c69120a5727050e3f'],\r\n\t\t['train_data_batch_3', '4333d3df2e5ffb114b05d2ffc19b1e87'],\r\n\t\t['train_data_batch_4', '1620cdf193304f4a92677b695d70d10f'],\r\n\t\t['train_data_batch_5', '348b3c2fdbb3940c4e9e834affd3b18d'],\r\n\t\t['train_data_batch_6', '6e765307c242a1b3d7d5ef9139b48945'],\r\n\t\t['train_data_batch_7', '564926d8cbf8fc4818ba23d2faac7564'],\r\n\t\t['train_data_batch_8', 'f4755871f718ccb653440b9dd0ebac66'],\r\n\t\t['train_data_batch_9', 'bb6dd660c38c58552125b1a92f86b5d4'],\r\n\t\t['train_data_batch_10', 
'8f03f34ac4b42271a294f91bf480f29b'],\r\n\t]\r\n\tvalid_list = [\r\n\t\t['val_data', '3410e3017fdaefba8d5073aaa65e4bd6'],\r\n\t]\r\n\t\r\n\tdef __init__(self, root, n_class, transform):\r\n\t\tself.transform = transform\r\n\t\tdownloaded_list = self.train_list\r\n\t\tself.n_class = n_class\r\n\t\tself.data_label = {i: [] for i in range(n_class)}\r\n\t\tself.data = []\r\n\t\tself.targets = []\r\n\t\t\r\n\t\tfor i, (file_name, checksum) in enumerate(downloaded_list):\r\n\t\t\tfile_path = os.path.join(root, file_name)\r\n\t\t\twith open(file_path, 'rb') as f:\r\n\t\t\t\tif sys.version_info[0] == 2:\r\n\t\t\t\t\tentry = pickle.load(f)\r\n\t\t\t\telse:\r\n\t\t\t\t\tentry = pickle.load(f, encoding='latin1')\r\n\t\t\t\tfor j, k in enumerate(entry['labels']):\r\n\t\t\t\t\tself.data_label[k - 1].append(entry['data'][j])\r\n\t\t\r\n\t\tfor i in range(n_class):\r\n\t\t\tself.data_label[i] = np.vstack(self.data_label[i]).reshape(-1, 3, 32, 32)\r\n\t\t\tself.data_label[i] = self.data_label[i].transpose((0, 2, 3, 1)) # convert to HWC\r\n\t\r\n\tdef get(self, use_num_cls, max_num=None):\r\n\t\tassert isinstance(use_num_cls, list) \\\r\n\t\t       and len(use_num_cls) > 0 and len(use_num_cls) < self.n_class, \\\r\n\t\t\t'invalid use_num_cls : {:}'.format(use_num_cls)\r\n\t\tnew_data, new_targets = [], []\r\n\t\tfor i in use_num_cls:\r\n\t\t\tnew_data.append(self.data_label[i][:max_num] if max_num is not None else self.data_label[i])\r\n\t\t\tnew_targets.extend([i] * max_num if max_num is not None\r\n\t\t\t                   else [i] * len(self.data_label[i]))\r\n\t\tself.data = np.concatenate(new_data)\r\n\t\tself.targets = new_targets\r\n\t\t\r\n\t\timgs = []\r\n\t\tfor img in self.data:\r\n\t\t\timg = Image.fromarray(img)\r\n\t\t\timg = self.transform(img)\r\n\t\t\twith torch.no_grad():\r\n\t\t\t\timgs.append(feature_extractor(img.unsqueeze(0)).squeeze().unsqueeze(0))\r\n\t\treturn torch.cat(imgs)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tncls = {'mnist': 10, 'svhn': 10, 'cifar10': 10, 'cifar100': 100, 'imagenet32': 1000}\r\n\ttransform = get_transform(args.dataset)\r\n\tif args.dataset == 'imagenet32':\r\n\t\t# the parser only defines --data-path (described as the save directory),\r\n\t\t# so it serves both as the data root and the save location here\r\n\t\timgnet32 = ImageNet32(args.data_path, ncls[args.dataset], transform)\r\n\t\tdata_label = {i: [] for i in range(1000)}\r\n\t\tfor i in range(1000):\r\n\t\t\tm = imgnet32.get([i])\r\n\t\t\tdata_label[i].append(m)\r\n\t\t\tif i % 10 == 0:\r\n\t\t\t\tprint(f'Currently saving features of {i}-th class')\r\n\t\t\t\ttorch.save(data_label, f'{args.data_path}/{args.dataset}bylabel.pt')\r\n\telse:\r\n\t\tif args.dataset == 'mnist':\r\n\t\t\tdata = dset.MNIST(args.data_path, train=True, transform=transform, download=True)\r\n\t\telif args.dataset == 'svhn':\r\n\t\t\tdata = dset.SVHN(args.data_path, split='train', transform=transform, download=True)\r\n\t\telif args.dataset == 'cifar10':\r\n\t\t\tdata = dset.CIFAR10(args.data_path, train=True, transform=transform, download=True)\r\n\t\telif args.dataset == 'cifar100':\r\n\t\t\tdata = dset.CIFAR100(args.data_path, train=True, transform=transform, download=True)\r\n\t\tdataset = process(data, ncls[args.dataset])\r\n\t\ttorch.save(dataset, f'{args.data_path}/{args.dataset}bylabel.pt')\r\n\r\n","sub_path":"MetaD2A_nas_bench_201/process_dataset.py","file_name":"process_dataset.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"500346897","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 18 17:04:57 2020\n\n@author: abdullahsaid\n\"\"\"\n\nimport torch\nfrom 
torch.utils.data import Dataset, DataLoader,RandomSampler\nfrom torchvision import datasets, models, transforms\nimport torchvision.transforms.functional as TF\nfrom PIL import Image\nimport pandas as pd\nimport os\nimport random\nimport segmentation_models_pytorch as smp\nimport numpy as np\nfrom tqdm import tqdm, tnrange\nimport time\nimport copy\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torch.nn.functional as F\nimport lovasz_losses as L\n\n\npath = os.path.join(os.path.expanduser('~'),'floodDetection','dataSet')\n\nwith open(os.path.join(path,'norm.npy'), 'rb') as f:\n mean = np.load(f)\n std = np.load(f)\n\n#imagenet weights used if hand is not included since the model is initlized with image net weights\n#when hand is included since four channels are used the models first layer is randoly initilized \nimgNetMean = [0.485, 0.456, 0.406]\nimgNetStd = [0.229, 0.224, 0.225]\n\ntrainDS = os.path.join(path,'trainFinal3.csv')\nvalDS = os.path.join(path,'valFinal3.csv')\n\nclass harveyDataset(Dataset):\n '''\n inputs:\n \n csvFile: is the dataset with file names\n hand: boolean if hand value should be included\n phase: if train or validation then apply augmentation otherwise no augmenation is used\n transform: augmentation that will be applied\n p: probability of applying a random augmentation\n \n output:\n \n Iterable dataset for a dataloader\n '''\n def __init__(self, csvFile, hand, phase, transform = None, p=.5):\n self.data = pd.read_csv(csvFile)\n self.phase = phase\n self.hand = hand\n self.transform = transform\n self.p = p\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n img = Image.open(os.path.join(path,self.data.loc[idx,'file']+'.jpg'))\n if self.phase in ('train','val'):\n seg = Image.open(os.path.join(path,self.data.loc[idx,'file']+'.png'))\n if self.hand == True:\n hnd = Image.open(os.path.join(path,self.data.loc[idx,'file']+'.tif'))\n if self.transform:\n imageHand, label = self.transform(image=img,hand=hnd,\n seg=seg,p=self.p)\n return imageHand, label*255\n else:\n image = TF.to_tensor(img)\n hand = TF.to_tensor(hnd)\n label = TF.to_tensor(seg)\n imageHand = torch.cat([image,hand],dim=0)\n imageHand = TF.normalize(imageHand, mean=mean, std = std)\n return imageHand, label*255\n else:\n if self.transform:\n image, label = self.transform(image=img,hand=None,\n seg=seg,p=self.p)\n return image, label*255\n else:\n image = TF.to_tensor(img)\n label = TF.to_tensor(seg)\n image = TF.normalize(image, mean=imgNetMean, std = imgNetStd)\n return image, label*255\n else:\n image = TF.to_tensor(img)\n image = TF.normalize(image, mean=imgNetMean, std = imgNetStd)\n return image, self.data.loc[idx,'file']\n\n\n#augment function to insure that the same augmentation is used on labels and hand\ndef augmentation(image, seg, hand=None, p=.5):\n if hand:\n if random.random() < p:\n angle = random.randint(-45,45)\n image = TF.rotate(image,angle)\n hand = TF.rotate(hand,angle,fill=0.) 
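# rotate the HAND channel with the image; fill exposed corners with 0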
\n seg = TF.rotate(seg,angle,fill=2) #if rotation leaves edges black fill value with 2 to ignore\n if random.random() < p:\n image = TF.hflip(image)\n hand = TF.hflip(hand)\n seg = TF.hflip(seg)\n if random.random() < p:\n image = TF.vflip(image)\n hand = TF.vflip(hand)\n seg = TF.vflip(seg)\n if random.random() < p:\n image = TF.adjust_saturation(image,random.uniform(.5,3)) #only applicable to image\n if random.random() < p:\n image = TF.adjust_contrast(image,random.uniform(.5,3)) #only applicable to image\n \n \n image = TF.to_tensor(image)\n hand = TF.to_tensor(hand)\n segmentation = TF.to_tensor(seg)\n imageHand = torch.cat([image,hand],dim=0)\n imageHand = TF.normalize(imageHand, mean=mean, std = std)\n return imageHand, segmentation\n else:\n if random.random() < p:\n angle = random.randint(-45,45)\n image = TF.rotate(image,angle)\n seg = TF.rotate(seg,angle,fill=2)\n if random.random() < p:\n image = TF.hflip(image)\n seg = TF.hflip(seg)\n if random.random() < p:\n image = TF.vflip(image)\n seg = TF.vflip(seg)\n if random.random() < p:\n image = TF.adjust_saturation(image,random.uniform(.5,3))\n if random.random() < p:\n image = TF.adjust_contrast(image,random.uniform(.5,3))\n\n image = TF.to_tensor(image)\n segmentation = TF.to_tensor(seg)\n image = TF.normalize(image,mean=imgNetMean, std=imgNetStd)\n return image, segmentation\n\n#training procedure, with a annealing cosine scheduler for learning rate\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n iters = len(dataloaders['train'])\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n # Iterate over data.\n for i, (inputs, labels) in zip(tqdm(range(len(dataloaders[phase]))),dataloaders[phase]):\n inputs = inputs.to(device)\n labels = labels.to(device)\n if phase == 'train':\n scheduler.step(epoch + i / iters)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n output = model(inputs)\n pred = (nn.Sigmoid()(output)>.5).long()\n loss = criterion(output, labels, ignore=2)\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics \n running_loss += loss.item() * inputs.size(0)\n running_corrects += L.iou_binary(pred,labels,ignore=2,per_image=False\n ) * inputs.size(0)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects / dataset_sizes[phase]\n\n print('{} Overall Loss: {:.4f} IoU: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n \n log=open(os.path.join(path,'NohandEpoch.txt'),'a')\n log.writelines('{} No Hand Overall Loss: {:.4f} No Hand IoU: {:.4f}\\n\\n'.format(\n phase, epoch_loss, epoch_acc))\n log.close()\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val IoU: {:4f}'.format(best_acc))\n\n # load best model weights\n 
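# (epoch_acc above is IoU computed with L.iou_binary, ignoring pixels labelled 2)\n    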
model.load_state_dict(best_model_wts)\n return model\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ntrain=harveyDataset(trainDS, hand=False, phase='train',transform=augmentation,p=.5)\nval=harveyDataset(valDS, hand=False, phase='val',transform=augmentation,p=.5)\n\ndataDict = {'train': train,\n 'val': val}\n\nsampler = {'train': RandomSampler(train, replacement=False),\n 'val': RandomSampler(val, replacement=False)}\n\n\ndataloaders = {x: DataLoader(dataDict[x], batch_size=4, sampler=sampler[x],\n num_workers=0, pin_memory=True) for x in ['train','val']}\n\ndataset_sizes = {x: len(sampler[x]) for x in ['train', 'val']}\n\n\n#Since we are using BCEWithLogitsLoss Sigmoid activation is done inside that function\n#More stable than using BCLoss function\n\nmodel = smp.Unet('efficientnet-b2',in_channels=3,encoder_weights='imagenet',\n classes=1, activation=None, encoder_depth=5,\n decoder_channels = (1024, 512, 256, 128, 64))\n\n\nmodel = model.to(device)\n\ncriterion = L.binary_xloss #cross entropy this implimentation allows to ignore pixel labels of value 2 in our case\n\noptimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)\n\n#SGD with warm resets after 200 iters with a factor of 2 for growth in iter size before next rest\nexp_lr_scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer,200,2)\n\nmodel = train_model(model, criterion, optimizer, exp_lr_scheduler,\n num_epochs=25)\n\ntorch.save(model.state_dict(), os.path.join(os.path.dirname(path),'noHandScratch.pt'))\n","sub_path":"Models/noHandScratch.py","file_name":"noHandScratch.py","file_ext":"py","file_size_in_byte":9823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"485940280","text":"import json\nimport shlex\nimport re\nimport random\nimport math\n\nfrom bot.events import Callback, command\nfrom util.text import strikethrough, smallcaps\n\n\nclass Queue(Callback):\n QFILE = \"queues.json\"\n\n def __init__(self, server):\n self.qfile = server.get_config_dir(self.QFILE)\n try:\n with open(self.qfile) as f:\n self.queues = json.load(f)\n except:\n self.queues = {}\n super().__init__(server)\n \n\n def find(self, queue, query=None):\n q = [(i[0] + 1, i[1]) for i in enumerate(queue)]\n\n if query is None:\n return q\n elif re.match(r\"^\\d+((,|\\s)+\\d+)*$\", query):\n return sorted([q[int(i)-1] for i in set(query.replace(\",\", \" \").split())])\n elif re.match(r\"^\\d*-\\d*$\", query):\n start, stop = [int(i) if i else None for i in query.split('-')]\n if start: start -= 1\n return q[start:stop]\n else:\n exact = [i for i in q if i[1].lower() == query.lower()]\n if exact:\n return exact\n else:\n try:\n query = set(shlex.split(query.lower()))\n except:\n query = set(query.lower().split())\n hidden = {\"hidden\", \"done\"} - query\n exclude = {i for i in query if i.startswith(\"-\")} | {'-' + i for i in hidden}\n include = query - exclude\n q = [i for i in q if all(k.lstrip(\"#\") in [j.lstrip(\"#\") for j in i[1].lower().split()] for k in include)]\n q = [i for i in q if not any(k[1:].lstrip(\"#\") in [j.lstrip(\"#\") for j in i[1].lower().split()] for k in exclude)]\n\n return q\n\n\n def display(self, num, line):\n points = re.split(r\"\\s*(\\[(?:\\d+/)?\\d+\\])\\s*\", line, maxsplit=1)\n vis = \"│\"\n if len(points) == 3:\n line = \"%s %s\" % (points[0], points[-1])\n points = [float(x) for x in points[1][1:-1].split(\"/\")]\n align = math.ceil(points[-1]/5) * 5\n total = points[-1]\n if len(points) > 1:\n done = points[0]\n 
else:\n done = 0\n vis = '┝' + \"━\" * math.ceil(total - done) + '\u000315' + \"─\" * math.ceil(done) + \" \" * (align - math.ceil(total))\n return \"\u000306│ %s %s\u0003 %s\" % (num, vis, re.sub(r\"#(\\S+)\", lambda x: r\"\u000315%s\u000F\" % smallcaps(x.group(1)), line))\n\n def displayAll(self, lines, max=25):\n for count, i in enumerate(lines):\n if max - 1 <= count and max != len(lines):\n yield \"\u000306│ %d of %d items displayed.\" % (count, len(lines))\n return\n yield self.display(*i)\n\n @command(\"list\", r\"(.*)\")\n def list(self, server, msg, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n if not queue:\n yield \"\u000306│\u0003 Your queue is empty. \"\n return\n\n q = self.find(queue, query)\n\n if not q:\n yield \"\u000306│\u0003 No matching items.\"\n return\n\n yield from self.displayAll(q, 25 if msg.prefix == '!' else 5)\n\n\n @command(\"choose\", r\"^([^\\d,]*)$\")\n def choose(self, server, msg, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n if not queue:\n return \"\u000306│\u0003 Your queue is empty. \"\n\n q = self.find(queue, query)\n\n if not q:\n return \"\u000306│\u0003 No matching items.\"\n\n return self.display(*random.choice(q))\n\n @command(\"queue todo\", r\"(.+)\")\n def queue(self, server, message, item):\n nick = message.address.nick\n queue = self.queues.setdefault(server.lower(nick), [])\n queue.append(item)\n self.save()\n return self.display(len(queue), item)\n\n @command(\"edit replace\", r\"(\\d+)\\s+(.+)\")\n def edit(self, server, message, index, item):\n nick = message.address.nick\n queue = self.queues.setdefault(server.lower(nick), [])\n index = int(index)\n if index > len(queue):\n queue.append(\"\")\n index = len(queue)\n queue[index - 1] = item\n self.save()\n return self.display(index, item)\n\n @command(\"push prepend\", r\"(.+)\")\n def push(self, server, message, item):\n nick = message.address.nick\n queue = self.queues.setdefault(server.lower(nick), [])\n queue.insert(0, item)\n self.save()\n return self.display(len(queue), item)\n\n @command(\"pop\", r\"(.*)\")\n def pop(self, server, msg, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n\n if not queue:\n yield \"\u000306│\u0003 Your queue is empty. \"\n return\n\n if not query: \n query = \"1\"\n\n q = self.find(queue, query)\n\n if not q:\n yield \"\u000306│\u0003 No matching items.\"\n return\n\n for i in sorted(q, key=lambda x:-x[0]):\n queue.pop(i[0]-1)\n\n yield from self.displayAll([('✓' if len(q) == 1 else i[0], strikethrough(i[1])) for i in q], 25 if msg.prefix == '!' else 5)\n\n self.save()\n\n @command(\"peek\", r\"(.*)\")\n def peek(self, server, message, query):\n nick = server.lower(message.address.nick)\n queue = self.queues.setdefault(nick, [])\n if not queue:\n return \"\u000306│\u0003 Your queue is empty. \"\n\n q = self.find(queue, query)\n\n if not q:\n return \"\u000306│\u0003 No matching items.\"\n\n return self.display(*q[0])\n \n @command(\"next promote\", r\"(.+)\")\n def promote(self, server, msg, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n\n q = self.find(queue, query)\n\n if not q:\n queue.append(query)\n q = [(len(queue), query)]\n\n q.reverse()\n\n for i, item in q:\n queue.pop(i-1)\n for i, item in q:\n queue.insert(0, item)\n\n q.reverse()\n\n yield from self.displayAll([(i+1, item[1]) for i, item in enumerate(q)], 25 if msg.prefix == '!' 
else 5)\n\n self.save()\n \n @command(\"last demote\", r\"(.+)\")\n def demote(self, server, msg, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n\n q = self.find(queue, query)\n\n if not q:\n queue.append(query)\n q = [(len(queue), query)]\n\n updated = []\n\n q.reverse()\n\n for i, item in q:\n queue.pop(i-1)\n q.reverse()\n for i, item in q:\n queue.append(item)\n updated.append((len(queue), item))\n\n yield from self.displayAll(updated, 25 if msg.prefix == '!' else 5)\n\n self.save()\n\n @command(\"insert\", \"(\\d+)\\s+(.+)\")\n def insert(self, server, msg, index, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n\n q = self.find(queue, query)\n\n if not q:\n queue.append(query)\n q = [(len(queue), query)]\n\n updated = []\n\n index = int(index)\n if len(queue) - len(q) + 1 < index:\n index = len(queue) - len(q)\n index -= 1\n\n q.reverse()\n\n for i, item in q:\n queue.pop(i-1)\n queue.insert(index, item)\n q.reverse()\n for i, item in enumerate(q):\n updated.append((index + i+1, item[1]))\n\n yield from self.displayAll(updated, 25 if msg.prefix == '!' else 5)\n\n self.save()\n\n @command(\"tag\", r\"(#\\S+(?:\\s+#\\S+)*)\\s+(.+)\")\n def tag(self, server, msg, tag, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n if not queue:\n yield \"\u000306│\u0003 Your queue is empty. \"\n return\n q = self.find(queue, query)\n\n if not q:\n yield \"\u000306│\u0003 No matching items.\"\n return\n\n for i, item in q:\n tags = [i for i in tag.split() if i.lower() not in item.lower()]\n queue[i-1] = item + ' ' + ' '.join(tags)\n\n yield from self.displayAll([(i[0], queue[i[0]-1]) for i in q], 25 if msg.prefix == '!' else 5)\n\n self.save()\n\n @command(\"untag\", r\"(#\\S+(?:\\s+#\\S+)*)(?:\\s+(.+))?\")\n def untag(self, server, msg, tags, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n tags = tags.split()\n if not queue:\n yield \"\u000306│\u0003 Your queue is empty. \"\n return\n q = self.find(queue, query)\n\n if not q:\n yield \"\u000306│\u0003 No matching items.\"\n return\n\n tagged = []\n\n for i, item in q:\n fixed = re.sub(\"( ?(%s))\" % (\"|\".join(re.escape(x) for x in tags)), \"\", queue[i-1], re.IGNORECASE)\n if queue[i-1] != fixed:\n queue[i-1] = fixed\n tagged.append((i, fixed))\n\n yield from self.displayAll(tagged, 25 if msg.prefix == '!' else 5)\n\n self.save()\n\n @command(\"score\", r\"((?:\\d+/)?\\d+|[+-]\\d+)\\s+(.+)\")\n def score(self, server, msg, score, query):\n nick = server.lower(msg.address.nick)\n queue = self.queues.setdefault(nick, [])\n if not queue:\n yield \"\u000306│\u0003 Your queue is empty. \"\n return\n q = self.find(queue, query)\n\n if not q:\n yield \"\u000306│\u0003 No matching items.\"\n return\n\n for i, item in q:\n split = re.split(r\"(\\[(?:\\d+/)?\\d+\\])\", item, maxsplit=1)\n if len(split) == 3:\n queue[i-1] = split[0] + '[' + score + ']' + split[2]\n else:\n queue[i-1] = item + ' ' + '[' + score + ']'\n # TODO: relative scoring and velocity\n\n yield from self.displayAll([(i[0], queue[i[0]-1]) for i in q], 25 if msg.prefix == '!' 
else 5)\n\n self.save()\n\n # TODO: Alter hidden tags\n @command(\"hide\", r\"(.+)\")\n def hide(self, server, msg, query):\n yield from self.tag.funct(self, server, msg, \"#hidden\", query)\n\n # TODO: Alter hidden tags\n @command(\"unhide\", r\"(.+)\")\n def unhide(self, server, msg, query):\n yield from self.untag.funct(self, server, msg, \"#hidden\", query) \n\n def save(self):\n with open(self.qfile, \"w\") as f:\n json.dump(self.queues, f)\n\n\n__initialise__ = Queue","sub_path":"plugins/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":10421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"597001569","text":"import subprocess\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport time\n\ninterpreter = tf.lite.Interpreter(model_path='the_tflite.tflite')\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\ncam = cv2.VideoCapture(0)\nword = ''\n\nnum = 0\nwhile True:\n\t#display camera input\n\tret_val, image = cam.read()\n\timage = cv2.flip(image, 1)\n\t\n\t#take image and process\n\timg = subprocess.call(\"imagesnap -q -w 0.01 img.jpeg\", shell=True)\n\timg = Image.open(\"img.jpeg\")\n\timg = img.resize((224, 224))\n\n\tinput_data = np.array(img)\n\tinput_data = input_data.astype(np.float32)\n\tinput_data = np.expand_dims(input_data, axis=0)\n\n\tinterpreter.set_tensor(input_details[0]['index'], input_data) \n\n\ttoc = time.time()\n\tinterpreter.invoke()\n\ttic = time.time()\n\tthe_time = tic - toc\n\n\toutput_data = interpreter.get_tensor(output_details[0]['index'])\n\toutput = np.argmax(output_data)\n\tif output == 1:\n\t\tprint('one')\n\t\tword = 'one'\n\telse:\n\t\tprint('five')\n\t\tword = 'five'\n\n\t#show camera input\n\tcv2.imshow('my webcam', image)\n\tif cv2.waitKey(1) == 27: \n\t\tbreak # esc to quit\n\ncv2.destroyAllWindows()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"279390878","text":"import setuptools\nimport re\n\n\ndef get_property(prop, project):\n # https://stackoverflow.com/questions/17791481/creating-a-version-attribute-for-python-packages-without-getting-into-troubl/41110107\n result = re.search(\n r'{}\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]'.format(prop), open(project + \"/__init__.py\").read()\n )\n return result.group(1)\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nproject_name = \"seissolxdmfwriter\"\nsetuptools.setup(\n name=project_name,\n version=get_property(\"__version__\", project_name),\n author=\"SeisSol Group\",\n description=\"A python writer for SeisSol xdmf output\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/SeisSol/Visualization/seissolxdmfwriter\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\"numpy\", \"h5py\"],\n)\n","sub_path":"seissolxdmfwriter/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"95398176","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport math\n\ndef 
my_abs(x):\n    if not isinstance(x, (int, float)):\n        raise TypeError('bad operand type')\n    if x >= 0:\n        return x\n    else:\n        return -x\n\ndef move(x, y, step, angle=0):\n    nx = x + step * math.cos(angle)\n    ny = y - step * math.sin(angle)\n    return nx, ny\n\nn = my_abs(-20)\nprint(n)\n\nx, y = move(100, 100, 60, math.pi/6)\nprint(x, y)\n\n# Calling my_abs with a str raises TypeError: bad operand type;\n# catch it so the rest of the sample still runs\ntry:\n    my_abs('123')\nexcept TypeError as e:\n    print('my_abs raised:', e)\n\n\ndef quadratic(a, b, c):\n    if not (isinstance(a,(int, float)) and isinstance(b,(int, float)) and isinstance(c,(int, float))):\n        raise TypeError('bad operand type')\n    elif a == 0:\n        return('This equation is not a quadratic equation')\n\n    delta = b**2 - 4*a*c\n    if delta < 0:\n        return('This quadratic equation has no real roots')\n    elif delta == 0:\n        return('This quadratic equation has a double root: x1=x2=%f' % (-b/(2*a)))\n    else:\n        x1 = (-b + math.sqrt(delta))/(2*a)\n        x2 = (-b - math.sqrt(delta))/(2*a)\n        return x1, x2\n\nprint('quadratic(2, 3, 1) =', quadratic(2, 3, 1))\nprint('quadratic(1, 3, -4) =', quadratic(1, 3, -4))","sub_path":"samples/002_function/def_func.py","file_name":"def_func.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"488526954","text":"#Name: Yash Raja\r\n#Date: October 30th, 2017\r\n#File Name: 4_2_1_c\r\n#Description: Guessing game where you have unlimited guesses, while allowing the user to quit the program by inputting -1\r\n#Test Cases\r\n\r\nimport random\r\n\r\n# Initialize variables\r\nwon = False\r\nnumTries = 0\r\nn = 0\r\n# Computer picks a number between 1 and 100\r\nnumToGuess = random.randint(1, 100)\r\n\r\nwhile not(won):\r\n    n+=1\r\n    # Ask the user for a guess and increment number of guesses\r\n    guess = int(input(\"If you want to quit input -1. Guess a number between 1 and 100: \"))\r\n\r\n    # Check to see if the guess is correct. Output result\r\n    if guess == numToGuess or guess == -1:\r\n        won = True\r\n    else:\r\n        if guess < numToGuess:\r\n            print(\"Your guess was too low!\")\r\n        else:\r\n            print(\"Your guess was too high!\")\r\n\r\n        print(\"Try again!!\")\t\t\r\nif guess == -1:\r\n    print(\"You have quit the program in\", n, \"tries\")\r\nelse:\r\n    print(\"Yay!!!! You won!!! 
in\", n, \"tries\")\n \t\n","sub_path":"Unit 3 - Repetions/4_2/4_2_1_c.py","file_name":"4_2_1_c.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"651401320","text":"from numpy import *\nfrom numpy.linalg import inv\nimport plot_tool as plt\n#################\n# Kalman Filter #\n#################\n\ndef kf_predict(X, P, A, Q, B, U):\n X = dot(A, X) + dot(B, U)\n P = dot(A, dot(P, A.T)) + Q\n return(X,P)\n\ndef kf_update(X, P, Y, H, R):\n IM = dot(H, X)\n IS = R + dot(H, dot(P, H.T))\n K = dot(P, dot(H.T, inv(IS)))\n X = X + dot(K, (Y-IM))\n P = P - dot(K, dot(IS, K.T))\n return (X,P,K,IM,IS)\n\n\n#########################\n# Our Prediction Scheme #\n#########################\n\n## input data\n\ndata = array([[0,0], [1,2], [2,4], [3,6], [4,8]]) # data: sequence of 2D points from progressing fire fronts\nt = len(data) # t: current time\nn = 100 # n: we want to make n predictions\nmus = [] # mus: predicted mean state estimates\ncovs = [] # covs: covariances of mus\n\n## formulation of our problem\n\nmu_ini = data[0] # mu_ini: The mean state estimate of the initial step\ncov_ini = eye(2) # cov_ini: The state covariance of the initial step\n\nA = eye(2) # A: The transition n x n matrix.\nQ = eye(2) # Q: The process noise covariance matrix.\nB = eye(2) # B: The input effect matrix.\nH = eye(2) # H: The measurement matrix.\nR = eye(2) # R: The measurement covariance matrix \n\n\n## update the current mu and cov\n\nmu_update = mu_ini\ncov_update = cov_ini\ndir = zeros(2)\n\nfor i in range(0, t-1):\n (mu_pre, cov_pre) = kf_predict(mu_update, cov_update, A, Q, B, dir)\n mu_old = mu_update\n (mu_update, cov_update, _, _, _) = kf_update(mu_pre, cov_pre, data[i+1], H, R)\n dir = mu_update - mu_old\n \n \n## predict mus and covs\n\nmu_pre = mu_update\ncov_pre = cov_update\n\nfor i in range(0, n):\n mu_old = mu_pre\n (mu_pre, cov_pre) = kf_predict(mu_pre, cov_pre, A, Q, B, dir)\n dir = mu_pre - mu_old\n\n mus += [mu_pre]\n covs += [cov_pre]\n\nplt.plot_covariance(covs, n)\nplt.show()\n\nprint (mus)\nprint (covs)\n","sub_path":"kalman_analysis/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"102678858","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Member, Circle, Reminder\n\n#class AttendeeAdmin(admin.ModelAdmin):\n# fields = ['checked_in', 'attendee_name', 'attendee_email', 'attendee_id', 'event']\n\nclass MemberInline(admin.TabularInline):\n model = Member\n\nclass CircleAdmin(admin.ModelAdmin):\n# fields = ['event_date', 'event_name', 'event_id']\n# list_display = ('event_date', 'event_name')\n# list_filter = ['event_date']\n inlines = [MemberInline]\n\nadmin.site.register(Circle, CircleAdmin)\nadmin.site.register(Reminder)\n","sub_path":"circly/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"146316167","text":"# Leadrboard score 95.138\n\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import hstack\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import roc_auc_score\nfrom 
sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import TimeSeriesSplit, cross_val_score, GridSearchCV\n\n###########\n# FUNCTIONS\n###########\n\ndef get_auc_lr_valid(X, y, C=1.0, seed=17, ratio = 0.9):\n # Split the data into the training and validation sets\n idx = int(round(X.shape[0] * ratio))\n # Classifier training\n lr = LogisticRegression(C=C, random_state=seed, solver='lbfgs', max_iter=500).fit(X[:idx, :], y[:idx])\n # Prediction for validation set\n y_pred = lr.predict_proba(X[idx:, :])[:, 1]\n # Calculate the quality\n score = roc_auc_score(y[idx:], y_pred)\n\n return score\n\n\ndef write_to_submission_file(predicted_labels, out_file,\n target='target', index_label=\"session_id\"):\n predicted_df = pd.DataFrame(predicted_labels,\n index = np.arange(1, predicted_labels.shape[0] + 1),\n columns=[target])\n predicted_df.to_csv(out_file, index_label=index_label)\n\n\n\ntimes = ['time%s' % i for i in range(1, 11)]\nsites = ['site%s' % i for i in range(1, 11)]\n\ntrain_df = pd.read_csv('train_sessions.csv', index_col='session_id', parse_dates=times)\ntest_df = pd.read_csv('test_sessions.csv', index_col='session_id', parse_dates=times)\n\n# Sort the data by time\ntrain_df = train_df.sort_values(by='time1')\n\n\ntrain_df[sites] = train_df[sites].fillna(0).astype('int')\ntest_df[sites] = test_df[sites].fillna(0).astype('int')\n\n# Load websites dictionary\nwith open(r\"site_dic.pkl\", \"rb\") as input_file:\n site_dict = pickle.load(input_file)\n\n# Create dataframe for the dictionary\nsites_dict = pd.DataFrame(list(site_dict.keys()), index=list(site_dict.values()), columns=['site'])\n#print(u'Websites total:', sites_dict.shape[0])\n#sites_dict.head()\n\n# Our target variable\ny_train = train_df['target'].values\n\n# United dataframe of the initial data\n#full_df = pd.concat([train_df.drop('target', axis=1), test_df])\n\n\n\n# small\ntrain_df[sites].fillna(0).to_csv('train_sessions_text.txt', sep=' ', index=None, header=None)\ntest_df[sites].fillna(0).to_csv('test_sessions_text.txt', sep=' ', index=None, header=None)\n\n\ncv = CountVectorizer(ngram_range=(1, 3), max_features=50000)\nwith open('train_sessions_text.txt') as inp_train_file:\n X_train = cv.fit_transform(inp_train_file)\nwith open('test_sessions_text.txt') as inp_test_file:\n X_test = cv.transform(inp_test_file)\nprint(X_train.shape)\n\n####################################\n# CROSSVALIDATION\n####################################\n\ntime_split = TimeSeriesSplit(n_splits=10)\nlogit = LogisticRegression(C=1, random_state=17, solver='liblinear')\n\ncv_scores = cross_val_score(logit, X_train, y_train, cv=time_split, scoring='roc_auc', n_jobs=-1)\nprint('Initial data')\nprint(cv_scores)\nprint(cv_scores.mean(), cv_scores.std())\n\n# Placeholder for some new features\ntrain_df_newfeatures = pd.DataFrame(index=train_df.index)\ntest_df_newfeatures = pd.DataFrame(index=test_df.index)\n\n# Feature number of urls in session < 10\n# EDA shows, that Alice almost always has 10 urls in session\n\ntrain_df_newfeatures['sub10'] = (train_df[times].count(axis = 1) < 10) * 1 - 0.5\ntest_df_newfeatures['sub10'] = (test_df[times].count(axis = 1) < 10) * 1 - 0.5\n\n# Day of week\n\n\n# Active days. 
Alice seems to be active on days 0, 1, 3 and 4 of the week\r\n\r\ntrain_df_newfeatures['dow'] = train_df['time1'].apply(lambda ts : ts.date().weekday())\r\ntrain_df_newfeatures['active_days'] = (train_df_newfeatures['dow'].apply(lambda x : x in [0, 1, 3, 4]) ) * 1 - 0.5\r\n\r\ntest_df_newfeatures['dow'] = test_df['time1'].apply(lambda ts : ts.date().weekday())\r\ntest_df_newfeatures['active_days'] = (test_df_newfeatures['dow'].apply(lambda x : x in [0, 1, 3, 4]) ) * 1 - 0.5\r\n\r\n\r\ntrain_df_newfeatures['hour'] = train_df['time1'].apply(lambda ts : ts.hour)\r\ntrain_df_newfeatures['active_hours'] = (train_df_newfeatures['hour'].apply(lambda x : x in [12, 13, 16, 17, 18]) ) * 1 - 0.5\r\n\r\ntest_df_newfeatures['hour'] = test_df['time1'].apply(lambda ts : ts.hour)\r\ntest_df_newfeatures['active_hours'] = (test_df_newfeatures['hour'].apply(lambda x : x in [12, 13, 16, 17, 18]) ) * 1 - 0.5\r\n\r\ntrain_df_newfeatures['sesslen'] = (train_df[times].max(axis = 1) - train_df[times].min(axis = 1)).apply(lambda ts: round(ts.seconds))\r\ntest_df_newfeatures['sesslen'] = (test_df[times].max(axis = 1) - test_df[times].min(axis = 1)).apply(lambda ts: round(ts.seconds))\r\n\r\n####################################\r\n# Feature scaling\r\n####################################\r\n\r\nscaler = StandardScaler()\r\n\r\n# fit the scaler on train only, then reuse it on test to avoid leakage\r\ntrain_df_newfeatures['dow_scaled'] = scaler.fit_transform(train_df_newfeatures['dow'].values.reshape(-1,1))\r\ntest_df_newfeatures['dow_scaled'] = scaler.transform(test_df_newfeatures['dow'].values.reshape(-1,1))\r\n\r\ntrain_df_newfeatures['sesslen_scaled'] = scaler.fit_transform(train_df_newfeatures['sesslen'].values.reshape(-1,1))\r\ntest_df_newfeatures['sesslen_scaled'] = scaler.transform(test_df_newfeatures['sesslen'].values.reshape(-1,1))\r\n\r\n####################################\r\n# Adding new features to train dataset\r\n####################################\r\n\r\nX_train_new = csr_matrix(hstack([X_train, train_df_newfeatures[['dow_scaled', 'active_days', 'active_hours', 'sesslen_scaled']]]))\r\nX_test_new = csr_matrix(hstack([X_test, test_df_newfeatures[['dow_scaled', 'active_days', 'active_hours', 'sesslen_scaled']]]))\r\ncv_scores = cross_val_score(logit, X_train_new, y_train, cv=time_split, scoring='roc_auc', n_jobs=-1)\r\nprint('num_urls + day of week')\r\nprint(cv_scores)\r\nprint(cv_scores.mean(), cv_scores.std())\r\n\r\nlogit.fit(X_train_new, y_train)\r\nlogit_test_pred2 = logit.predict_proba(X_test_new)[:, 1]\r\nwrite_to_submission_file(logit_test_pred2, 'subm2.csv')\r\n","sub_path":"Alice/submission2.py","file_name":"submission2.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"36172660","text":"def interPolSearch(ar,k):\r\n    l=0\r\n    h=len(ar)-1\r\n    while l<=h and (k>=ar[l] and k<=ar[h]):\r\n        if l==h:\r\n            if ar[l]==k:\r\n                return l\r\n            return -1\r\n\r\n        pos=l+int(float((h-l)/(ar[h]-ar[l])*(k-ar[l]))) #probing\r\n        if ar[pos]==k:\r\n            return pos\r\n        elif k<ar[pos]:\r\n            h=pos-1\r\n        else:\r\n            l=pos+1\r\n    return -1\r\n\n            if time.time() - buzzer_time > buzzer_lenght:\n                buzz = False\n                buzzer_time = time.time()\n            else:\n                if time.time() - buzzer_time > buzzer_interval:\n                    buzz = True\n                    buzzer_time = time.time()\n\n            if buzz:\n                GPIO.output(buzzer_pin, GPIO.HIGH)\n                GPIO.output(vib_pin, GPIO.LOW)\n            else:\n                GPIO.output(buzzer_pin, GPIO.LOW)\n                if buzzer_state:\n                    GPIO.output(vib_pin, GPIO.HIGH)\nexcept KeyboardInterrupt:\n    GPIO.output(buzzer_pin, GPIO.LOW)\n    
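# leave the outputs LOW and release the GPIO pins on exit\n    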
GPIO.cleanup()\n","sub_path":"lessons/lesson_5_02-v2.py","file_name":"lesson_5_02-v2.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"410940800","text":"from EmployeeClass import *\r\n\r\n\r\nclass AddEmployee():\r\n    def __init__(self):\r\n        self.ids = input(\"Plz enter the employee ID: \")\r\n        self.name = input(\"The employee name: \")\r\n        self.Etype = input(\"Type (1) Worker (2) Supervisor: \")\r\n        if self.Etype == '1':\r\n            self.shift = input(\"Shift (1) day (2) night: \")\r\n            self.rate = input(\"Rate: \")\r\n        else:\r\n            self.salary = input(\"Salary: \")\r\n            self.bonus = input(\"Bonus: \")\r\n\r\n    def register(self, id_em_dict):\r\n        if self.ids in id_em_dict:\r\n            print(\"This ID already exists and cannot be added\")\r\n        else:\r\n            if self.Etype == \"1\":\r\n                id_em_dict[self.ids] = WorkerClass(\r\n                    self.name, self.ids, self.Etype, self.shift, self.rate)\r\n            else:\r\n                id_em_dict[self.ids] = SupervisorClass(\r\n                    self.name, self.ids, self.Etype, self.salary, self.bonus)\r\n","sub_path":"AddEmployee.py","file_name":"AddEmployee.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"64728997","text":"for case in range(1, int(input()) + 1):\n    prices = list(map(int, input().split()))\n    year = list(map(int, input().split()))\n    dp = [0] * 13\n    for i in range(1, 13):\n        day = prices[0] * year[i-1]\n        month = prices[1]\n        three = 99999\n        if i >= 3:\n            three = prices[2]\n        dp[i] = min(dp[i-1] + day, dp[i-1] + month, dp[i-3] + three)\n    \n    res = dp[12]\n    if res > prices[3]:\n        res = prices[3]\n\n    print(f'#{case} {res}')\n","sub_path":"Python/SWEA/SW_TEST/1952_수영장.py","file_name":"1952_수영장.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"617718258","text":"# -*- encoding: utf-8 -*-\n\n__author__ = 'kotaimen'\n__date__ = '12/01/2017'\n\nimport uuid\n\nimport click\n\nfrom ..utils import boto3_exception_handler, pretty_print_config, \\\n    load_template_body, echo_pair\nfrom ..stack.events import start_tail_stack_events_daemon\nfrom ...cli import changeset\nfrom ...config import load_stack_config\n\n\n@changeset.command()\n@click.argument('config_file', type=click.Path(exists=True))\n@click.argument('changeset_name', required=False, default=None)\n@click.option('--no-wait', is_flag=True, default=False,\n              help='Exit immediately after operation is started.')\n@click.option('--use-previous-template', is_flag=True, default=False,\n              help='Reuse the existing template that is associated with the '\n                   'stack that you are updating.')\n@click.option('--changeset-type', type=click.Choice(['CREATE', 'UPDATE']),\n              default='UPDATE',\n              help='The type of change set operation. To create a change set '\n                   'for a new stack, specify CREATE. To create a change set '\n                   'for an existing stack, specify UPDATE.')\n@click.option('--execute', is_flag=True, default=False,\n              help='Execute the ChangeSet immediately after it is created. '\n                   'Specifying this disables the \"--no-wait\" option. ')\n@click.pass_context\n@boto3_exception_handler\ndef create(ctx, config_file, no_wait, changeset_name, use_previous_template,\n           changeset_type, execute):\n    \"\"\"Creates a list of changes for a stack.\n\n    AWS CloudFormation generates the change set by comparing the template's\n    information with the information that you submit. A change set can help
A change set can help\n you understand which resources AWS CloudFormation will change, and how\n it will change them, before you update your stack. Change sets allow\n you to check before making a change to avoid deleting or replacing\n critical resources.\n\n \\b\n CONFIG_FILE Stack configuration file.\n CHANGESET_NAME The name of the change set. Must be unique among all\n change sets that are associated with the specified\n stack. cfncli will automatically generate a unique\n name if one is not given.\n \"\"\"\n session = ctx.obj['session']\n\n # load config\n stack_config = load_stack_config(config_file)\n pretty_print_config(stack_config)\n load_template_body(session, stack_config)\n click.echo('Creating change set...')\n\n # connect to cfn\n region = stack_config.pop('Region')\n\n # remove unused parameters\n stack_config.pop('DisableRollback', None)\n stack_config.pop('OnFailure', None)\n stack_config.pop('Package', None)\n\n # update parameters\n if changeset_name is None:\n # XXX: use hash of stack config & template as unique name?\n changeset_name = '%s-ChangeSet-%s' % (stack_config['StackName'],\n str(uuid.uuid1())[:8])\n stack_config['ChangeSetName'] = changeset_name\n\n echo_pair('ChangeSet Name', changeset_name)\n if use_previous_template:\n stack_config.pop('TemplateBody', None)\n stack_config.pop('TemplateURL', None)\n stack_config['UsePreviousTemplate'] = use_previous_template\n\n stack_config['ChangeSetType'] = changeset_type\n\n stack_config.pop('StackPolicyBody', None)\n stack_config.pop('StackPolicyURL', None)\n\n # create changeset\n client = session.client('cloudformation', region_name=region)\n result = client.create_change_set(**stack_config)\n echo_pair('ChangeSet ARN', result['Id'])\n\n # exit immediately\n if not execute and no_wait:\n return\n\n # wait until update complete\n waiter = client.get_waiter(\n 'change_set_create_complete')\n waiter.wait(ChangeSetName=result['Id'])\n\n click.secho('ChangeSet creation complete.', fg='green')\n\n if not execute:\n return\n\n if changeset_type == 'CREATE':\n waiter_model = 'stack_create_complete'\n else:\n waiter_model = 'stack_update_complete'\n\n client.execute_change_set(\n ChangeSetName=changeset_name,\n StackName=stack_config['StackName'],\n )\n\n # exit immediately\n if no_wait:\n return\n\n # execute changeset\n cfn = session.resource('cloudformation', region_name=region)\n stack = cfn.Stack(stack_config['StackName'])\n\n # start event tailing\n start_tail_stack_events_daemon(session, stack, latest_events=5)\n\n # wait until update complete\n waiter = client.get_waiter(waiter_model)\n waiter.wait(StackName=stack.stack_id)\n\n click.secho('ChangeSet execution complete.', fg='green')\n","sub_path":"awscfncli/commands/changeset/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"310006514","text":"# Get value from user\nval = float(input(\"Enter Value: \"))\n\n# Compute a provisional square root\nroot = 1.0\n\n# How far off is the provisional square root\ndiff = root*root - val\n\n# Loop until the provisional root is close enough to the actual root\nwhile diff > 0.00000001 or diff < -0.00000001:\n\tprint(root, 'squared is', root*root) # Report how we are doing\n\troot = (root + val/root) / 2 # Compute new provisional root\n\n\t# How bad is our approximation\n\tdiff = root*root - val\n\n# Report approximate square root\nprint('Square root of ', val, '=',
root)","sub_path":"halter/a/squareroot.py","file_name":"squareroot.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"244840775","text":"from django import forms\nfrom .models import Mandat, Binet\nfrom django.contrib.auth.models import User\n\n\nclass DescriptionForm(forms.ModelForm):\n\t\"\"\"defines a form to get the description of a mandat. It is called in the 'Remarques binet' module of the compta module\"\"\"\n\t\n\tclass Meta:\n\t\tmodel = Mandat\n\t\tfields = ('description',)\n\t\tlabels = {'description': 'Commentaires généraux sur votre mandat'}\n\n\nclass BinetEditForm(forms.ModelForm):\n\t\"\"\"permet de modifier les binets\"\"\"\n\n\tclass Meta:\n\t\tmodel = Binet\n\t\texclude = ('creator',)\n\t\tlabels = {'description': 'Description du binet (visible par tous, non modifiable par les membres)',\n\t\t\t 'remarques_admins': 'Remarques générales sur le binet (visibles par les kessiers seulement)'}\n\n\nclass BinetCreateForm(forms.ModelForm):\n\t\"\"\"permet de créer un binet avec un permier mandat\"\"\"\n\tclass Meta:\n\t\tmodel = Binet\n\t\texclude = ('creator',)\n\n\tdef clean(self):\n\t\tcleaned_data = super(BinetCreateForm, self).clean()\n\t\tif len(Binet.objects.filter(nom=cleaned_data['nom'])) == 1:\n\t\t\tmsg = 'Un binet avec ce nom existe déjà'\n\t\t\tself.add_error('nom', msg)\n\n\nclass MandatCreateForm(forms.ModelForm):\n\t\"\"\"permet de créer le premier mandat lors de la création d'un binet\"\"\"\n\tpresident = forms.ModelChoiceField(queryset=User.objects.order_by('username'))\n\ttresorier = forms.ModelChoiceField(queryset=User.objects.order_by('username'))\n\n\tclass Meta:\n\t\tmodel = Mandat\n\t\tfields = ('type_binet', 'president', 'tresorier', 'promotion')\n\n\tdef clean(self):\n\t\tcleaned_data = super(MandatCreateForm, self).clean()\n\t\tpromotion = cleaned_data['promotion']\n\t\ttresorier = cleaned_data['tresorier']\n\t\tpresident = cleaned_data['president']\n\t\t# pour le cas du président inconnu, on vérifie pas la promo\n\t\tif president != User.objects.get(username='Inconnu') and tresorier != User.objects.get(username='Inconnu'):\n\t\t\tif promotion != president.eleve.promotion or promotion != tresorier.eleve.promotion or president.eleve.promotion != tresorier.eleve.promotion:\n\t\t\t\tmsg = 'Incohérence entre la promotion et les promotions des membres'\n\t\t\t\tself.add_error('president', msg)\n\n\n\nclass MandatEditForm(forms.ModelForm):\n\t\"\"\"permet de modifier le mandat\"\"\"\n\tpresident = forms.ModelChoiceField(queryset=User.objects.order_by('username'))\n\ttresorier = forms.ModelChoiceField(queryset=User.objects.order_by('username'))\n\n\tdef __init__(self, binet, create, *args, **kwargs):\n\t\tsuper(MandatEditForm, self).__init__(*args, **kwargs)\n\t\tself.binet = binet\n\t\tself.create = create\n\n\tclass Meta:\n\t\tmodel = Mandat\n\t\texclude = ('binet', 'creator', 'is_last', 'create_date', 'is_active', 'being_checked')\n\t\tlabels = {'description': 'Description du mandat (visible et modifiable par les membres)',\n\t\t\t 'remarques_admins': 'Remarques générales sur le mandat (visibles par les kessiers seulement)'}\n\n\n\n\tdef clean(self):\n\t\tcleaned_data = super(MandatEditForm, self).clean()\n\t\tpromotion = cleaned_data['promotion']\n\t\ttresorier = cleaned_data['tresorier']\n\t\tpresident = cleaned_data['president']\n\t\t# pour le cas du président inconnu, on vérifie pas la promo\n\t\tif president != User.objects.get(username='Inconnu') and 
tresorier != User.objects.get(username='Inconnu'):\n\t\t\tif promotion != president.eleve.promotion or promotion != tresorier.eleve.promotion or president.eleve.promotion != tresorier.eleve.promotion:\n\t\t\t\tmsg = 'Incohérence entre la promotion et les promotions des membres'\n\t\t\t\tself.add_error('president', msg)\n\n\t\tif self.create and len(Mandat.objects.filter(binet=self.binet, promotion=promotion)) == 1:\n\t\t\tmsg = 'Le mandat {} du binet {} existe déjà'.format(str(promotion), str(self.binet))\n\t\t\tself.add_error('promotion', msg)\n\n\nclass PassationMandatForm(forms.ModelForm):\n\t\"\"\"creates the description and remarques_admins form used during the handover\"\"\"\n\n\tclass Meta:\n\t\tmodel = Mandat\n\t\tfields = ('description', 'remarques_admins')\n\t\tlabels = {'description': 'Remarques sur le mandat visibles par les membres du binet :',\n\t\t\t\t\t'remarques_admins': 'Remarques pour les kessiers :'}","sub_path":"binets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"475614049","text":"#!/usr/bin/env python\r\nimport subprocess\r\nimport click\r\nimport os\r\n\r\n#chmod a+x ptest\r\n\r\ndef get_blocks():\r\n with open(\".gitignore\", \"r\") as f:\r\n lines = [line.replace(\"\\n\", \"\") for line in f.readlines()]\r\n try:\r\n indx = lines.index('#GITS3')\r\n except ValueError:\r\n indx = -1\r\n if indx == -1:\r\n git_ignore_lines = lines\r\n git_s3_lines = []\r\n else:\r\n git_ignore_lines = lines[0:indx]\r\n git_s3_lines = lines[indx+1:]\r\n return git_ignore_lines, git_s3_lines\r\n\r\ndef write_blocks(git_ignore_lines, git_s3_lines, path=None):\r\n with open(\".gitignore\", \"w\") as f:\r\n git_s3_lines.insert(0, \"#GITS3\")\r\n if path != None:\r\n git_s3_lines.append(path)\r\n final = git_ignore_lines + git_s3_lines\r\n out = \"\"\r\n for line in final:\r\n out += line + \"\\n\"\r\n f.write(out)\r\n\r\ndef filter_paths(path_filter, paths_to_search):\r\n index = -1\r\n paths_found = []\r\n for path in paths_to_search:\r\n index += 1\r\n if path_filter != None:\r\n if path_filter in path:\r\n paths_found.append((index, path))\r\n else:\r\n paths_found.append((index, path))\r\n return paths_found\r\n\r\ndef sync_s3(path, bucket=\"acoe-s3\", local_to_s3=True):\r\n cwd = os.getcwd()\r\n repo = os.path.basename(os.path.normpath(cwd))\r\n full_path = os.path.join(cwd, path)\r\n s3_uri = f\"s3://{bucket}/{repo}/{path}\"\r\n if local_to_s3:\r\n cmd = f\"aws s3 sync {full_path} {s3_uri}\"\r\n else:\r\n cmd = f\"aws s3 sync {s3_uri} {full_path}\"\r\n process = subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE)\r\n process.wait()\r\n print(process.communicate())\r\n\r\ndef getobjects_s3():\r\n cwd = os.getcwd()\r\n repo = os.path.basename(os.path.normpath(cwd))\r\n cmd = f\"aws s3 ls s3://acoe-s3/{repo}/ --recursive\"\r\n process = subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE)\r\n process.wait()\r\n output, err = process.communicate(b\"input data that is passed to subprocess' stdin\")\r\n objects = []\r\n for item in output.split():\r\n _item = item.decode(\"utf-8\")\r\n if \"workstreams\" in _item:\r\n objects.append(_item)\r\n with open(\".s3synced\", \"w\") as f:\r\n out = \"\\n\".join(objects)\r\n f.write(out)\r\n return objects\r\n
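\r\n# '-h' is registered as an alias for --help on the click group and every subcommand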
\r\nCONTEXT_SETTINGS = dict(help_option_names=['-h'])\r\n\r\n@click.group(context_settings=CONTEXT_SETTINGS)\r\ndef cli():\r\n \"\"\"Stores and manages Git repo large files in AWS S3.\r\n \"\"\"\r\n pass\r\n\r\n@click.command()\r\n@click.argument('path')\r\ndef track(path):\r\n \"\"\"Adds input path to the tracked files. \r\n Files are tracked inside .gitignore under tag #GITS3\r\n \"\"\"\r\n git_ignore_lines, git_s3_lines = get_blocks()\r\n if path in git_s3_lines:\r\n raise ValueError(\"Path is already tracked\")\r\n else:\r\n write_blocks(git_ignore_lines, git_s3_lines, path)\r\n \r\n\r\n@click.command()\r\n@click.argument('path')\r\ndef push(path):\r\n \"\"\"Pushes input path files/objects into S3.\r\n Input can be value or index\r\n \"\"\"\r\n git_ignore_lines, git_s3_lines = get_blocks()\r\n if path.isdigit():\r\n indx = int(path)\r\n try:\r\n _path = git_s3_lines[indx]\r\n sync_s3(_path)\r\n getobjects_s3()\r\n except IndexError:\r\n raise IndexError(f\"\"\"Your path index {path} is not an index of \r\n tracked files. Check available indexes with git \r\n s3 list\r\n \"\"\")\r\n else:\r\n paths_found = filter_paths(path, git_s3_lines)\r\n if paths_found == []:\r\n raise ValueError(f\"Path is not in tracked paths, try: git s3 track {path}\")\r\n else:\r\n for path in paths_found:\r\n sync_s3(path[1])\r\n getobjects_s3()\r\n\r\n@click.command()\r\n@click.argument('path')\r\ndef pull(path):\r\n \"\"\"Pulls all files from S3 inside input path.\r\n \"\"\"\r\n git_ignore_lines, git_s3_lines = get_blocks()\r\n if path.isdigit():\r\n indx = int(path)\r\n try:\r\n _path = git_s3_lines[indx]\r\n sync_s3(_path, local_to_s3=False)\r\n except IndexError:\r\n raise IndexError(f\"\"\"Your path index {path} is not an index of \r\n tracked files. Check available indexes with git \r\n s3 list\r\n \"\"\")\r\n else:\r\n paths_found = filter_paths(path, git_s3_lines)\r\n for path in paths_found:\r\n sync_s3(path[1], local_to_s3=False)\r\n\r\n@click.command()\r\n@click.argument('path')\r\ndef remove(path, bucket=\"acoe-s3\"):\r\n \"\"\"Removes input path from the tracked paths.\r\n Input can be value or index\r\n \"\"\"\r\n git_ignore_lines, git_s3_lines = get_blocks()\r\n if path.isdigit():\r\n indx = int(path)\r\n try:\r\n _path = git_s3_lines[indx]\r\n git_s3_lines.remove(_path)\r\n write_blocks(git_ignore_lines, git_s3_lines)\r\n except IndexError:\r\n raise IndexError(f\"\"\"Your path index {path} is not an index of \r\n tracked files. Check available indexes with git \r\n s3 list\r\n \"\"\")\r\n elif path in git_s3_lines:\r\n git_s3_lines.remove(path)\r\n write_blocks(git_ignore_lines, git_s3_lines)\r\n else:\r\n raise ValueError(f\"Path {path} is not in tracked paths\")\r\n\r\n@click.command()\r\n@click.option('-f', \"path_filter\", help=\"filters paths containing input value\")\r\ndef list(path_filter=None):\r\n \"\"\"Lists all tracked paths to be in sync in s3. \r\n These folders are stored in .gitignore under the tag #GITS3\r\n \"\"\"\r\n git_ignore_lines, git_s3_lines = get_blocks()\r\n paths_found = filter_paths(path_filter, git_s3_lines)\r\n for path in paths_found:\r\n click.echo(str(path[0]) + \" \" + path[1])\r\n\r\n@click.command()\r\n@click.option('-f', \"path_filter\", help=\"filters paths containing input value\")\r\ndef synced(path_filter=None):\r\n \"\"\"Lists all objects (files) stored in S3
\r\n from the current repo\r\n \"\"\"\r\n objects = getobjects_s3()\r\n for obj in objects:\r\n if path_filter != None:\r\n if path_filter in obj:\r\n click.echo(obj)\r\n else:\r\n click.echo(obj)\r\n\r\n\r\ncli.add_command(track)\r\ncli.add_command(push)\r\ncli.add_command(pull)\r\ncli.add_command(remove)\r\ncli.add_command(list)\r\ncli.add_command(synced)","sub_path":"grizly/cli/git_s3.py","file_name":"git_s3.py","file_ext":"py","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"486951321","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Use the area and color of the plotted points to display a set of 4-dimensional data\nif __name__ == \"__main__\":\n # random number generator\n rdm = np.random.RandomState(1)\n\n # generate 100 pairs of random values drawn from the normal distribution N(0,1)\n x = rdm.randn(100)\n y = rdm.randn(100)\n # color values, in the range [0,1)\n colors = rdm.rand(100)\n\n print('x:', x)\n print('y:', y)\n print('colors:', colors)\n\n # marker sizes\n sizes = 1000 * rdm.rand(100)\n\n plt.scatter(x, y, c=colors, s=sizes, alpha=0.3)\n plt.colorbar()\n plt.show()\n","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"327039509","text":"\"\"\"\nGiven a string and a set of delimiters, reverse the words in the string while maintaining the relative order of the delimiters.\nFor example, given \"hello/world:here\", return \"here/world:hello\"\n\nFollow-up: Does your solution work for the following cases: \"hello/world:here/\", \"hello//world:here\"\n\"\"\"\n\ndef main():\n s = raw_input(\"Enter the string?\\n\")\n delimiters = []\n index = 0\n prev_item = ''\n for item in s:\n if item == ':' or item =='/':\n delimiters.append([item, index])\n prev_item = item\n index += 1\n else:\n if prev_item == '' or prev_item == '/' or prev_item == ':':\n index += 1\n prev_item = item\n\n s = s.replace('/', ':')\n s = s.split(':')\n s.reverse()\n\n while(s[0] == ''):\n s.reverse()\n s.pop()\n s.reverse()\n s.append('')\n\n for delimiter in delimiters:\n item, index = delimiter\n s.insert(index, item)\n\n s = ''.join(s)\n\n print(s)\n\nif __name__ == '__main__':\n main()","sub_path":"problem114.py","file_name":"problem114.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"587020433","text":"from ..cards.acecard import AceCard\nfrom ..cards.facecard import FaceCard\nfrom ..cards.numbercard import NumberCard\nfrom ..suit.suit import Suit\n\n# Instantiate Suit\nclub, diamond, heart, spade = (\n Suit('Club', '♣︎'),\n Suit('Diamond', '♦︎'),\n Suit('Heart', '♥︎'),\n Suit('Spade', '♠︎'),\n)\n\n# Here class_, rank_str are dynamically set in the\n# object once we run rank.\n# Notice that rank returns self, as this is needed in order\n# to then chain suit to it\nclass FactoryCard:\n def rank(self, rank):\n self.class_, self.rank_str = {\n 1: (AceCard, rank),\n 11: (FaceCard, rank),\n 12: (FaceCard, rank),\n 13: (FaceCard, rank),\n }.get(rank, (NumberCard, rank))\n return self\n\n def suit(self, suit):\n return self.class_(self.rank_str, suit)\n\n\ndeck = [\n FactoryCard().rank(rank).suit(suit)\n for rank in range(1, 14)\n for suit in (club, diamond, heart, spade)\n]\n\nprint(deck)\n","sub_path":"src/playground/factoryclass.py","file_name":"factoryclass.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"371404859","text":"#!/usr/bin/env 
python3\n#\n# Copyright (c) FIRST and other WPILib contributors.\n# Open Source Software; you can modify and/or share it under the terms of\n# the WPILib BSD license file in the root directory of this project.\n#\n\nimport commands2\n\nimport wpimath.trajectory\n\nimport subsystems.drivesubsystem\nimport constants\n\n\nclass DriveDistanceProfiled(commands2.TrapezoidProfileCommand):\n \"\"\"Drives a set distance using a motion profile.\"\"\"\n\n def __init__(\n self, meters: float, drive: subsystems.drivesubsystem.DriveSubsystem\n ) -> None:\n \"\"\"Creates a new DriveDistanceProfiled command.\n\n :param meters: The distance to drive.\n :param drive: The drive subsystem to use.\n \"\"\"\n super().__init__(\n wpimath.trajectory.TrapezoidProfile(\n # Limit the max acceleration and velocity\n wpimath.trajectory.TrapezoidProfile.Constraints(\n constants.DriveConstants.kMaxSpeedMetersPerSecond,\n constants.DriveConstants.kMaxAccelerationMetersPerSecondSquared,\n ),\n # End at desired position in meters; implicitly starts at 0\n wpimath.trajectory.TrapezoidProfile.State(meters, 0),\n ),\n # Pipe the profile state to the drive\n lambda setpointState: drive.setDriveStates(setpointState, setpointState),\n # Require the drive\n [drive],\n )\n # Reset drive encoders since we're starting at 0\n drive.resetEncoders()\n","sub_path":"commands-v2/drive-distance-offboard/commands/drivedistanceprofiled.py","file_name":"drivedistanceprofiled.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"350762352","text":"import boto3\nimport time\nimport os\nimport sys\nimport wget\n\nstart = time.time()\n\nsource = sys.argv[1]\nbucket = sys.argv[2]\nprefix = sys.argv[3]\n\ns3 = boto3.resource('s3')\n\ndef download(url):\n filename = url.split(\"/\")[-1]\n if not os.path.exists(filename):\n wget.download(url, filename)\n\ndef upload_to_s3(prefix, channel, file):\n data = open(file, \"rb\")\n key = '{}/{}/{}'.format(prefix, channel, file)\n s3.Bucket(bucket).put_object(Key=key, Body=data)\n\n# This example downloads recordIO files that have already been split into train/test\n# For the process of creating these files see the \"im2rec.py\" as part of mxnet\n# see: https://gluon-cv.mxnet.io/build/examples_datasets/recordio.html\n\nprint (\"Downloading Training Data\")\ndownload(os.path.join(source, 'caltech-256-60-train.rec'))\nupload_to_s3(prefix, 'train', 'caltech-256-60-train.rec')\nprint (\"Finished Downloading Training Data\")\n\nprint (\"Downloading Testing Data\")\ndownload(os.path.join(source, 'caltech-256-60-val.rec'))\nupload_to_s3(prefix, 'validation', 'caltech-256-60-val.rec')\nprint (\"Finished Downloading Testing Data\")\n\nend = time.time()\nprint(end - start)\n","sub_path":"Source/data-prep.py","file_name":"data-prep.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"89903388","text":"import logging\nimport os\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom unet3d.model import UNet3D\nfrom unet3d.trainer import UNet3DTrainer\nfrom unet3d.utils import DiceCoefficient\nfrom unet3d.utils import DiceLoss\nfrom unet3d.utils import Random3DDataset\nfrom unet3d.utils import get_logger\n\n\nclass TestUNet3DTrainer(object):\n def test_single_epoch(self, tmpdir, capsys):\n with capsys.disabled():\n # get device to train on\n device = torch.device(\n \"cuda:0\" if 
torch.cuda.is_available() else 'cpu')\n\n conv_layer_order = 'crg'\n\n loss_criterion, final_sigmoid = DiceLoss(), True\n\n model = self._load_model(final_sigmoid, conv_layer_order)\n\n error_criterion = DiceCoefficient()\n\n loaders = self._get_loaders()\n\n learning_rate = 1e-4\n weight_decay = 0.0005\n optimizer = optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=weight_decay)\n\n logger = get_logger('UNet3DTrainer', logging.DEBUG)\n trainer = UNet3DTrainer(model, optimizer, loss_criterion,\n error_criterion,\n device, loaders, tmpdir,\n max_num_epochs=1,\n log_after_iters=2,\n validate_after_iters=2,\n logger=logger)\n\n trainer.fit()\n\n # test loading the trainer from the checkpoint\n UNet3DTrainer.from_checkpoint(\n os.path.join(tmpdir, 'last_checkpoint.pytorch'),\n model, optimizer, loss_criterion, error_criterion, loaders,\n logger=logger)\n\n def _load_model(self, final_sigmoid, layer_order):\n in_channels = 1\n out_channels = 2\n # use F.interpolate for upsampling\n interpolate = True\n return UNet3D(in_channels, out_channels, interpolate,\n final_sigmoid, layer_order)\n\n def _get_loaders(self):\n # when using ConvTranspose3d, make sure that dimensions can be divided by 16\n train_dataset = Random3DDataset(4, (32, 64, 64), 1, 2)\n val_dataset = Random3DDataset(1, (32, 64, 64), 1, 2)\n\n return {\n 'train': DataLoader(train_dataset, batch_size=1, shuffle=True),\n 'val': DataLoader(val_dataset, batch_size=1, shuffle=True)\n }\n","sub_path":"tests/test_trainer.py","file_name":"test_trainer.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"502916425","text":"import data\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree, svm, neighbors\n\nX_train, X_test, y_train, y_test = train_test_split(data.X, data.y, test_size=0.2)\n\nclf = neighbors.KNeighborsClassifier()\nclf.fit(X_train, y_train)\nconfidence = clf.score(X_test, y_test)\nprint(confidence)\n\nexample_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]])\nexample_measures = example_measures.reshape(len(example_measures), -1)\nprediction = clf.predict(example_measures)\n\nif prediction == [2]:\n print(\"benign\")\nelse:\n print(\"malignant\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"321996823","text":"#IMPORT THE REQUIRED LIBRARIES\nimport os\nimport pandas as pd\ndirectory = os.fsencode('/home/sunbeam/Documents/forproject/IMDB_SENTIMENT/inreviews/')\n\nfor file in os.listdir(directory):\n filenamecsv = os.fsdecode(file)\n filename = filenamecsv.replace('.csv', '')\n print('filename :', filename)\n df = pd.read_csv(os.path.join('/home/sunbeam/Documents/forproject/IMDB_SENTIMENT/inreviews/', filenamecsv))\n\n\nexit()\nimport pandas as pd\ndf = pd.read_csv('/home/sunbeam/Documents/forproject/IMDB_SENTIMENT/inreviews/12 Angry Men.csv')\ndf.rename(columns={'Unnamed: 0': 'sr', '0': 'rev'}, inplace=True)\nprint(df['rev'][1])\n# print(df2)\nfrom selenium import webdriver\nimport time\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nPATH = \"/home/sunbeam/selenium/chromedriver\"\ndriver = webdriver.Chrome(PATH)\n\ntoptho = 'https://www.imdb.com/search/title/?count=100&groups=top_1000&sort=user_rating'\ndriver.get(toptho)\n\nhtml = driver.page_source.encode('utf-8')\nsoup = BeautifulSoup(html, 
'lxml')\n\n\n\n\nimport pandas as pd\n\nxdata = pd.read_csv('Avengers.csv', header=None)\nxlist = []\nfor i in range(1000):\n # print(i, \" \", xdata[1][i])\n xlist.append(xdata[1][i])\n\nfor elem in xlist:\n if type(elem) != str:\n print(type(elem))\nexit()\nxdata1 = xdata.dropna()\n# print(xdata1)\nxlist = []\nfor i in range(1000):\n print(i, \" \", xdata1[1][i])\n# xlist.append(rev)\n# print(xlist)\nexit()\n\nprint('sum of na : \\n', xdata.isna().sum())\n# print(xdata)\n\nfor i in range(50):\n print(xdata[1][i])\n\ndata2 = pd.read_csv(\n '/home/sunbeam/Documents/forproject/IMDB_SENTIMENT/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')\nfor i in range(len(data)):\n print(data[1][i])\n# for i in range(len(data)):\n# if type(data[1][i]) == str:\n# pass\n# else:\n# data.drop(i)\n# print(data)\n# xlist = {'reviews': [], 'sentiment': []}\n# for i in range(11):\n# xlist['reviews'].append(i)\n# xlist['sentiment'].append(i)\n# print(xlist)\n# print(i)\n# while type(data[1][i].str):\n# xlist['reviews'].append(data[1][i])\n# xlist['sentiment'].append(1)\n# print(xlist)\n\n# while type(data[1][i]) == str:\n# print(type(data[1][i]))\n# print(type(data[1][1]))\n# print(type('random text'))\n# df1 = pd.DataFrame()\n# xlist = {'reviews':[], 'sentiment':[]}\n# for i in range(11):\n# xlist['reviews']\n\n\n#\n# from textblob import TextBlob\n# import string\n# def remove_num_punct(aText):\n# p = string.punctuation\n# d = string.digits\n# j = p + d\n# table = str.maketrans(j, len(j)* ' ')\n# return aText.translate(table)\n#\n# i = 0\n# aList = []\n# for txt in xdata[1].isnull():\n# if txt:\n# aList.append(np.nan)\n# else:\n# b = remove_num_punct(xdata[1][i])\n# pol = TextBlob(b).sentiment.polarity\n# aList.append(pol)\n# i+=1\n# print(aList)\n# exit()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"650942811","text":"\"\"\"\nRestart commands for Minecraft servers\n\"\"\"\n\nimport click\n\nfrom ..base import mymcadmin, cli_command, rpc_command, error, success\nfrom ... 
import rpc\n\n@mymcadmin.command()\n@click.argument('server_id')\n@cli_command\n@rpc_command\ndef restart(rpc_conn, server_id):\n \"\"\"\n Restart a Minecraft server\n \"\"\"\n\n click.echo('Attempting to restart {}'.format(server_id), nl = False)\n\n with rpc.RpcClient(*rpc_conn) as rpc_client:\n rpc_client.server_restart(server_id)\n\n success('Success')\n\n@mymcadmin.command()\n@cli_command\n@rpc_command\ndef restart_all(rpc_conn):\n \"\"\"\n Restart all Minecraft servers\n \"\"\"\n\n click.echo('Restarting all servers...')\n\n with rpc.RpcClient(*rpc_conn) as rpc_client:\n result = rpc_client.server_restart_all()\n\n successful = result['success']\n failure = result['failure']\n\n for server_id in successful:\n success('{} successfully restarted'.format(server_id))\n\n for server_id in failure:\n error('{} could not restart properly'.format(server_id))\n\n","sub_path":"mymcadmin/cli/commands/restart.py","file_name":"restart.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"623182033","text":"class Solution:\n def buy_books(self, sizes, costs, budget):\n n = len(sizes)\n\n dp = [[0 for _ in range(budget + 1)] for _ in range(n + 1)]\n\n for i in range(1, n + 1):\n for j in range(1, budget + 1):\n if costs[i - 1] > j:  # item i does not fit into the remaining budget j\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - costs[i - 1]] + sizes[i - 1])\n\n return dp[-1][-1]\n","sub_path":"VMWare/12. Buy Books.py","file_name":"12. Buy Books.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"445760898","text":"import pyboard\nimport time\n\n\npyb = pyboard.Pyboard('/dev/cu.usbserial-0001', 115200)\npyb.enter_raw_repl()\n\nledstate = False\nfor i in range(10):\n\n ret = pyb.exec(f'led.value({ledstate})')\n ledstate = not ledstate\n print(ret)\n time.sleep(0.2)\n\npyb.exit_raw_repl()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"76109176","text":"N = int(input())\nA = list(map(int, input().split()))\n\ndef gcd(x, y):\n if y == 0:\n return x\n return gcd(y, x % y)\n\ndef erat():\n end = 10 ** 6 + 1\n t = [False if i == 0 or i == 1 else True for i in range(end)]\n ret = [0 for i in range(end)]\n\n for i in range(2, end):\n if t[i]:\n for j in range(i * 2, end, i):\n t[j] = False\n if not ret[j]:\n ret[j] = i\n ret[i] = i\n return ret\n\ntmp = A[0]\nfor i in A[1:]:\n tmp = gcd(tmp, i)\nisS = True if tmp == 1 else False\nisP = True\n\ne = erat()\n\nD = dict(iter([(i, False) for i in range(10 ** 6 + 1)]))\nfor i in A:\n tmp = set()\n while i != 1:\n tmp.add(e[i])\n i //= e[i]\n for i in tmp:\n if D[i]:\n isP = False\n break\n else:\n D[i] = True\n if not isP:\n break\n\nif isP:\n print('pairwise coprime')\nelif isS:\n print('setwise coprime')\nelse:\n print('not coprime')\n","sub_path":"contest/abc177/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"461502033","text":"import canvas\nimport ui\nif '.' 
not in __name__: # ==\"__main__\" or imported from same package\n\timport plottls\n\t#import xy_plot\nelse:\n\tfrom pyploty import plottls\n\t#from pyploty import xy_plot\n\nclass uiView_context(plottls.xy_context):\n\tdef __init__(self,uiImageView, *args, **kwargs):\n\t\t#world_frame=None, user_frame=None, font_name='Helvetica', font_size=16.0):\n\t\tself.uiImageView=uiImageView\n\t\tivframe = uiImageView.frame\n\t\twrld_frm = (ivframe[0],ivframe[1]+ivframe[3], ivframe[2],-ivframe[3])\n\t\tself.path=ui.Path()\n\t\tsuper().__init__(plottls.rect_area(*wrld_frm), *args, **kwargs) #world_frame,user_frame, font_name, font_size)\n\t\n\tdef draw_path(self):\n\t\tself.path.stroke()\n\t\n\t\t\n\tdef draw_rect(self,x,y,width,height):\n\t\trect = ui.Path.rect(*self.xyWorld(x,y),*self.whWorld(width,height))\n\t\tself.path.append_path(rect)\n\t\t\t\n\tdef draw_line(self,x1,y1,x2,y2):\n\t\tself.move_to(x1,y1)\n\t\tself.path.line_to(*self.xyWorld(x2,y2))\n\t\t\t\t\n\tdef draw_text(self,text, x, y, font_name=None, font_size=None):\n\t\tlbl = ui.Label()\n\t\tif font_size is None:\n\t\t\tfont_size=self.font_size\n\t\tif font_name is None:\n\t\t\tfont_name=self.font_name\n\t\tlbl.font = (font_name, font_size)\n\t\tlbl.text=text\n\t\tlbl.alignment = ui.ALIGN_LEFT\n\t\tlbl.size_to_fit()\n\t\twl,hl = lbl.width,lbl.height\n\t\t#xo,yo = self.whUser(wl,hl)\n\t\tlbl.x,lbl.y= self.xyWorld(x,y)\n\t\t#lbl.x -= xo #self.uiImageView.frame[0]\n\t\tlbl.y -= hl \n\t\tself.uiImageView.superview.add_subview(lbl)\n\t\t\n\tdef move_to(self,x,y):\n\t\tself.path.move_to(*self.xyWorld(x,y))\n\t\t\n\tdef add_line(self,x,y):\n\t\tself.path.line_to(*self.xyWorld(x,y))\n\t\t\n\tdef add_rect(self,x,y,width,height):\n\t\trect = ui.Path.rect(*self.xyWorld(x,y),*self.whWorld(width,height))\n\t\tself.path.append_path(rect)\n\t\t\t\t\t\n\tdef add_curve(cp1x, cp1y, cp2x, cp2y, x, y):\n\t\tself.path.add_curve(*self.xyWorld(cp1x,cp1y), *self.xyWorld(cp2x,cp2y), *self.xyWorld(x,y))\n\t\n\tdef get_text_size(self, text, font_name=None, font_size=None):\n\t\t''' size of text string in user coordinates '''\t\t\n\t\tif font_size is None:\n\t\t\tfont_size=self.font_size\n\t\tif font_name is None:\n\t\t\tfont_name=self.font_name\n\t\treturn self.whUser(*canvas.get_text_size(text, font_name, font_size))\t\n\nif __name__ == \"__main__\":\t\n\t#world =\tplottls.drawing_frame(100,100, 600,300)\t\n\tuser = plottls.rect_area(0,0, 60,30)\n\t#ctx = uiView_context(world ,user)\n\t\n\tclass gr_view(ui.View):\n\t\t\"\"\" \n\t\t\"\"\"\n\t\tdef __init__(self, frame=(100,100,600,300),*args,**kwargs):\n\t\t\t\"\"\" clipping all beyond frame\n\t\t\t\"\"\"\n\t\t\tsuper().__init__(frame=frame,*args,**kwargs)\n\t\t\t#self.bounds = frame # internal coord, origin defaults to 0\n\t\t\tself.background_color = 'white'\n\t\t\tivL = ui.ImageView(frame=(10,10,280,280))\n\t\t\tprint('iv.frame=%s iv.bounds=%s self.bounds=%s' % (ivL.frame,ivL.bounds,self.bounds))\t\n\t\t\tself.add_subview(ivL)\n\t\t\tivR = ui.ImageView(frame=(310,10,280,280))\n\t\t\tself.add_subview(ivR)\n\t\t\tself.ctx1 = uiView_context(ivL, user)\n\t\t\tself.ctx2 = uiView_context(ivR, user.sub_frame(yofs=-0))\n\t\t\t\t\t\t\n\t\tdef draw(self):\n\t\t\tself.ctx1.draw_path()\n\t\t\tself.ctx2.draw_path()\n\t\t\t\n\tview = gr_view()\t\n\t\n\tview.ctx1.draw_line(2,1, 30,15)\n\tview.ctx1.add_rect(4,2, 52,26)\n\tview.ctx1.draw_text('org',4,2)\n\tview.ctx1.draw_text('mid',30,15)\n\tview.ctx1.draw_text('top',30,28)\n\tview.ctx2.add_rect(4,2, 
52,26)\n\n\tview.present('sheet')\n\t#view.ctx1.draw_path()\n","sub_path":"uiView_context.py","file_name":"uiView_context.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"479647875","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n\n[env]\n# Conda Environment\nconda create --name nft_insights python=3.9.7\nconda info --envs\nsource activate nft_insights\nconda deactivate\n# if needed to remove\nconda env remove -n [NAME_OF_THE_CONDA_ENVIRONMENT]\n\n[path]\ncd /Users/brunoflaven/Documents/03_git/BlogArticlesExamples/nft_insights/001_NFT_training/\n\n\n[file]\npython nft_create_1.py\n\n# Define TOTAL_IMAGES before\n\n[source]\nhttps://betterprogramming.pub/create-your-own-nft-collection-with-python-82af40abf99f\nhttps://github.com/sonmh79/NFT_training/blob/master/nft_create.py\n\n[required]\n# install\npip install numpy\npip install pillow\npip install python-dotenv\n\n# if you want to extend you script to a streamlit app\npip install streamlit\npip install watchdog\n\n# show what the requirements\npip freeze > nft_image_generator_requirements_1.txt\npip install -r nft_image_generator_requirements_1.txt\n\n# remove the ditrectories before using the script\nrm -R images\nrm -R metadata\n\n\"\"\"\nfrom PIL import Image\nfrom IPython.display import display\nimport random\nimport json\nimport os\n\nclass NFT:\n\n # Each image is made up a series of traits\n # The weightings for each trait drive the rarity and add up to 100%\n\n face = [\"White\", \"Black\"]\n face_weights = [65, 35]\n\n ears = [\"ears1\", \"ears2\", \"ears3\", \"ears4\"]\n ears_weights = [50, 24, 24, 2]\n\n eyes = [\"regular\", \"small\", \"rayban\", \"hipster\", \"focused\"]\n eyes_weights = [70, 10, 5, 1, 14]\n\n hair = ['hair1', 'hair10', 'hair11', 'hair12', 'hair2', 'hair3', 'hair4',\n 'hair5',\n 'hair6',\n 'hair7',\n 'hair8',\n 'hair9']\n hair_weights = [10 , 10 , 10 , 10 ,10, 10, 10 ,10 ,10, 7 , 1 , 2]\n\n mouth = ['m1', 'm2', 'm3', 'm4', 'm5', 'm6']\n mouth_weights = [10, 10, 50, 10, 15, 5]\n\n nose = ['n1', 'n2']\n nose_weights = [90, 10]\n\n access = [\"acc1\",\"acc2\",\"acc3\"]\n access_weights = [5, 5, 90]\n\n beard = [\"b1\",\"b2\",\"b3\",\"b4\",\"b5\",\"b6\",\"b7\",\"b8\"]\n beard_weights = [1, 2, 3, 4, 5, 6, 7, 72]\n\n face_files = {\n \"White\": \"face1\",\n \"Black\": \"face2\"\n }\n\n ears_files = {\n \"ears1\": \"ears1\",\n \"ears2\": \"ears2\",\n \"ears3\": \"ears3\",\n \"ears4\": \"ears4\"\n }\n\n eyes_files = {\n \"regular\": \"eyes1\",\n \"small\": \"eyes2\",\n \"rayban\": \"eyes3\",\n \"hipster\": \"eyes4\",\n \"focused\": \"eyes5\"\n }\n\n hair_files = {\n \"hair1\": \"hair1\",\n \"hair2\": \"hair2\",\n \"hair3\": \"hair3\",\n \"hair4\": \"hair4\",\n \"hair5\": \"hair5\",\n \"hair6\": \"hair6\",\n \"hair7\": \"hair7\",\n \"hair8\": \"hair8\",\n \"hair9\": \"hair9\",\n \"hair10\": \"hair10\",\n \"hair11\": \"hair11\",\n \"hair12\": \"hair12\"\n }\n\n mouth_files = {\n \"m1\": \"m1\",\n \"m2\": \"m2\",\n \"m3\": \"m3\",\n \"m4\": \"m4\",\n \"m5\": \"m5\",\n \"m6\": \"m6\"\n }\n\n nose_files = {\n \"n1\": \"n1\",\n \"n2\": \"n2\"\n }\n\n access_files = {\n \"acc1\": \"acc1\",\n \"acc2\": \"acc2\",\n \"acc3\": \"acc3\"\n }\n\n beard_files = {\n \"b1\": \"beard1\",\n \"b2\": \"beard2\",\n \"b3\": \"beard3\",\n \"b4\": \"beard4\",\n \"b5\": \"beard5\",\n \"b6\": \"beard6\",\n \"b7\": \"beard7\",\n \"b8\": \"beard8\"\n }\n ## Generate Traits\n\n TOTAL_IMAGES = 100 # Number of random unique images 
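(distinct trait combinations) 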
we want to generate\n\n all_images = []\n\n def __init__(self):\n\n face,ears,eyes,hair,mouth,nose,access,beard = self.face,self.ears,self.eyes,self.hair,self.mouth,self.nose,self.access,self.beard\n face_weights, ears_weights, eyes_weights, hair_weights, mouth_weights, nose_weights, access_weights, beard_weights = self.face_weights, self.ears_weights, self.eyes_weights, self.hair_weights, self.mouth_weights, self.nose_weights, self.access_weights, self.beard_weights\n all_images = self.all_images\n TOTAL_IMAGES = self.TOTAL_IMAGES\n\n # A recursive function to generate unique image combinations\n def _create_new_image():\n new_image = {} #\n\n # For each trait category, select a random trait based on the weightings\n new_image[\"Face\"] = random.choices(face, face_weights)[0]\n new_image[\"Ears\"] = random.choices(ears, ears_weights)[0]\n new_image[\"Eyes\"] = random.choices(eyes, eyes_weights)[0]\n new_image[\"Hair\"] = random.choices(hair, hair_weights)[0]\n new_image[\"Mouth\"] = random.choices(mouth, mouth_weights)[0]\n new_image[\"Nose\"] = random.choices(nose, nose_weights)[0]\n new_image[\"Access\"] = random.choices(access, access_weights)[0]\n new_image[\"Beard\"] = random.choices(beard, beard_weights)[0]\n\n if new_image in all_images:\n return _create_new_image()\n else:\n return new_image\n\n\n # Generate the unique combinations based on trait weightings\n for i in range(TOTAL_IMAGES):\n new_trait_image = _create_new_image()\n\n all_images.append(new_trait_image)\n\n\n def _all_images_unique(all_images):\n seen = list()\n return not any(i in seen or seen.append(i) for i in all_images)\n\n\n print(\"Are all images unique?\", _all_images_unique(all_images))\n # Add token Id to each image\n i = 0\n for item in all_images:\n item[\"tokenId\"] = i\n i = i + 1\n\n print(all_images)\n\n def trait_counts(self):\n\n face, ears, eyes, hair, mouth, nose, access, beard = self.face, self.ears, self.eyes, self.hair, self.mouth, self.nose, self.access, self.beard\n all_images = self.all_images\n\n # Get Trait Counts\n\n face_count = {}\n for item in face:\n face_count[item] = 0\n\n ears_count = {}\n for item in ears:\n ears_count[item] = 0\n\n eyes_count = {}\n for item in eyes:\n eyes_count[item] = 0\n\n hair_count = {}\n for item in hair:\n hair_count[item] = 0\n\n mouth_count = {}\n for item in mouth:\n mouth_count[item] = 0\n\n nose_count = {}\n for item in nose:\n nose_count[item] = 0\n\n access_count = {}\n for item in access:\n access_count[item] = 0\n\n beard_count = {}\n for item in beard:\n beard_count[item] = 0\n\n for image in all_images:\n face_count[image[\"Face\"]] += 1\n ears_count[image[\"Ears\"]] += 1\n eyes_count[image[\"Eyes\"]] += 1\n hair_count[image[\"Hair\"]] += 1\n mouth_count[image[\"Mouth\"]] += 1\n nose_count[image[\"Nose\"]] += 1\n access_count[image[\"Access\"]] += 1\n beard_count[image[\"Beard\"]] += 1\n\n print(face_count)\n print(ears_count)\n print(eyes_count)\n print(hair_count)\n print(mouth_count)\n print(nose_count)\n print(access_count)\n print(beard_count)\n\n def create_images(self):\n\n all_images = self.all_images\n face_files, ears_files, eyes_files, hair_files, mouth_files, nose_files, access_files, beard_files = self.face_files, self.ears_files, self.eyes_files, self.hair_files, self.mouth_files, self.nose_files, self.access_files, self.beard_files\n\n #### Generate Images\n\n os.mkdir(f'./images')\n\n for item in all_images:\n im1 = Image.open(f'./scripts/face_parts/face/{face_files[item[\"Face\"]]}.png').convert('RGBA')\n im2 = 
Image.open(f'./scripts/face_parts/eyes/{eyes_files[item[\"Eyes\"]]}.png').convert('RGBA')\n im3 = Image.open(f'./scripts/face_parts/ears/{ears_files[item[\"Ears\"]]}.png').convert('RGBA')\n im4 = Image.open(f'./scripts/face_parts/hair/{hair_files[item[\"Hair\"]]}.png').convert('RGBA')\n im5 = Image.open(f'./scripts/face_parts/mouth/{mouth_files[item[\"Mouth\"]]}.png').convert('RGBA')\n im6 = Image.open(f'./scripts/face_parts/nose/{nose_files[item[\"Nose\"]]}.png').convert('RGBA')\n im7 = Image.open(f'./scripts/face_parts/access/{access_files[item[\"Access\"]]}.png').convert('RGBA')\n im8 = Image.open(f'./scripts/face_parts/beard/{beard_files[item[\"Beard\"]]}.png').convert('RGBA')\n\n # Create each composite\n com1 = Image.alpha_composite(im1, im2)\n com2 = Image.alpha_composite(com1, im3)\n com3 = Image.alpha_composite(com2, im4)\n com4 = Image.alpha_composite(com3, im5)\n com5 = Image.alpha_composite(com4, im6)\n com6 = Image.alpha_composite(com5, im7)\n com7 = Image.alpha_composite(com6, im8)\n\n # Convert to RGB\n rgb_im = com7.convert('RGB')\n file_name = str(item[\"tokenId\"]) + \".png\"\n rgb_im.save(\"./images/\" + file_name)\n\n def create_metadata(self):\n\n \"\"\" 각 이미지 별 Json 파일 생성 \"\"\"\n\n #### Generate Metadata for all Traits\n os.mkdir(f'./metadata')\n\n METADATA_FILE_NAME = './metadata/all-traits.json';\n with open(METADATA_FILE_NAME, 'w') as outfile:\n json.dump(self.all_images, outfile, indent=4)\n\n #### Generate Metadata for each Image\n\n f = open('./metadata/all-traits.json', )\n data = json.load(f)\n\n # Changes this IMAGES_BASE_URL to yours\n IMAGES_BASE_URL = \"https://gateway.pinata.cloud/ipfs/QmcKH9bwdM3KGij799reUYA24jtufvHSoYmY5xMXEM7T4E/\"\n PROJECT_NAME = \"NFT_CREATOR\"\n\n def getAttribute(key, value):\n return {\n \"trait_type\": key,\n \"value\": value\n }\n\n for i in data:\n token_id = i['tokenId']\n token = {\n \"image\": IMAGES_BASE_URL + str(token_id) + '.png',\n \"tokenId\": token_id,\n \"name\": PROJECT_NAME + ' ' + str(token_id),\n \"attributes\": []\n }\n token[\"attributes\"].append(getAttribute(\"Face\", i[\"Face\"]))\n token[\"attributes\"].append(getAttribute(\"Ears\", i[\"Ears\"]))\n token[\"attributes\"].append(getAttribute(\"Eyes\", i[\"Eyes\"]))\n token[\"attributes\"].append(getAttribute(\"Hair\", i[\"Hair\"]))\n token[\"attributes\"].append(getAttribute(\"Mouth\", i[\"Mouth\"]))\n token[\"attributes\"].append(getAttribute(\"Nose\", i[\"Nose\"]))\n token[\"attributes\"].append(getAttribute(\"Access\", i[\"Access\"]))\n token[\"attributes\"].append(getAttribute(\"Beard\", i[\"Beard\"]))\n\n with open('./metadata/' + str(token_id) + \".json\", 'w') as outfile:\n json.dump(token, outfile, indent=4)\n f.close()\n\n \n\nif __name__ == \"__main__\":\n nft = NFT()\n nft.create_images()\n nft.create_metadata()\n\n\n\n","sub_path":"nft_insights/001_NFT_training/nft_create_1.py","file_name":"nft_create_1.py","file_ext":"py","file_size_in_byte":10734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"336602861","text":"# coding: utf-8\n\n\"\"\"\n 3Di API\n\n 3Di simulation API (latest version: 3.0) Framework release: 1.0.16 3Di core release: 2.0.11 deployed on: 07:33AM (UTC) on September 04, 2020 # noqa: E501\n\n The version of the OpenAPI document: 3.0\n Contact: info@nelen-schuurmans.nl\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom openapi_client.configuration import Configuration\n\n\nclass 
TimedStructureControl(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n \"id\": \"int\",\n \"url\": \"str\",\n \"simulation\": \"str\",\n \"offset\": \"int\",\n \"duration\": \"int\",\n \"value\": \"list[float]\",\n \"type\": \"str\",\n \"structure_id\": \"int\",\n \"structure_type\": \"str\",\n \"state\": \"str\",\n \"state_detail\": \"object\",\n \"grid_id\": \"int\",\n \"uid\": \"str\",\n }\n\n attribute_map = {\n \"id\": \"id\",\n \"url\": \"url\",\n \"simulation\": \"simulation\",\n \"offset\": \"offset\",\n \"duration\": \"duration\",\n \"value\": \"value\",\n \"type\": \"type\",\n \"structure_id\": \"structure_id\",\n \"structure_type\": \"structure_type\",\n \"state\": \"state\",\n \"state_detail\": \"state_detail\",\n \"grid_id\": \"grid_id\",\n \"uid\": \"uid\",\n }\n\n def __init__(\n self,\n id=None,\n url=None,\n simulation=None,\n offset=None,\n duration=None,\n value=None,\n type=None,\n structure_id=None,\n structure_type=None,\n state=None,\n state_detail=None,\n grid_id=None,\n uid=None,\n local_vars_configuration=None,\n ): # noqa: E501\n \"\"\"TimedStructureControl - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._url = None\n self._simulation = None\n self._offset = None\n self._duration = None\n self._value = None\n self._type = None\n self._structure_id = None\n self._structure_type = None\n self._state = None\n self._state_detail = None\n self._grid_id = None\n self._uid = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if url is not None:\n self.url = url\n if simulation is not None:\n self.simulation = simulation\n self.offset = offset\n self.duration = duration\n self.value = value\n self.type = type\n self.structure_id = structure_id\n self.structure_type = structure_type\n if state is not None:\n self.state = state\n if state_detail is not None:\n self.state_detail = state_detail\n self.grid_id = grid_id\n if uid is not None:\n self.uid = uid\n\n @property\n def id(self):\n \"\"\"Gets the id of this TimedStructureControl. # noqa: E501\n\n\n :return: The id of this TimedStructureControl. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this TimedStructureControl.\n\n\n :param id: The id of this TimedStructureControl. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def url(self):\n \"\"\"Gets the url of this TimedStructureControl. # noqa: E501\n\n\n :return: The url of this TimedStructureControl. # noqa: E501\n :rtype: str\n \"\"\"\n return self._url\n\n @url.setter\n def url(self, url):\n \"\"\"Sets the url of this TimedStructureControl.\n\n\n :param url: The url of this TimedStructureControl. # noqa: E501\n :type: str\n \"\"\"\n\n self._url = url\n\n @property\n def simulation(self):\n \"\"\"Gets the simulation of this TimedStructureControl. # noqa: E501\n\n\n :return: The simulation of this TimedStructureControl. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._simulation\n\n @simulation.setter\n def simulation(self, simulation):\n \"\"\"Sets the simulation of this TimedStructureControl.\n\n\n :param simulation: The simulation of this TimedStructureControl. # noqa: E501\n :type: str\n \"\"\"\n\n self._simulation = simulation\n\n @property\n def offset(self):\n \"\"\"Gets the offset of this TimedStructureControl. # noqa: E501\n\n offset of event in simulation in seconds # noqa: E501\n\n :return: The offset of this TimedStructureControl. # noqa: E501\n :rtype: int\n \"\"\"\n return self._offset\n\n @offset.setter\n def offset(self, offset):\n \"\"\"Sets the offset of this TimedStructureControl.\n\n offset of event in simulation in seconds # noqa: E501\n\n :param offset: The offset of this TimedStructureControl. # noqa: E501\n :type: int\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation and offset is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `offset`, must not be `None`\"\n ) # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and offset is not None\n and offset > 2147483647\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `offset`, must be a value less than or equal to `2147483647`\"\n ) # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and offset is not None\n and offset < 0\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `offset`, must be a value greater than or equal to `0`\"\n ) # noqa: E501\n\n self._offset = offset\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this TimedStructureControl. # noqa: E501\n\n\n :return: The duration of this TimedStructureControl. # noqa: E501\n :rtype: int\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this TimedStructureControl.\n\n\n :param duration: The duration of this TimedStructureControl. # noqa: E501\n :type: int\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation and duration is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `duration`, must not be `None`\"\n ) # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and duration is not None\n and duration < 1\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `duration`, must be a value greater than or equal to `1`\"\n ) # noqa: E501\n\n self._duration = duration\n\n @property\n def value(self):\n \"\"\"Gets the value of this TimedStructureControl. # noqa: E501\n\n\n :return: The value of this TimedStructureControl. # noqa: E501\n :rtype: list[float]\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n \"\"\"Sets the value of this TimedStructureControl.\n\n\n :param value: The value of this TimedStructureControl. # noqa: E501\n :type: list[float]\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation and value is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `value`, must not be `None`\"\n ) # noqa: E501\n\n self._value = value\n\n @property\n def type(self):\n \"\"\"Gets the type of this TimedStructureControl. # noqa: E501\n\n\n :return: The type of this TimedStructureControl. # noqa: E501\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n \"\"\"Sets the type of this TimedStructureControl.\n\n\n :param type: The type of this TimedStructureControl. 
# noqa: E501\n :type: str\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation and type is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `type`, must not be `None`\"\n ) # noqa: E501\n allowed_values = [\n \"set_discharge_coefficients\",\n \"set_crest_level\",\n \"set_pump_capacity\",\n ] # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and type not in allowed_values\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\".format( # noqa: E501\n type, allowed_values\n )\n )\n\n self._type = type\n\n @property\n def structure_id(self):\n \"\"\"Gets the structure_id of this TimedStructureControl. # noqa: E501\n\n\n :return: The structure_id of this TimedStructureControl. # noqa: E501\n :rtype: int\n \"\"\"\n return self._structure_id\n\n @structure_id.setter\n def structure_id(self, structure_id):\n \"\"\"Sets the structure_id of this TimedStructureControl.\n\n\n :param structure_id: The structure_id of this TimedStructureControl. # noqa: E501\n :type: int\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation\n and structure_id is not None\n and structure_id > 2147483647\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `structure_id`, must be a value less than or equal to `2147483647`\"\n ) # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and structure_id is not None\n and structure_id < -2147483648\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `structure_id`, must be a value greater than or equal to `-2147483648`\"\n ) # noqa: E501\n\n self._structure_id = structure_id\n\n @property\n def structure_type(self):\n \"\"\"Gets the structure_type of this TimedStructureControl. # noqa: E501\n\n\n :return: The structure_type of this TimedStructureControl. # noqa: E501\n :rtype: str\n \"\"\"\n return self._structure_type\n\n @structure_type.setter\n def structure_type(self, structure_type):\n \"\"\"Sets the structure_type of this TimedStructureControl.\n\n\n :param structure_type: The structure_type of this TimedStructureControl. # noqa: E501\n :type: str\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation\n and structure_type is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `structure_type`, must not be `None`\"\n ) # noqa: E501\n allowed_values = [\n \"v2_pumpstation\",\n \"v2_pipe\",\n \"v2_orifice\",\n \"v2_culvert\",\n \"v2_weir\",\n \"v2_channel\",\n ] # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and structure_type not in allowed_values\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `structure_type` ({0}), must be one of {1}\".format( # noqa: E501\n structure_type, allowed_values\n )\n )\n\n self._structure_type = structure_type\n\n @property\n def state(self):\n \"\"\"Gets the state of this TimedStructureControl. # noqa: E501\n\n\n :return: The state of this TimedStructureControl. # noqa: E501\n :rtype: str\n \"\"\"\n return self._state\n\n @state.setter\n def state(self, state):\n \"\"\"Sets the state of this TimedStructureControl.\n\n\n :param state: The state of this TimedStructureControl. 
# noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"processing\", \"valid\", \"invalid\"] # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and state not in allowed_values\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `state` ({0}), must be one of {1}\".format( # noqa: E501\n state, allowed_values\n )\n )\n\n self._state = state\n\n @property\n def state_detail(self):\n \"\"\"Gets the state_detail of this TimedStructureControl. # noqa: E501\n\n\n :return: The state_detail of this TimedStructureControl. # noqa: E501\n :rtype: object\n \"\"\"\n return self._state_detail\n\n @state_detail.setter\n def state_detail(self, state_detail):\n \"\"\"Sets the state_detail of this TimedStructureControl.\n\n\n :param state_detail: The state_detail of this TimedStructureControl. # noqa: E501\n :type: object\n \"\"\"\n\n self._state_detail = state_detail\n\n @property\n def grid_id(self):\n \"\"\"Gets the grid_id of this TimedStructureControl. # noqa: E501\n\n\n :return: The grid_id of this TimedStructureControl. # noqa: E501\n :rtype: int\n \"\"\"\n return self._grid_id\n\n @grid_id.setter\n def grid_id(self, grid_id):\n \"\"\"Sets the grid_id of this TimedStructureControl.\n\n\n :param grid_id: The grid_id of this TimedStructureControl. # noqa: E501\n :type: int\n \"\"\"\n if (\n self.local_vars_configuration.client_side_validation\n and grid_id is not None\n and grid_id > 2147483647\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `grid_id`, must be a value less than or equal to `2147483647`\"\n ) # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and grid_id is not None\n and grid_id < -2147483648\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `grid_id`, must be a value greater than or equal to `-2147483648`\"\n ) # noqa: E501\n\n self._grid_id = grid_id\n\n @property\n def uid(self):\n \"\"\"Gets the uid of this TimedStructureControl. # noqa: E501\n\n\n :return: The uid of this TimedStructureControl. # noqa: E501\n :rtype: str\n \"\"\"\n return self._uid\n\n @uid.setter\n def uid(self, uid):\n \"\"\"Sets the uid of this TimedStructureControl.\n\n\n :param uid: The uid of this TimedStructureControl. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._uid = uid\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(\n map(lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value)\n )\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(\n map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\")\n else item,\n value.items(),\n )\n )\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, TimedStructureControl):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, TimedStructureControl):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"openapi_client/models/timed_structure_control.py","file_name":"timed_structure_control.py","file_ext":"py","file_size_in_byte":16698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"603081910","text":"\n\nimport sqlite3\nimport time\n\n\ndtb='data_files/simPop.sqlite'\nconn=sqlite3.connect(dtb)\nc=conn.cursor()\n\nc.execute( '''\n select zip_cd,\n round(avg(dist),2),\n round(avg(tme),2)\n from inZipCombs3\n group by zip_cd\n '''\n )\n\nrecs=c.fetchall()\n\nfor rec in recs:\n c.execute('''INSERT INTO distTme(zip_cd1,zip_cd2,dist,tme)\n VALUES(?,?,?,?)''', (rec[0],rec[0],rec[1],rec[2])) \n\n\nconn.commit()\nconn.close()\n","sub_path":"medSched/mbr_data/inZip4.py","file_name":"inZip4.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"549766741","text":"import unittest\nfrom worlds import *\n\nSimpleConfig = \"\"\"\nQ direct_value 10\n\nA make_value\n\tnow 1h\n\tdirect_value 100\n\"\"\"\n\nTreeConfig = \"\"\"\nQ direct_value 1\n\nQ A 1\n\tdirect_value 1\n\nQ B 1\n\tdirect_value 1\n\nQ AA 1\n\tA 1\n\nQ AB 1\n\tA 1\n\tB 1\n\nQ BB 1\n\tB 1\n\n\"\"\"\n\nMetaAnnuityConfig = \"\"\"\nQ direct_value 1\n\nQ dv_annuity 1\n\t@direct_value 2 r1\n\nQ dv_meta_annuity 1\n\t@dv_annuity 2 r1\n\nQ dv_meta_meta_annuity 1\n\t@dv_meta_annuity 2 r1\n\nA make_dv_mm_annuity\n\tnow 1h\n\tdv_meta_meta_annuity 1\n\"\"\"\n\nclass TestQuantityValuation(unittest.TestCase):\n\tdef test_simple(self):\n\t\tself.assertEqual(ParseConfig(SimpleConfig).GetQuantityValue(), 10.)\n\tdef test_meta_annuities(self):\n\t\tC = ParseConfig(MetaAnnuityConfig)\n\t\tshouldbe = 2*2*2+2*2+2+1\n\t\tself.assertTrue((shouldbe-.1) < C.GetQuantityValue() < (shouldbe+0.01))\n\tdef test_tree_propagation(self):\n\t\tC = ParseConfig(TreeConfig)\n\t\tshouldbe = 1 + 1*(1+(1*1)+(1*1)) + 1*(1+(1*1)+(1*1))\n\t\tself.assertTrue((shouldbe-.1) < C.GetQuantityValue() < (shouldbe+0.01))\n\nclass TestAgosValuation(unittest.TestCase):\n\tdef test_simple(self):\n\t\tC = ParseConfig(SimpleConfig)\n\t\tself.assertTrue(abs(\n\t\t\tC.AgosIndirectValue(\"make_value\")[0] - 100./3600.)\n\t\t\t< 0.01 )\n\tdef test_meta_annuity_creation(self):\n\t\tC = 
ParseConfig(MetaAnnuityConfig)\n\t\tself.assertTrue(abs(\n\t\t\tC.AgosIndirectValue(\"make_dv_mm_annuity\")[0] - (2*2*2)/3600.)\n\t\t\t< 0.1 )\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"worlds_test.py","file_name":"worlds_test.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"530257262","text":"class Quick_UnionFind:\r\n def __init__(self, n):\r\n self._id = list(range(n))\r\n self._sz = [1] * n\r\n self.cc = n # connected components\r\n\r\n def _root(self, i):\r\n while (i != self._id[i]):\r\n self._id[i] = self._id[self._id[i]]\r\n i = self._id[i]\r\n return i\r\n\r\n def find(self, p, q):\r\n return self._root(p) == self._root(q)\r\n\r\n def union(self, p, q):\r\n i = self._root(p)\r\n j = self._root(q)\r\n if i == j:\r\n return\r\n if (self._sz[i] < self._sz[j]):\r\n self._id[i] = j\r\n self._sz[j] += self._sz[i]\r\n else:\r\n self._id[j] = i\r\n self._sz[i] += self._sz[j]\r\n self.cc -= 1\r\n \r\nimport numpy as np\r\nimport random\r\n\r\nclass Percolation:\r\n def __init__(self, n):\r\n self.n = n\r\n self._grid = np.zeros((n, n)).astype(int)\r\n self.elements = Quick_UnionFind(n**2+2)\r\n \r\n def open_site(self,row,column):\r\n self._grid[row][column] = 1\r\n return self.elements._id[row*self.n+column]\r\n \r\n def is_open(self, row, column):\r\n return self._grid[row][column] == 1\r\n \r\n def num_of_opensite(self):\r\n m = 0\r\n for i in range(self.n):\r\n for j in range(self.n):\r\n if self.is_open(i, j):\r\n m += 1\r\n return m/self.n**2\r\n \r\n def percolates(self,n):\r\n self.top = -1\r\n self.bottom = n**2\r\n #uf = Quick_UnionFind(n**2-1)\r\n last_row = n * (n-1)\r\n for k in range(0, n):\r\n self.elements.union(last_row + k, self.bottom)\r\n self.elements.union(k, self.top)\r\n \r\n if self.elements.find(self.top, self.bottom):\r\n return True\r\n \r\nclass PercolationStats:\r\n def __init__(self, n, num_trails):\r\n self.n = n\r\n self.num_trails = num_trails\r\n \r\n def open_num(self, n):\r\n \r\n test = Percolation(self.n)\r\n while not test.percolates(n):\r\n site = random.randint(0, self.n**2-1)\r\n row = int(site//n)\r\n column = int(site%n)\r\n \r\n open_one = test.open_site(row, column)\r\n \r\n for row_next, column_next in [(int(row+1), int(column)), (int(row-1), int(column)), (int(row), int(column+1)), (int(row), int(column-1))]:\r\n if 0 <= row_next < n and 0 <= column_next < n and test.is_open(row_next, column_next):\r\n test.elements.union(open_one, test.open_site(row_next, column_next))\r\n \r\n \r\n return test.num_of_opensite()\r\n \r\n def trails(self, n, num_trails):\r\n i = 1\r\n trails = []\r\n \r\n while i <= num_trails:\r\n trails.append(self.open_num(n))\r\n i += 1\r\n \r\n return trails\r\n \r\n def trails_mean(self, n, num_trails):\r\n return np.mean(self.trails(n, num_trails))\r\n \r\n def trails_std(self, n, num_trails):\r\n return np.std(self.trails(n, num_trails))\r\n \r\n def trails_confidenceLo(self, n, num_trails):\r\n return self.trails_mean(n, num_trails) - ((1.96 * self.trails_std(n, num_trails)) / np.sqrt(num_trails))\r\n \r\n def trails_confidenceHi(self, n, num_trails):\r\n return self.trails_mean(n, num_trails) + ((1.96 * self.trails_std(n, num_trails)) / np.sqrt(num_trails))\r\n \r\n \r\nres = PercolationStats(50, 50)\r\nres.trails(50, 50)\r\nres.trails_mean(50, 50)","sub_path":"Assignment 1 
Percolation/Percolation.py","file_name":"Percolation.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"238096322","text":"# -*- coding: utf-8 -*-\nlista=[]\nn=int(input('enter how many values: '))\nfor i in range(0,n,1):\n    numero=int(input('enter a value: '))\n    lista.append(numero)\n\n# mean of the values\nsoma=0\nfor i in range(0,len(lista),1):\n    soma=soma+lista[i]\nmedia=soma/len(lista)\n\n# sample standard deviation\nsoma1=0\nfor i in range(0,len(lista),1):\n    soma1=soma1+(lista[i]-media)**2\ndesvio=(soma1/(len(lista)-1))**0.5\nprint('%.2f' %lista[0])\nprint('%.2f' %lista[n-1])\nprint('%.2f' %media)\nprint('%.2f' %desvio)\n","sub_path":"moodledata/vpl_data/95/usersdata/189/57633/submittedfiles/desvpad.py","file_name":"desvpad.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"17948136","text":"def validate_pin(pin):\n    # a valid PIN is numeric and exactly 4 or 6 digits long\n    if len(pin) > 0 and pin.isnumeric():\n        if len(pin) == 4 or len(pin) == 6:\n            return True\n    return False\n\n\nx = validate_pin(\"1\")\nprint(x)","sub_path":"CodeWars/Regex validate PIN code.py","file_name":"Regex validate PIN code.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"28795281","text":"from contextlib import contextmanager\n\n\n@contextmanager\ndef managed(sessionClass, auto_flush=False):\n    session = sessionClass()\n    session.autoflush = auto_flush\n    session.autocommit = False\n    try:\n        yield session\n        session.commit()\n    except:\n        session.rollback()\n        raise\n    finally:\n        session.close()\n\n\n@contextmanager\ndef commit_on_success(session):\n    try:\n        yield session\n        session.commit()\n    except:\n        raise\n","sub_path":"alchemytools/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"220350589","text":"\"\"\"Module prime number checker.\n\nThis module is used for checking whether the input is a prime number; it supplies the functions:\nisPrimeNumber(), getDivisors()\n\n\"\"\"\n\n\n# Author: Barbana Klimekova \n# Lucia Kubaskova \n# Tomas_Prikasky \n#\n# Description: Create a program to evaluate whether the entered number\n# is a prime number.\n# Prime numbers have exactly two divisors, are divisible\n# by only 1 and by themselves.\n\n\ndef isPrimeNumber(number):\n    \"\"\"Return True if number is a prime number.\n\n    Parameters:\n    - number (int): the number to test\n    Returns:\n    - output (bool): True if number has no divisor between 2 and number - 1\n    \"\"\"\n    # logic of prime number evaluation\n    isPrimeNumber = True\n    for divisor in range(2, number):\n        if number % divisor == 0:\n            isPrimeNumber = False\n            break\n    return isPrimeNumber\n\n\ndef getDivisors(number):\n    \"\"\"Return the list of all divisors of number.\n\n    Parameters:\n    - number (int): the number whose divisors are collected\n    Returns:\n    - output (int[]): every divisor of number, in ascending order\n    \"\"\"\n    # collect every divisor of number from 1 up to number itself\n    divisors = []\n    for divisor in range(1, number + 1):\n        if number % divisor == 0:\n            divisors.append(divisor)\n    return divisors\n\n\ndef main():\n    \"\"\"Execute all functions.\n\n    Parameters:\n    - number - input from console\n    Returns:\n    - output (int): print statements into 
console\n    \"\"\"\n    try:\n        number = int(input(\"Enter a number: \"))\n        if number < 0:\n            print(\"The input is not a positive number\")\n            return 1\n        if isPrimeNumber(number):\n            print(f\"The number {number} is a prime number\")\n        else:\n            print(f\"The number {number} is not a prime number\")\n        divisors = getDivisors(number)\n        print(\"Divisors: \", end=\"\")\n        print(*divisors, sep=\", \")\n        print('Number of divisors:', len(divisors))\n    except Exception:\n        print(\"The input is not a number\")\n\n\nif __name__ == \"__main__\":\n    main()\n\n# print documentation using pydoc.\n\n# print documentation for the module\n# print(__doc__)\n# print documentation for function isPrimeNumber\n# print(isPrimeNumber.__doc__)\n# print documentation for function getDivisors\n# print(getDivisors.__doc__)\n","sub_path":"primeNumber.py","file_name":"primeNumber.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"315075455","text":"import time\nfrom report import report_sxw\nfrom osv import osv\n#import amount_to_text_id\n\nclass report_webkit_html(report_sxw.rml_parse):\n    def __init__(self, cr, uid, name, context):\n        super(report_webkit_html, self).__init__(cr, uid, name, context=context)\n        self.localcontext.update({\n            'time': time,\n            'cr':cr,\n            'uid': uid,\n            #'convert':self.convert,\n        })\n    \"\"\"def convert(self, amount, cur):\n        amt_id = amount_to_text_id.amount_to_text(amount, 'id', cur)\n        return amt_id\"\"\"\n\nreport_sxw.report_sxw('report.webkit.purchase.order',\n                      'purchase.order',\n                      'addons/ad_report_webkit_purchase/report/report_webkit_html.mako',\n                      parser=report_webkit_html)\n","sub_path":"ad_report_webkit_purchase/report/report_webkit_html.py","file_name":"report_webkit_html.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"192950919","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the saveThePrisoner function below.\n# n = number of prisoners\n# m = number of sweets\n# s = starting position\ndef saveThePrisoner(n, m, s):\n    s2 = s - 1 # zero indexing makes more sense for modular arithmetic\n    w = (s2 + (m - 1)) % n # first sweet at position s2, loops around at n\n    return w + 1 # adjust to get rid of zero indexing\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    t = int(input())\n\n    for t_itr in range(t):\n        nms = input().split()\n\n        n = int(nms[0])\n\n        m = int(nms[1])\n\n        s = int(nms[2])\n\n        result = saveThePrisoner(n, m, s)\n\n        fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","sub_path":"Hackerrank/save-the-prisoner.py","file_name":"save-the-prisoner.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"601891688","text":"from tkinter import *\r\n\r\n\r\n\r\n# create the canvas, size in pixels\r\ncanvas = Canvas(width=300, height=200)\r\n\r\n# load the image file\r\ngif1 = PhotoImage(file=r'C:\\Users\\drybo\\Desktop\\python\\AP Comp sci solitaire\\solitaire.png')\r\n# put the image on the canvas\r\n# the image's upper left corner (NW) on the canvas is at x=0 y=0\r\ncanvas.create_image(0, 0, image=gif1, anchor=NW)\r\n\r\n# pack the canvas into a frame/form\r\ncanvas.grid(row = 0, column = 0, rowspan = 1, sticky='nsew')\r\n\r\n# run it ...\r\nmainloop()\r\n","sub_path":"python practices and documantation/tkinter gui/canvases with images on 
them.py","file_name":"canvases with images on them.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"572026877","text":"import adv_test\nimport adv\nfrom adv import *\n\ndef module():\n return D_Cleo\n\nclass D_Cleo(adv.Adv):\n conf = {\n \"mod_a\": ('att' , 'passive', 0.13) ,\n \"mod_d\":[('att' , 'passive', 0.45) ,\n ('crit', 'chance' , 0.20)],\n 'condition':'hp70'\n }\n\n def init(this):\n this.stance = 0\n this.energy = 0\n this.hits = 0\n\n def dmg_proc(this, name, amount):\n if name == 'x1':\n this.hits += 1\n elif name == 'x2':\n this.hits += 2\n elif name == 'x3':\n this.hits += 2\n elif name == 'x4':\n this.hits += 1\n elif name == 'x5':\n this.hits += 1\n elif name == 'fs':\n this.hits += 3\n elif name == 's1':\n this.hits += 11\n elif name == 's2':\n this.hits += 5\n if this.hits >= 30:\n this.add_energy(1)\n this.hits -= 30\n\n def add_energy(this, count):\n this.energy += count\n log(\"buff\",\"energy\",this.energy)\n\n def s1_proc(this, e):\n if this.stance == 0:\n this.stance = 1\n elif this.stance == 1:\n this.stance = 2\n adv.Buff('s1s',0.1,10,'att').on()\n elif this.stance == 2:\n this.stance = 0\n adv.Buff('s1s',0.1,10,'att').on()\n adv.Buff('s1c',0.08,10,'crit','chance').on()\n\n if this.energy >= 5:\n this.energy = 0\n log(\"buff\",\"energy\",this.energy)\n this.dmg_make(\"o_s1_energy\",this.conf[\"s1_dmg\"]*0.4)\n else:\n this.add_energy(1)\n \n\n def s2_proc(this, e):\n if this.energy >= 5:\n this.energy = 0\n log(\"buff\",\"energy\",this.energy)\n this.dmg_make(\"o_s2_energy\",this.conf[\"s2_dmg\"]*0.4)\n\n\n\nif __name__ == '__main__':\n conf = {}\n conf['acl'] = \"\"\"\n `s1, seq=5 and cancel or fsc\n `s2, seq=5 and cancel or fsc\n `s3, seq=5 and cancel or fsc\n `fs, seq=5\n \"\"\"\n\n adv_test.test(module(), conf, verbose=0)\n","sub_path":"adv/d_cleo.py","file_name":"d_cleo.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"93602361","text":"from opencage.geocoder import OpenCageGeocode\nfrom darksky import forecast\nfrom discord.ext import commands\nimport discord\nfrom discord import embeds, Color\n\ndeg = u'\\u00b0'\n\nwith open('opencage_key.txt','r') as f: #add opencage token to opencage_key.txt\n cage_key = f.readline()\nwith open('darksky_key.txt','r') as f: #add opencage token to darksky_key.txt\n dark_key = f.readline()\n\n\n\nclass Weather:\n def __init__(self, client):\n self.client = client\n self.geocoder = OpenCageGeocode(key=cage_key)\n\n @commands.command(pass_context=True)\n async def weather(self, ctx, *, city):\n # Gives the current weather of the specified location\n if not city:\n raise commands.MissingRequiredArgument(\"Please add city.\")\n\n g = self.geocoder.geocode(city)\n #print(g)\n if not g:\n raise commands.BadArgument(\"City not found\")\n lat = g[0]['geometry']['lat']\n lng = g[0]['geometry']['lng']\n #radar_map = 'https://darksky.net/map-embed/@temperature,' + str(lat) + ',' + str(lng) + ',4.js?embed=true&timeControl=false&fieldControl=false&defaultField=temperature&defaultUnits=_f' dark sky does not have radar snapshots only embeds\n with forecast(dark_key, lat, lng) as myforecast:\n embed=discord.Embed(title=g[0]['formatted'], url='https://darksky.net/forecast/' + str(lat) +'/' + str(lng), description=myforecast.currently.summary + ' : ' + myforecast.daily.summary, color=0xfa14e9)#0x02adf7\n 
embed.set_thumbnail(url='https://darksky.net/images/darkskylogo.png')\n            embed.add_field(name='Current Temperature:', value='{}F ({:.1f}C)'.format(myforecast.currently.temperature, (myforecast.currently.temperature - 32) * (5 / 9)), inline=True)\n            embed.add_field(name='Feels Like:', value='{}F ({:.1f}C)'.format(myforecast.currently.apparentTemperature, (myforecast.currently.apparentTemperature - 32) * (5 / 9)), inline=True)\n            embed.add_field(name='Chance of Precipitation:', value='{}%'.format(myforecast.currently.precipProbability * 100), inline=False)\n            try:  # the forecast may not report a precipType at all\n                embed.add_field(name='Type of Precipitation:', value=myforecast.currently.precipType, inline=True)\n            except Exception:\n                embed.add_field(name='Type of Precipitation:', value='N/A', inline=True)\n            embed.add_field(name='Humidity:', value='{}%'.format(myforecast.currently.humidity * 100), inline=False)\n            embed.add_field(name='Wind Speed:', value='{}mph'.format(myforecast.currently.windSpeed), inline=True)\n            try:  # alerts may be absent\n                embed.add_field(name='Alerts:', value=myforecast.alerts[0].title, inline=False)\n                embed.add_field(name='Alert URL:', value=myforecast.alerts[0].uri, inline=False)\n            except Exception:\n                embed.add_field(name='Alerts:', value='No alerts currently', inline=False)\n            embed.set_footer(text='Powered by DarkSky: https://darksky.net/poweredby/')\n            return await self.client.say(embed=embed)\n\ndef setup(client):\n    client.add_cog(Weather(client))\n","sub_path":"cogs/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"345013994","text":"from crownstone_core.util.BufferReader import BufferReader\n\n\nclass UartCrownstoneStatusPacket:\n    \"\"\"\n    UART crownstone status packet:\n    1B flags\n    \"\"\"\n\n    def __init__(self, buffer: BufferReader or list or bytearray):\n        if isinstance(buffer, BufferReader):\n            reader = buffer\n        else:\n            reader = BufferReader(buffer)\n\n        self.flags = reader.getUInt8()\n\n        # Parse flags\n        self.encryptionRequired = self.flags & (1 << 0) != 0\n        self.hasBeenSetUp = self.flags & (1 << 1) != 0\n        self.hubMode = self.flags & (1 << 2) != 0\n        self.hasError = self.flags & (1 << 3) != 0\n","sub_path":"crownstone_uart/core/uart/uartPackets/UartCrownstoneStatusPacket.py","file_name":"UartCrownstoneStatusPacket.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"14920865","text":"import numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\n\n# Ex1\n# Given two arrays A and B each of the same size calculate their sum (elementwise) and their product (elementwise).\n# A = np.arange(5)\n# B = np.arange(5, 10)\n\n\ndef sum_arrays(A, B):\n    a = np.array(A)\n    b = np.array(B)\n    return a + b\n\n\ndef multiply_arrays(A, B):\n    a = np.array(A)\n    b = np.array(B)\n    return a * b\n\n\n# Ex2\n# Given an array A with shape (128,) calculate the mean of the elements at even indexes.\nA = np.arange(128)\n\n\ndef mean_even_idx(A):\n    a = A[::2]\n    return np.average(a)\n\n\n# Given an array A with shape (N,) make an array with all elements of A in reverse order\n# and return as a matrix of size (N, 1).\n\n\ndef reverse(A):\n    a = np.array(A)\n    a = a[::-1]\n    return a.reshape(len(A), 1)\n\n\n# STUDENT\ndef jthColumnOf(X, j):\n    x = np.array(X)\n    column = x[:, j]\n    return column\n\n\nm = n = 5\nX = np.arange(m * n).reshape(m, n)\n\n\n# Given a data matrix X with shape (m,n) calculate the vector M of 
shape (n,),\n# where M[i] is the mean of the i-th column of X. Try doing this without using np.mean\n\ndef f(X):\n    x = np.array(X)\n    p = []\n    # how to remove this for loop\n    for i in range(len(X)):\n        k = x[:, i]\n        k = np.average(k)\n        p.append(k)\n    s = np.array(p)\n    return s\n\n\nk = [[1, 2, 3],\n     [4, 5, 6],\n     [7, 8, 9]\n     ]\n\n\n# Given column j, find the largest element and return the entire row of this element.\n# Hint: look at the function np.argmax for this.\ndef F(X, j):\n    x = np.array(X)\n    p = x[:, j]\n    k = np.argmax(p)\n    return x[k, :]\n\n\np = [[1, 2, 4, 5],\n     [5, 6, 2, 1],\n     [4, 3, 5, 2],\n     [56, 61, 4, 2]]\n\n\n# Calculate the inner product of two vectors v and w both of shape (N,).\n# Validate your result by computing the dot product using multiply and sum operations.\ndef innerProduct(v, w):\n    k = 0\n    for i in range(len(v)):\n        k = k + v[i] * w[i]\n    return k\n\n\np = [1, 2, 3]\nv = [4, 6, 1]\n\n\n# print(innerProduct(p, v))\n\n\n# Calculate the product of a matrix A of shape (M,N) with a vector v of shape (N,).\ndef matrixTimeVector(M, v):\n    answer = []\n    m = np.array(M)\n    for i in range(len(M)):\n        r = m[i, :]\n        row = np.array(r)\n        temp = []\n        for j in range(len(row)):\n            p = row[j] * v[j]\n            temp.append(p)\n        addToAnsw = sum(temp)\n        answer.append(addToAnsw)\n    return answer\n\n\ndef matrixTimeVectorOtherSolution(M, v):\n    # use the actual inputs so both solutions compute the same product\n    X = np.array(M)\n    w = np.array(v)\n    return (X @ w).reshape(len(M), 1)\n\n\na = [1, 2]\nb = [2, 1]\nM = [a, b]\nv = [3, 4]\nprint(matrixTimeVector(M, v))\nprint(matrixTimeVectorOtherSolution(M, v))\n","sub_path":"src/assignment1/arrays.py","file_name":"arrays.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"119146460","text":"import yaml\nimport argparse\nimport matlab.engine\nfrom pathlib import Path\nimport iqa_lpips\nimport iqa_fid\n\n\ndef _str2bool(v):\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('BOOLEAN VALUE EXPECTED.')\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--opt', '-opt', type=str, default='opt.yml', help='path to option YAML file.')\nparser.add_argument('--case', '-case', type=str, default='rbqe_div2k_qf30', help='specified case in YML.')\nparser.add_argument('--mode', '-mode', type=str, default='a', help='add (a) or write (w).')\nparser.add_argument('--if_src', '-if_src', type=_str2bool, default=False,\n                    help='if evaluate src for NIQE-M, PI, NIQE, MA.')\nparser.add_argument('--if_dst', '-if_dst', type=_str2bool, default=False, help='if evaluate dst for all metrics.')\nparser.add_argument('--start_idx', '-start_idx', type=int, default=0, help='start from the idx-th image.')\nparser.add_argument('--max_num', '-max_num', type=int, default=-1, help='total num of images.')\nargs = parser.parse_args()\n\ncurrent_dir = Path(__file__).resolve().parent\nopt_fp = current_dir / args.opt\nwith open(opt_fp, 'r') as fp:\n    opts_dict = yaml.load(fp, Loader=yaml.FullLoader)\nopts_dict = opts_dict[args.case]\n\ntag = args.case\nsrc_dir = opts_dict['src_dir']\ndst_dir = opts_dict['dst_dir']\ntar_dir = opts_dict['tar_dir']\nmode = args.mode\nif_src = args.if_src\nif_dst = args.if_dst\nstart_idx = args.start_idx\nmax_num = args.max_num\n\nif src_dir.split('/')[0] == '~':\n    src_dir = Path.home() / 
('/'.join(src_dir.split('/')[1:]))\nif dst_dir.split('/')[0] == '~':\n dst_dir = Path.home() / ('/'.join(dst_dir.split('/')[1:]))\nif tar_dir.split('/')[0] == '~':\n tar_dir = Path.home() / ('/'.join(tar_dir.split('/')[1:]))\n\nprint('MAIN: INFO')\nfor _item in [tag, src_dir, dst_dir, tar_dir, mode, if_src, if_dst, start_idx, max_num]:\n print(_item)\n\nif max_num == -1:\n tar_path_lst = sorted(Path(tar_dir).resolve().glob('*.png'))[start_idx:]\nelse:\n tar_path_lst = sorted(Path(tar_dir).resolve().glob('*.png'))[start_idx: start_idx + max_num]\n\nassert len(tar_path_lst) != 0, 'NOT FOUND!'\n\nif opts_dict['if_lpips']:\n print('\\nMAIN: evaluating LPIPS...')\n iqa_lpips.main(tag, mode, tar_path_lst, src_dir, dst_dir, if_dst)\n\nif opts_dict['if_fid']:\n print('\\nMAIN: evaluating FID...')\n iqa_fid.main(tag, mode, tar_path_lst, src_dir, dst_dir, if_dst)\n\ntar_path_lst = [str(path_) for path_ in tar_path_lst]\n\nif opts_dict['if_psnr']:\n print('\\nMAIN: evaluating PSNR, SSIM and NIQE-M...')\n eng = matlab.engine.start_matlab()\n eng.iqa_psnr_ssim_niqe(tag, mode, tar_path_lst, str(src_dir), str(dst_dir), if_src, if_dst, nargout=0)\n eng.quit()\n\nif opts_dict['if_pi']:\n print('\\nMAIN: evaluating PI, NIQE and MA...')\n eng = matlab.engine.start_matlab()\n eng.iqa_pi_niqe_ma(tag, mode, tar_path_lst, str(src_dir), str(dst_dir), if_src, if_dst, nargout=0)\n eng.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"634323475","text":"import json\nimport re\n\ndef clean_record(text, \n page_breaks=True,\n midline_returns=True,\n time_marks=True):\n \"\"\"This function wraps several others and allows them to be turned \n on or off using keyword arguments\"\"\"\n clean_text = text\n if(page_breaks):\n clean_text = remove_page_breaks(clean_text)\n if(midline_returns):\n clean_text = remove_midline_returns(clean_text)\n if(time_marks):\n clean_text = remove_time_marks(clean_text)\n \n return clean_text\n\ndef remove_page_breaks(raw_text):\n page_break = '\\n\\[+Pages?.*\\]+\\n{1,2}'\n clean_text = re.sub(page_break, '', raw_text)\n return clean_text\n\n# Remove line breaks that cut paragraphs in half\ndef remove_midline_returns(raw_text):\n mid_line_returns = '\\n(\\w)'\n clean_text = re.sub(mid_line_returns, '\\\\1', raw_text)\n return clean_text\n\ndef remove_time_marks(raw_text):\n time_marks = '\\W*\\{time\\}.*\\n'\n clean_text = re.sub(time_marks, '', raw_text)\n return clean_text\n \n\n# Split document around blank lines\n# to get multi-paragraph sections\ndef get_sections(text):\n sections = []\n paragraph = ''\n for line in text.splitlines():\n new_section = (line == '' or get_speaker(line))\n if new_section and paragraph != '':\n sections.append(paragraph)\n paragraph = ''\n paragraph += line\n\n sections.append(paragraph)\n\n # Throw away empty sections\n sections = [val for val in sections if val != '']\n return sections\n\n# After getting a speaker match from\n# get_speaker() (below), double-check\n# a few things to remove false positives\ndef match_is_good(match):\n ignore_phrases = ['chair recognizes', 'during the vote', 'committee resumed']\n word_limit = 7\n match_is_good = True\n\n if match:\n for text in match.groups():\n if text and (len(text.split()) > word_limit):\n match_is_good = False\n for phrase in ignore_phrases:\n if text and re.match(phrase, text.lower()):\n match_is_good = False\n else:\n match_is_good = False\n\n\n return 
match_is_good\n\n# Look at the beginning of a section to see if\n# it corresponds to someone speaking. If it does,\n# figure out who\ndef get_speaker(section):\n    titles = '(?:The|Mr\\.|Mrs\\.|Ms\\.)'\n    caps_name_or_title = '([A-Z][A-Za-z ]+)'\n    paren_name = '(?:\\(([A-Za-z. ]+)\\))?'\n    speaker_string = ' {} {}{}\\.'.format(titles, caps_name_or_title, paren_name)\n\n    speaker_match = re.match(speaker_string, section)\n    if match_is_good(speaker_match):\n        if speaker_match.group(2):\n            speaker = speaker_match.group(2).lower()\n        else:\n            speaker = speaker_match.group(1).lower()\n    else:\n        speaker = None\n\n    # Check for 'Mr. Foo of Bar' and get 'Foo'\n    if speaker:\n        of_match = re.search('([a-z]+) of ', speaker)\n        if of_match:\n            speaker = of_match.group(1)\n\n    # Check for 'Mr. Foo' and get 'Foo'\n    if speaker:\n        title_match = re.match('(?:mr\\.|mrs\\.|ms\\.) (\\w+)', speaker)\n        if title_match:\n            speaker = title_match.group(1)\n\n    return speaker\n","sub_path":"concord/recordparser.py","file_name":"recordparser.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"242418735","text":"import requests\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport os\nimport time\nimport json\nimport datetime\nfrom googletrans import Translator\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntranslator=Translator()\n\ncn_key=input(\"key: \")\n\nkey=translator.translate(cn_key).text\n\nurl=\"https://www.flaticon.com/search?word=\"+key\nphoto_limit=5\n\n\nheaders = {\"user-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36\",\"upgrade-insecure-requests\":\"1\"}\nresponse = requests.get(url,headers = headers)\nsoup = BeautifulSoup(response.content,\"html.parser\")\n\n\nitems = soup.find_all(\"img\",attrs={\"class\":\"lzy\"})\n\ntheTime = datetime.datetime.now()\nstr_time=str(theTime).replace(\".\",\"_\")\nstr_time=str_time.replace(\":\",\"_\")\n\nprint(str_time)\n\nfolder_path=\"./image/\"+str_time+\"/\"+key+\"/\"\n\nif(os.path.exists(folder_path)== False):\n    os.makedirs(folder_path)\n\nimg_arr=[]\n\nfor index , item in enumerate(items):\n    if(item and index < photo_limit):\n        html = requests.get(item.get(\"data-src\"))\n        img_name = folder_path + str(index+1)+\".png\"\n\n        img_arr.append(img_name)\n\n        print(\"finish\",index+1)\n    else:\n        break\n\n    with open(img_name,\"wb\") as file:\n        file.write(html.content)\n        file.flush()\n\n    time.sleep(1)\nprint(\"done\")\n\n# display the downloaded images\n\ntoImage = Image.new('RGB', (128,64),(255,255,255))\nimg1 = Image.open(img_arr[0])\nimg12 = Image.open(img_arr[1])\nimg1 = img1.resize((64, 64),Image.ANTIALIAS)\nimg12 = img12.resize((64, 64),Image.ANTIALIAS)\ntoImage.paste(img1, (0, 0))\ntoImage.paste(img12, (64, 0, 64 + img1.size[0], 0 + img1.size[1]))\n\n# toImage is the background image; the four box values of paste() are:\n# start x, start y, end x, end y\ntoImage.save(\"merged.png\")\nplt.imshow(toImage)\nplt.title(\"level\")\nplt.show()","sub_path":"P/getimage.py","file_name":"getimage.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"393165271","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# load the photo data\nphotos = np.load('myproject/photos.npz')\nx = photos['x']\ny = photos['y']\n\n# start index --( 1)\nidx = 0\n\n# display with pyplot\nplt.figure(figsize=(10, 10))\n\nfor i in range(9):\n    plt.subplot(3, 3, 
i+1)\n    plt.title(y[i+idx])\n    plt.imshow(x[i+idx])\nplt.show()","sub_path":"myproject/sc3.py","file_name":"sc3.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"549610978","text":"import os\n\ndef val_char(x,alpha):\n    for i in range(0,len(alpha)):\n        if(x==alpha[i]):\n            return i\n\ndef chiffrer_vernam(message,cle,alpha,mode):\n    res=\"\"\n    j = 0\n    for i in range(0,len(message)):\n        if(mode and ord(message[i])!=32):\n            res=res+alpha[(val_char(message[i],alpha)+val_char(cle[j],alpha))%len(alpha)]\n        elif(mode == 0):\n            res=res+alpha[(val_char(message[i],alpha)-val_char(cle[j],alpha))%len(alpha)]\n        j=(j+1)%(len(cle))\n    return res\n\n#==========================================#\n\nif __name__ == \"__main__\":\n    os.system(\"clear\")\n\n    #alpha = \"abcdefghijklmnopqrstuvwxyz\"\n    alpha = \"01\" # for binary\n    #alpha = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" # for uppercase letters\n    print(\"alphabet: \"+alpha)\n\n    msg = input(\"message: \")\n    cle = input(\"key: \")\n\n    msg = chiffrer_vernam(msg,cle,alpha,1)\n    print(\"\\nencrypted: \"+ msg+\"\\n\")\n    msg2 = chiffrer_vernam(msg,cle,alpha,0)\n    print(\"\\ndecrypted: \"+ msg2+\"\\n\\n\")\n","sub_path":"Python/vernam.py","file_name":"vernam.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"166046914","text":"import argparse, sys, os\n\n\n\ndef main(args):\n\tfiles = {\n\t\t'revert': args.revert,\n\t\t'reverted': args.reverted\n\t}\n\t\n\tfor line in args.input:\n\t\tty = eval(line.strip().split(\"\\t\")[0])\n\t\tfiles[ty].write(line.split(\"\\t\", 1)[1])\n\t\t\n\t\tif ty == \"revert\":\n\t\t\tsys.stderr.write(\"<\")\n\t\telif ty == \"reverted\":\n\t\t\tsys.stderr.write(\"|\")\n\t\t\n\tsys.stderr.write(\"\\n\")\n\t\t\n\t\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(\n\t\tdescription='Cleans revert data from a dump map process.'\n\t)\n\tparser.add_argument(\n\t\t'-i', '--input',\n\t\tmetavar=\"\",\n\t\ttype=lambda fn:open(fn, \"r\"),\n\t\thelp='the path of the file to filter (defaults to stdin)',\n\t\tdefault=sys.stdin\n\t)\n\tparser.add_argument(\n\t\t'--reverted',\n\t\tmetavar=\"\",\n\t\ttype=lambda fn:open(os.path.expanduser(fn), \"w\"),\n\t\thelp='the path to a file to produce representing the reverted revisions'\n\t)\n\tparser.add_argument(\n\t\t'--revert',\n\t\tmetavar=\"\",\n\t\ttype=lambda fn:open(os.path.expanduser(fn), \"w\"),\n\t\thelp='the path to a file to produce representing the reverting revisions'\n\t)\n\targs = parser.parse_args()\n\tmain(args)\n\n","sub_path":"tools/wsor/overworked/convert_reverts.py","file_name":"convert_reverts.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"388332540","text":"\ndef Largest(a,b,c):\n    if a>max(b,c):\n        return -1\n    elif a <= min(b,c):\n        return min(b,c)\n    else:\n        return max(b,c)\n\n\ndef uniquerelation(list1,list2,len_input):\n\n    list3=[]\n    for i in range(len_input):\n        if len(list3)==0:\n            if list1[i] > list2[i]:\n                list3.append(list2[i])\n            else:\n                list3.append(list1[i])\n        else:\n            x = Largest(list3[i-1],list1[i],list2[i])\n            if x != -1:\n                list3.append(x)\n            else:\n                return 'NO'\n\n    return 'YES'\n\nTest_cases = int(input())\nfor noTC in range(Test_cases):\n    Len = int(input())\n\n\n    seq1=input().split()\n    seq1=[int(i) for i in seq1]\n\n    seq2=input().split()\n    seq2=[int(i) for i in seq2]\n\n    
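# uniquerelation() greedily keeps, at each index, the smallest of seq1[i] and\n    # seq2[i] that is still >= the previously chosen value (falling back to the\n    # larger one); it prints 'YES' when such a non-decreasing sequence exists\n    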
x=uniquerelation(seq1,seq2,Len)\n print(x)\n","sub_path":"ListCompare.py","file_name":"ListCompare.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"357012702","text":"class Convert:\n\n\tru_symbols = list('Ёёйцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХ/ЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ,\"№;:?')\n\ten_symbols = list('~`qwertyuiop[]asdfghjkl;\\'zxcvbnm,./QWERTYUIOP{|}ASDFGHJKL:\"ZXCVBNM<>@#$%^&')\n\n\t@staticmethod\n\tdef detect_layout(symbols):\n\n\t\tstring_length = len(symbols)\n\t\tru_percentage = en_percentage = 0\n\n\t\tfor symbol in symbols:\n\t\t\t\n\t\t\tif symbol in Convert.ru_symbols:\n\t\t\t\tru_percentage += 1 / string_length * 100\n\t\t\telif symbol in Convert.en_symbols:\n\t\t\t\ten_percentage += 1 / string_length * 100\n\n\t\treturn None if ru_percentage == en_percentage else ('ru' if ru_percentage > en_percentage else 'en')\n\n\t@staticmethod\n\tdef convert_symbols(symbols):\n\t\t\n\t\tlayout = Convert.detect_layout(symbols)\n\n\t\tif layout is None:\n\t\t\treturn\n\n\t\tclipboard = ''\n\n\t\tfor symbol in symbols:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tif layout == 'ru':\n\t\t\t\t\tclipboard += Convert.en_symbols[Convert.ru_symbols.index(symbol)]\n\t\t\t\telif layout == 'en':\n\t\t\t\t\tclipboard += Convert.ru_symbols[Convert.en_symbols.index(symbol)]\n\t\t\texcept ValueError:\n\t\t\t\tclipboard += symbol\n\n\t\treturn clipboard\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"24831276","text":"import random as random\nimport sys\n\nfrom game import *\n\n#Read the User Guide for more information about AI schemes!!!\nclass AI: #AI subclasses will inherit functions from \n\n def __init__(self, _name):\n self.name = _name\n\n def process_decision_params(self, _stip, _optional): #preprocessing used \n stip = _stip\n #if there is no stipulation, return a function that just returns the list of cards passed to it\n if not _stip:\n stip = lambda x: x\n optional_card = []\n #when choosing between cards, optional_card can be appended to the list of cards to choose from, \n #which will automatically handle dealing with op\n if _optional:\n optional_card = [ImaginaryCard()]\n return stip, optional_card\n\n def action_fn(self, _game, _player, _stip, _optional):\n stip, optional_card = self.process_decision_params(_stip, _optional)\n #get action cards to choose from\n choose_from = stip(_player.my_deck.get_actions_in_hand()) + optional_card\n #pick a random one to play\n if choose_from:\n return random.choice(choose_from)\n else:\n return ImaginaryCard()\n \n def discard_fn(self, _game, _player, _stip, _optional):\n stip, optional_card = self.process_decision_params(_stip, _optional)\n #get cards in hand to choose from\n choose_from = stip(_player.my_deck.hand) + optional_card\n #if there is at least one card available to pick, pick a random card from those available\n if choose_from:\n return random.choice(choose_from)\n else:\n return ImaginaryCard()\n \n def buy_fn(self, _game, _player, _stip, _optional):\n stip, optional_card = self.process_decision_params(_stip, _optional)\n #get valid cards in shop that the player is able to buy\n cards_available = _game.shop.get_cards_under_amount(_player.coins)\n choose_from = stip(cards_available) + optional_card\n #choose a random card from the valid one in the shop\n if choose_from:\n return random.choice(choose_from)\n 
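# with no affordable card matching the stipulation, fall back to a placeholder card\n        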
else:\n return ImaginaryCard()\n\n def trash_fn(self, _game, _player, _stip, _optional):\n stip, optional_card = self.process_decision_params(_stip, _optional)\n #get a valid card from the player's hand\n choose_from = stip(_player.my_deck.hand) + optional_card\n if choose_from:\n return random.choice(choose_from)\n else:\n return ImaginaryCard()\n\n def gain_fn(self, _game, _player, _stip, _optional):\n stip, optional_card = self.process_decision_params(_stip, _optional)\n #get all cards in the shop, the stipulation will provide the limit if there is one\n cards_available = _game.shop.get_cards_under_amount(999999999)\n #get all valid cards to gain\n choose_from = stip(cards_available) + optional_card\n if choose_from:\n return random.choice(choose_from)\n else:\n return ImaginaryCard()\n \n def put_on_top_fn(self, _game, _player, _stip, _optional):\n stip, optional_card = self.process_decision_params(_stip, _optional)\n #get a valid card from the player's hand\n choose_from = stip(_player.my_deck.hand) + optional_card\n if choose_from:\n return random.choice(choose_from)\n else:\n return ImaginaryCard()\n","sub_path":"axiom/ai_plugins/dominion_ai.py","file_name":"dominion_ai.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"534089502","text":"# -*- coding: utf-8 -*-\n\ndef measure_vehicle_pos(leftx, rightx,shape):\n xm_per_pix = 3.7 / 700 # meters per pixel in x dimension\n\n middle_line = leftx[-1] + (rightx[-1] - leftx[-1]) / 2\n middel_cat = shape / 2\n\n vehicle_pos = (middel_cat - middle_line) * xm_per_pix\n\n return vehicle_pos\n\n\n\n","sub_path":"lane/measure_vehicle_pos.py","file_name":"measure_vehicle_pos.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"540498753","text":"import logging\n\nfrom bulk_sync import bulk_sync\n\nfrom list_org_parser.services.list_org_parser import ListOrgParser\nfrom list_org_parser.models import OrganizationUrl, Organization\nfrom list_org_parser.models import Address, Phone, Fax, Email, Site\nfrom classifiers.models import Okved2, Okved2007, Okpd2\n\nlogger = logging.getLogger(__name__)\n\ndef save_urls(start_page_path: str, start_page_num: int = 1, start_org_num: int = 1, end_page_num: int = None, end_org_num=None):\n parser = ListOrgParser()\n try:\n orgs_urls = parser.parse_orgs_list_pages(\n orgs_list_page_path=start_page_path,\n start_page_num=start_page_num,\n start_org_num=start_org_num,\n end_page_num=end_page_num,\n end_org_num=end_org_num\n )\n orgs_urls = [OrganizationUrl(**org_url) for org_url in orgs_urls]\n ret = bulk_sync(new_models=orgs_urls, filters=[], key_fields=('url',), skip_deletes=True)\n except Exception as e:\n logger.exception(str(e))\n else:\n logger.info(\"Results of bulk_sync: {created} created, {updated} updated, {deleted} deleted.\".format(**ret['stats']))\n\n\ndef save_orgs():\n parser = ListOrgParser()\n loaded_ids = Organization.objects.values_list('list_org_link', flat=True)\n urls = OrganizationUrl.objects.filter(is_active=True).exclude(id__in=loaded_ids)\n try:\n orgs = parser.parse_orgs_pages(urls)\n for org in orgs:\n save(org)\n except Exception as e:\n logger.exception(str(e))\n else:\n return len(orgs)\n\n\ndef save(org: dict):\n org = org.copy()\n address = org.pop('address')\n gps_coordinates = org.pop('gps_coordinates')\n ur_address = org.pop('ur_address')\n\n phones = org.pop('phones')\n faxes = 
org.pop('faxes')\n    emails = org.pop('emails')\n    sites = org.pop('sites')\n\n    main_okved2007 = org['main_okved2007']\n    if(main_okved2007):\n        try:\n            org['main_okved2007'] = Okved2007.objects.get(code=main_okved2007)\n        except Okved2007.DoesNotExist:\n            # TODO: this case needs a proper resolution\n            if not org['main_okved2']:\n                org['main_okved2'] = org['main_okved2007']\n                org['main_okved2007'] = None\n            logger.error(f'Can\\'t find code {main_okved2007} in OKVED 2007 when trying to save organization {org}')\n\n    sup_okveds2007 = org.pop('sup_okveds2007')\n    if(sup_okveds2007):\n        okveds = []\n        for okved in sup_okveds2007:\n            try:\n                okveds.append(Okved2007.objects.get(code=okved))\n            except Okved2007.DoesNotExist:\n                if not org['sup_okveds2']:\n                    org['sup_okveds2'] = []\n                org['sup_okveds2'].append(okved)\n                logger.error(f'Can\\'t find code {okved} in OKVED 2007 when trying to save organization {org}')\n        sup_okveds2007 = okveds\n\n\n    main_okved2 = org['main_okved2']\n    if(main_okved2):\n        try:\n            org['main_okved2'] = Okved2.objects.get(code=main_okved2)\n        except Okved2.DoesNotExist:\n            org['main_okved2'] = None\n            logger.error(f'Can\\'t find code {main_okved2} in OKVED 2 when trying to save organization {org}')\n\n    sup_okveds2 = org.pop('sup_okveds2')\n    if(sup_okveds2):\n        okveds = []\n        for okved in sup_okveds2:\n            try:\n                okveds.append(Okved2.objects.get(code=okved))\n            except Okved2.DoesNotExist:\n                logger.error(f'Can\\'t find code {okved} in OKVED 2 when trying to save organization {org}')\n        sup_okveds2 = okveds\n\n\n    org = Organization(**org)\n    org.save()\n    if sup_okveds2007:\n        org.sup_okveds2007.add(*sup_okveds2007)\n    if sup_okveds2:\n        org.sup_okveds2.add(*sup_okveds2)\n\n    # Save the address and the legal address together with the GPS coordinates (when provided)\n    longitude = gps_coordinates['longitude'] if gps_coordinates['longitude'] else None\n    latitude = gps_coordinates['latitude'] if gps_coordinates['latitude'] else None\n    if address:\n        address = Address(organization=org, address=address, gps_longitude=longitude, gps_latitude=latitude)\n        address.save()\n        if ur_address:\n            ur_address = Address(organization=org, address=ur_address, is_legal=True)\n            ur_address.save()\n    elif(ur_address):\n        ur_address = Address(organization=org, address=ur_address, is_legal=True, gps_longitude=longitude, gps_latitude=latitude)\n        ur_address.save()\n\n    # Save the phone numbers\n    phones = [Phone(organization=org, phone=phone) for phone in phones]\n    for phone in phones:\n        phone.save()\n\n    # Save the faxes\n    faxes = [Fax(organization=org, fax=fax) for fax in faxes]\n    for fax in faxes:\n        fax.save()\n\n    # Save the emails\n    emails = [Email(organization=org, email=email) for email in emails]\n    for email in emails:\n        email.save()\n\n    # Save the links to the organization's websites\n    sites = [Site(organization=org, site=site) for site in sites]\n    for site in sites:\n        site.save()\n\n    return org\n","sub_path":"list_org_parser/services/save_orgs_command.py","file_name":"save_orgs_command.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"9457599","text":"import logging\nimport io\nimport codecs\nimport difflib\nfrom tempfile import NamedTemporaryFile\n\nimport editor\n\nfrom . import exceptions\nfrom . 
import utils\n\nlogger = logging.getLogger(__name__)\n__escape_decoder = codecs.getdecoder('unicode_escape')\n\n\ndef parse_env_var(value):\n \"\"\"\n Split a env var text like\n\n ENV_VAR_NAME=env_var_value\n\n into a tuple ('ENV_VAR_NAME', 'env_var_value')\n \"\"\"\n k, _, v = value.partition('=')\n\n # Remove any leading and trailing spaces in key, value\n k, v = k.strip(), v.strip().encode('unicode-escape').decode('ascii')\n\n if v and v[0] == v[-1] in ['\"', \"'\"]:\n v = __escape_decoder(v[1:-1])[0]\n return k, v\n\n\nclass File:\n def __init__(self, name, storage=None):\n self.name = name\n self._storage = storage\n self._stream = None\n\n @property\n def storage(self):\n if not self._storage:\n from .storages import LocalStorage\n self._storage = LocalStorage()\n return self._storage\n\n def read_into_stream(self, stream):\n self.storage.read_into_stream(self.name, stream=stream)\n\n def read(self):\n return self.storage.read_into_stream(self.name).read()\n\n def exists(self):\n if list(self.storage.list(self.name)):\n return True\n return False\n\n def md5(self, raise_if_not_exists=True):\n try:\n md5hash, _ = next(self.storage.list(self.name))\n except StopIteration:\n if raise_if_not_exists:\n raise exceptions.FileDoesNotExist(self.name)\n else:\n md5hash = None\n return md5hash\n\n def write(self, data):\n if isinstance(data, str):\n data = data.encode('utf-8')\n if isinstance(data, bytes):\n self.storage.write(io.BytesIO(data), self.name)\n else:\n self.storage.write(data, self.name)\n\n def diff(self, file_stream, fromfile='remote', tofile='local', **kwargs):\n file_stream.seek(0)\n self_str = io.TextIOWrapper(self.storage.read_into_stream(self.name))\n result = difflib.unified_diff(\n self_str.readlines(),\n file_stream.readlines(),\n fromfile=fromfile,\n tofile=tofile,\n **kwargs\n )\n # avoid io.TextIOWrapper closing the stream when being garbage collected\n # https://bugs.python.org/issue21363\n self_str.detach()\n return result\n\n def edit(self, create=False):\n with NamedTemporaryFile(mode='rb+', buffering=0) as f:\n data_to_edit = b''\n try:\n data_to_edit = self.read()\n except exceptions.FileDoesNotExist:\n if not create:\n raise\n\n f.write(data_to_edit)\n\n original_md5 = self.md5(raise_if_not_exists=False)\n edited_data = editor.edit(filename=f.name)\n new_md5 = utils.md5s3(f)\n\n if not edited_data and not original_md5:\n logger.warning('Remote file does not exist and no input was provided. '\n 'No attempt to write will be done.')\n elif original_md5 != new_md5:\n # this does not solve concurrency problems, but shrinks the\n # race condition window to a very small period of time\n if original_md5 == self.md5(raise_if_not_exists=False):\n self.write(edited_data)\n else:\n f_str = io.TextIOWrapper(f)\n diff = self.diff(f_str)\n e = exceptions.LocalCopyOutdated(\n 'Remote file was edited while editing local copy. Diff:\\n\\n{}'.format(''.join(diff))\n )\n # avoid io.TextIOWrapper closing the stream when being garbage collected\n # https://bugs.python.org/issue21363\n f_str.detach()\n raise e\n else:\n logger.warning('File not changed. 
Nothing to write.')\n\n\nclass EnvFile(File):\n    @classmethod\n    def from_file(cls, obj):\n        obj.__class__ = cls\n        return obj\n\n    def as_dict(self):\n        env_dict = {}\n        try:\n            lines = str(self.read(), 'utf-8').splitlines()\n            lines = [line for line in lines if line and not line.startswith('#') and '=' in line]\n            env_dict = dict(parse_env_var(line) for line in lines)\n        except FileNotFoundError:\n            pass\n        return env_dict\n\n    def set(self, value, create=False):\n        new_key, new_value = parse_env_var(value)\n        new_lines = []\n        value_set = False\n        try:\n            for line in str(self.read(), 'utf-8').splitlines():\n                key, value = parse_env_var(line)\n                if key == new_key:\n                    new_lines.append('{}={}'.format(new_key, new_value))\n                    value_set = True\n                else:\n                    new_lines.append(line)\n        except FileNotFoundError:\n            if not create:\n                raise\n        if not value_set:\n            new_lines.append('{}={}'.format(new_key, new_value))\n        self.write('\\n'.join(new_lines))\n\n    def unset(self, unset_key):\n        new_lines = []\n        unset_done = False\n        try:\n            for line in str(self.read(), 'utf-8').splitlines():\n                key, value = parse_env_var(line)\n                if key == unset_key:\n                    unset_done = True\n                    continue\n                else:\n                    new_lines.append(line)\n        except FileNotFoundError:\n            logger.warning('File does not exist')\n\n        if unset_done:\n            self.write('\\n'.join(new_lines))\n        else:\n            logger.info('Key %s not found in environment file, doing nothing...', unset_key)\n\n    def from_dict(self, env_vars):\n        file_data = ''\n        for var_name, var_value in env_vars.items():\n            file_data += '{}={}\\n'.format(var_name, var_value)\n        self.write(file_data)\n\n    def update(self, another_file):\n        env_vars = self.as_dict()\n        env_vars.update(another_file.as_dict())\n        self.from_dict(env_vars)\n","sub_path":"s3conf/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"237371139","text":"from typing import Optional, List\n\nfrom fastapi import FastAPI, Depends\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import declarative_base, sessionmaker, Session\nfrom sqlalchemy import Boolean, Column, Float, String, Integer\nfrom fastapi.middleware.cors import CORSMiddleware\nimport uvicorn\nimport requests\n\nimport config\n\napp = FastAPI()\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=[\"*\"],\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\n# SqlAlchemy Setup\nSQLALCHEMY_DATABASE_URL = 'sqlite+pysqlite:///./db.sqlite3'\nengine = create_engine(SQLALCHEMY_DATABASE_URL, echo=True, future=True, connect_args={'check_same_thread': False},)\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\nBase = declarative_base()\n\n# Dependency\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n\nclass VideoSource(BaseModel):\n    source: str\n    pix_fmt: Optional[str] = None\n    res: Optional[str] = None\n\nclass Service(BaseModel):\n    name: str\n    ip: str = \"\"\n    port: int = 8000\n    alive: Optional[bool] = False\n\n    class Config:\n        orm_mode = True\n\nclass ServiceUpdate(BaseModel):\n    name: str\n    ip: Optional[str]\n    port: Optional[int]\n    alive: Optional[bool]\n\nclass DBService(Base):\n    '''\n    Database model to store services:\n    id: Identifier of the record\n    name: Name of the service/container\n    ip: IP assigned to the container\n    port: Port where the API is listening\n    alive: If the container is alive\n    
'''\n __tablename__ = 'services'\n\n id = Column(Integer, primary_key=True, index=True)\n name = Column(String(50))\n ip = Column(String(50))\n port = Column(Integer)\n alive = Column(Boolean)\n\nBase.metadata.create_all(bind=engine)\n\n\ndef get_service(db: Session, name: str):\n return db.query(DBService).where(DBService.name == name).first()\n\n\ndef get_services(db: Session):\n return db.query(DBService).all()\n\n\ndef register_service(db: Session, service):\n db_service = DBService(**service.dict())\n service_found = db.query(DBService).where(DBService.name == db_service.name).first()\n if service_found:\n # Service already exists\n if db_service.ip:\n service_found.ip = db_service.ip\n if db_service.port:\n service_found.port = db_service.port\n if service_found.alive != db_service.alive:\n service_found.alive = db_service.alive\n db.commit()\n return db_service\n db.add(db_service)\n db.commit()\n return db_service\n\n\n@app.post('/services/', response_model=Service)\ndef register_service_view(service: Service, db: Session = Depends(get_db)):\n db_service = register_service(db, service)\n return db_service\n\n\n@app.get('/services/', response_model=List[Service])\ndef get_all_available_services_view(db: Session = Depends(get_db)):\n return get_services(db)\n\n\n@app.get(\"/services/{name}\")\ndef get_service_by_name(name: str, db: Session = Depends(get_db)):\n return get_service(db, name)\n\n\n@app.get('/')\nasync def index():\n return {'message': 'Main controller Service'}\n\n\n@app.get('/healthcheck')\nasync def healthcheck(db: Session = Depends(get_db)):\n status_code = 200\n status = {}\n status[config.EFFECTS_CONTAINER_NAME] = \"Not Ok\"\n status[config.VIDEOPROXY_CONTAINER_NAME] = \"Not Ok\"\n status[config.DEVICE_MNGR_CONTAINER_NAME] = \"Not Ok\"\n effect_svc = get_service(db, config.EFFECTS_CONTAINER_NAME)\n if effect_svc:\n url = f\"http://{effect_svc.ip}:{effect_svc.port}/ping\"\n response = requests.get(url)\n if response.status_code == 200:\n status[config.EFFECTS_CONTAINER_NAME] = \"Ok\"\n service = ServiceUpdate(name=config.EFFECTS_CONTAINER_NAME, alive=True)\n else:\n status_code = 400\n service = ServiceUpdate(name=config.EFFECTS_CONTAINER_NAME, alive=False)\n register_service(db, service)\n\n video_svc = get_service(db, config.VIDEOPROXY_CONTAINER_NAME)\n if video_svc:\n url = f\"http://{video_svc.ip}:{video_svc.port}/ping\"\n response = requests.get(url)\n if response.status_code == 200:\n status[config.VIDEOPROXY_CONTAINER_NAME] = \"Ok\"\n service = ServiceUpdate(name=config.VIDEOPROXY_CONTAINER_NAME, alive=True)\n else:\n status_code = 400\n service = ServiceUpdate(name=config.VIDEOPROXY_CONTAINER_NAME, alive=False)\n register_service(db, service)\n\n device_svc = get_service(db, config.DEVICE_MNGR_CONTAINER_NAME)\n if device_svc:\n url = f\"http://{device_svc.ip}:{device_svc.port}/heartbeat\"\n response = requests.get(url)\n if response.status_code == 200:\n status[config.DEVICE_MNGR_CONTAINER_NAME] = \"Ok\"\n service = ServiceUpdate(name=config.DEVICE_MNGR_CONTAINER_NAME, alive=True)\n else:\n status_code = 400\n service = ServiceUpdate(name=config.DEVICE_MNGR_CONTAINER_NAME, alive=False)\n register_service(db, service)\n\n return JSONResponse(\n status_code=status_code,\n content=status,\n )\n\n\n@app.get('/config/')\nasync def get_services_config(db: Session = Depends(get_db)):\n configs = {}\n effect_svc = get_service(db, config.EFFECTS_CONTAINER_NAME)\n if effect_svc:\n url = f\"http://{effect_svc.ip}:{effect_svc.port}/service\"\n response = 
requests.get(url)\n effect_config = {\n \"config\": response.json(),\n \"status_code\": 200\n }\n if response.status_code != 200:\n effect_config[\"status_code\"] = response.status_code\n url = f\"http://{effect_svc.ip}:{effect_svc.port}/effect\"\n response = requests.get(url)\n effect_config[\"config\"].update(response.json())\n if response.status_code != 200:\n effect_config[\"status_code\"] = response.status_code\n else:\n effect_config = {\n \"config\": \"Not found. Service not registered\",\n \"status_code\": 400\n }\n video_svc = get_service(db, config.VIDEOPROXY_CONTAINER_NAME)\n if video_svc:\n url = f\"http://{video_svc.ip}:{video_svc.port}/service\"\n response = requests.get(url)\n video_config = {\n \"config\": response.json(),\n \"status_code\": response.status_code\n }\n if response.status_code != 200:\n video_config[\"status_code\"] = response.status_code\n url = f\"http://{video_svc.ip}:{video_svc.port}/stream_type\"\n response = requests.get(url)\n video_config[\"config\"].update(response.json())\n if response.status_code != 200:\n video_config[\"status_code\"] = response.status_code\n url = f\"http://{video_svc.ip}:{video_svc.port}/video_source\"\n response = requests.get(url)\n video_config[\"config\"].update(response.json())\n if response.status_code != 200:\n video_config[\"status_code\"] = response.status_code\n url = f\"http://{video_svc.ip}:{video_svc.port}/virtual_device\"\n response = requests.get(url)\n video_config[\"config\"].update(response.json())\n if response.status_code != 200:\n video_config[\"status_code\"] = response.status_code\n else:\n video_config = {\n \"config\": \"Not found. Service not registered\",\n \"status_code\": 400\n }\n device_svc = get_service(db, config.DEVICE_MNGR_CONTAINER_NAME)\n if device_svc:\n url = f\"http://{device_svc.ip}:{device_svc.port}/cameras\"\n response = requests.get(url)\n device_config = {\n \"config\": {\"webcams\": response.json()},\n \"status_code\": response.status_code\n }\n else:\n device_config = {\n \"config\": \"Not found. 
Service not registered",\n            "status_code": 400\n        }\n    device_svc = get_service(db, config.DEVICE_MNGR_CONTAINER_NAME)\n    if device_svc:\n        url = f"http://{device_svc.ip}:{device_svc.port}/cameras"\n        response = requests.get(url)\n        device_config = {\n            "config": {"webcams": response.json()},\n            "status_code": response.status_code\n        }\n    else:\n        device_config = {\n            "config": "Not found. Service not registered",\n            "status_code": 400\n        }\n    configs[config.EFFECTS_CONTAINER_NAME] = effect_config\n    configs[config.VIDEOPROXY_CONTAINER_NAME] = video_config\n    configs[config.DEVICE_MNGR_CONTAINER_NAME] = device_config\n    return JSONResponse(\n        status_code=200,\n        content=configs,\n    )\n\n\n@app.post('/video')\nasync def start_video(db: Session = Depends(get_db)):\n    status_code = 200\n    status = {}\n    effect_svc = get_service(db, config.EFFECTS_CONTAINER_NAME)\n    if effect_svc:\n        url = f"http://{effect_svc.ip}:{effect_svc.port}/service"\n        response = requests.post(url)\n        status_code = response.status_code\n        if status_code != 200:\n            status["error"] = response.json()\n        else:\n            status["server"] = response.json()\n    else:\n        status_code = 400\n        status["error"] = "Effect service not registered"\n\n    if status_code == 200:\n        video_svc = get_service(db, config.VIDEOPROXY_CONTAINER_NAME)\n        if video_svc:\n            url = f"http://{video_svc.ip}:{video_svc.port}/service"\n            response = requests.post(url)\n            status_code = response.status_code\n            if status_code != 200:\n                status["error"] = response.json()\n            else:\n                status["client"] = response.json()\n        else:\n            status_code = 400\n            status["error"] = "Video proxy service not registered"\n\n    return JSONResponse(\n        status_code=status_code,\n        content=status,\n    )\n\n\n@app.delete('/video')\nasync def stop_video(db: Session = Depends(get_db)):\n    status_code = 200\n    status = {}\n    effect_svc = get_service(db, config.EFFECTS_CONTAINER_NAME)\n    if effect_svc:\n        url = f"http://{effect_svc.ip}:{effect_svc.port}/service"\n        response = requests.delete(url)\n        status_code = response.status_code\n        if status_code != 200:\n            status["error"] = response.json()\n        else:\n            status["server"] = response.json()\n    else:\n        status_code = 400\n        status["error"] = "Effect service not registered"\n\n    if status_code == 200:\n        video_svc = get_service(db, config.VIDEOPROXY_CONTAINER_NAME)\n        if video_svc:\n            url = f"http://{video_svc.ip}:{video_svc.port}/service"\n            response = requests.delete(url)\n            status_code = response.status_code\n            if status_code != 200:\n                status["error"] = response.json()\n            else:\n                status["client"] = response.json()\n        else:\n            status_code = 400\n            status["error"] = "Video proxy service not registered"\n\n    return JSONResponse(\n        status_code=status_code,\n        content=status,\n    )\n\n@app.post('/iccam')\nasync def create_iccam(db: Session = Depends(get_db)):\n    status_code = 200\n    status = {}\n    effect_svc = get_service(db, config.DEVICE_MNGR_CONTAINER_NAME)\n    if effect_svc:\n        url = f"http://{effect_svc.ip}:{effect_svc.port}/cameras"\n        response = requests.post(url)\n        status_code = response.status_code\n        if status_code != 200:\n            status["error"] = response.json()\n        else:\n            status["iccam_id"] = response.json()["device_id"]\n    else:\n        status_code = 400\n        status["error"] = "Device Manager service not registered"\n\n    if status_code == 200:\n        video_svc = get_service(db, config.VIDEOPROXY_CONTAINER_NAME)\n        if video_svc:\n            url = f"http://{video_svc.ip}:{video_svc.port}/virtual_device/{status['iccam_id']}"\n            response = requests.post(url)\n            status_code = response.status_code\n            if status_code != 200:\n                status["error"] = response.json()\n        else:\n            status_code = 400\n            status["error"] = "Video proxy service not registered"\n\n    return JSONResponse(\n        status_code=status_code,\n        content=status,\n    )\n\n\n@app.delete('/iccam')\nasync def delete_iccam(db: Session = Depends(get_db)):\n    status_code = 200\n    status = {}\n    effect_svc = get_service(db, config.DEVICE_MNGR_CONTAINER_NAME)\n    if effect_svc:\n        url = f"http://{effect_svc.ip}:{effect_svc.port}/cameras"\n        response = 
requests.delete(url)\n status_code = response.status_code\n if status_code != 200:\n status[\"error\"] = response.json()\n else:\n status_code = 400\n status[\"error\"] = \"Device Manager service not registered\"\n \n return JSONResponse(\n status_code=status_code,\n content=status,\n )\n\n\n@app.post('/effect/{effect_name}')\nasync def set_effect(effect_name: str, db: Session = Depends(get_db)):\n effect_svc = get_service(db, config.EFFECTS_CONTAINER_NAME)\n if effect_svc:\n url = f\"http://{effect_svc.ip}:{effect_svc.port}/effect/{effect_name}\"\n response = requests.post(url)\n return JSONResponse(\n status_code=response.status_code,\n content=response.json()\n )\n return JSONResponse(\n status_code=400,\n content=\"Effect service not registered\",\n )\n\n\n@app.delete('/effect')\nasync def remove_effect(db: Session = Depends(get_db)):\n effect_svc = get_service(db, config.EFFECTS_CONTAINER_NAME)\n if effect_svc:\n url = f\"http://{effect_svc.ip}:{effect_svc.port}/effect\"\n response = requests.delete(url)\n return JSONResponse(\n status_code=response.status_code,\n content=response.json()\n )\n return JSONResponse(\n status_code=400,\n content=\"Effect service not registered\",\n )\n\n\n@app.post('/video_source')\nasync def set_video_source(video_source: VideoSource, db: Session = Depends(get_db)):\n video_svc = get_service(db, config.VIDEOPROXY_CONTAINER_NAME)\n if video_svc:\n url = f\"http://{video_svc.ip}:{video_svc.port}/video_source\"\n response = requests.post(url, json=video_source.dict())\n return JSONResponse(\n status_code=response.status_code,\n content=response.json()\n )\n return JSONResponse(\n status_code=400,\n content=\"Video proxy service not registered\",\n )\n\nuvicorn.run(app, host=\"0.0.0.0\", port=config.API_PORT)\n","sub_path":"client/intelligent_collab/services/main_controller/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":13953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"253705226","text":"import json\nimport base64\nimport os\nimport requests\nfrom datetime import timedelta\nfrom tornado import httpclient, gen, ioloop, queues, escape\nfrom urllib.parse import quote_plus\nimport uuid\n\n\nimport app.database\nimport app.queue\nfrom app.config import get_path\nfrom model import Site, Result, Group\nfrom model.configuration import get_config\nimport worker\n\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) '\\\n 'Gecko/20100101 Firefox/40.1'\n\nhttpclient.AsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\nCONNECT_TIMEOUT = 10\n\nimport logging\nlogging.basicConfig(filename='/var/log/hgprofiler.log', level=logging.ERROR)\n\n\nclass ScrapeException(Exception):\n ''' Represents a user-facing exception. '''\n\n def __init__(self, message):\n self.message = message\n\n\ndef validate_site_response(site, response):\n \"\"\"\n Parse response and test against site criteria to determine whether username exists. Used with\n python requests response object.\n \"\"\"\n if response.status_code == site.status_code:\n if(site.search_text in response.text or\n site.search_text in response.headers):\n return True\n return False\n\n\ndef response_contains_username(site, response):\n \"\"\"\n Parse response and test against site criteria to determine whether username exists. 
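In `validate_site_response` above, `site.search_text in response.headers` tests membership against header *names* (the requests `headers` object is dict-like), so a marker phrase can never match a header value. Whether names or values were intended isn't stated in the source; a hedged sketch that scans values as well:

```python
def text_in_headers(search_text, headers):
    # `headers` is assumed dict-like (requests' CaseInsensitiveDict or
    # tornado's HTTPHeaders): scan the values, then fall back to names.
    return any(search_text in value for value in headers.values()) \
        or search_text in headers
```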
Used with\n tornado httpclient response object.\n \"\"\"\n if response.code == site.status_code:\n data = escape.json_decode(response.body)\n html = data['html'] if isinstance(data['html'], str) else data['html'].decode()\n if(site.search_text in html or\n site.search_text in response.headers):\n return True\n return False\n\n\ndef scrape_site(site, username):\n \"\"\"\n Download page at `site.url' and parse for username (synchronous).\n \"\"\"\n url = site.url.replace('%s', username)\n\n headers = {\n 'User-Agent': USER_AGENT\n }\n\n result = {\n 'site': site,\n 'found': True,\n 'error': None,\n 'url': url\n }\n\n try:\n response = requests.get(\n url,\n headers=headers,\n verify=False,\n timeout=12\n )\n result['found'] = validate_site_response(site, response)\n except requests.exceptions.ConnectionError:\n result['found'] = False\n result['error'] = 'Domain does not exist'\n except requests.exceptions.Timeout:\n result['found'] = False\n result['error'] = 'Request timed out (limit 12 seconds)'\n except requests.exceptions.InvalidURL:\n result['found'] = False\n result['error'] = 'URL invalid'\n\n return result\n\n\ndef scrape_username(username, group_id=None):\n '''\n Scrape all sites for username (synchronous).\n '''\n worker.start_job()\n job = worker.get_job()\n redis = worker.get_redis()\n db_session = worker.get_session()\n\n if group_id is not None:\n group = db_session.query(Group).get(group_id)\n sites = group.sites\n else:\n sites = db_session.query(Site).all()\n\n total = len(sites)\n number = 0\n\n for site in sites:\n scrape_result = scrape_site(site, username)\n\n result = Result(\n job_id=job.id,\n site_name=scrape_result['site'].name,\n site_url=scrape_result['url'],\n found=scrape_result['found'],\n total=total,\n number=number+1\n )\n\n if scrape_result['error'] is not None:\n result.error = scrape_result['error']\n\n db_session.add(result)\n db_session.flush()\n redis.publish('result', json.dumps(result.as_dict()))\n number += 1\n\n # Save results\n db_session.commit()\n # Complete\n worker.finish_job()\n\n\n@gen.coroutine\ndef save_image(image_name, bytestring):\n data_dir = get_path(\"data\")\n screenshot_dir = os.path.join(data_dir, 'screenshot')\n image_path = os.path.join(screenshot_dir, image_name)\n with open(image_path, 'wb') as f:\n f.write(base64.decodestring(bytestring.encode('utf8')))\n\n\n@gen.coroutine\ndef scrape_site_for_username(site, username, splash_url, request_timeout=10):\n \"\"\"\n Download the page at `site.url` using Splash and parse for the username (asynchronous).\n \"\"\"\n page_url = site.url.replace('%s', username)\n url = '{}/render.json?url={}&html=1&frame=1&jpeg=1'.format(splash_url, quote_plus(page_url))\n headers = {\n 'User-Agent': USER_AGENT,\n 'X-Splash-timeout': '{}'.format(request_timeout)\n }\n result = {\n 'site': site,\n 'found': True,\n 'error': None,\n 'url': page_url\n }\n headers = {}\n\n try:\n response = yield httpclient.AsyncHTTPClient().fetch(url,\n headers=headers,\n connect_timeout=5,\n validate_cert=False)\n\n except Exception as e:\n result['error'] = e\n result['found'] = False\n raise gen.Return(result)\n\n data = escape.json_decode(response.body)\n result['found'] = response_contains_username(site, response)\n result['image'] = data['jpeg']\n result['code'] = response.code\n\n raise gen.Return(result)\n\n\n@gen.coroutine\ndef parse_result(scrape_result, total, job_id):\n result = Result(\n job_id=job_id,\n site_name=scrape_result['site'].name,\n site_url=scrape_result['url'],\n found=scrape_result['found'],\n 
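Two caveats about `save_image` above: `base64.decodestring` was deprecated for years and removed in Python 3.9, and because the function is wrapped in `@gen.coroutine`, the call `save_image(image_name, ...)` in `parse_result` (made without `yield`) only creates a Future that is discarded, so write errors are silently swallowed. A plain synchronous sketch, assuming the same data-directory layout:

```python
import base64
import os

def save_image(data_dir, image_name, b64_string):
    # Plain file I/O gains nothing from a coroutine here, and a direct
    # call cannot be skipped the way an un-yielded coroutine is.
    image_path = os.path.join(data_dir, 'screenshot', image_name)
    with open(image_path, 'wb') as f:
        # b64decode is the long-standing replacement for decodestring().
        f.write(base64.b64decode(b64_string.encode('utf8')))
```

Note also that `parse_result` records a `thumb` filename, but nothing in the file ever writes the thumbnail itself.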
total=total,\n number=1\n )\n # Save image\n if scrape_result['error'] is None:\n image_name = uuid.uuid4()\n thumb_name = '{}-thumb'.format(image_name)\n image_name = '{}.jpeg'.format(image_name)\n thumb_name = '{}.jpeg'.format(thumb_name)\n try:\n save_image(image_name, scrape_result['image'])\n result.image = image_name\n result.thumb = thumb_name\n except Exception as e:\n raise ScrapeException('{}'.format(e))\n\n raise gen.Return(result)\n\n\n@gen.coroutine\ndef scrape_sites(username, group_id=None):\n \"\"\"\n Scrape all sites for username (asynchronous).\n \"\"\"\n job = worker.get_job()\n redis = worker.get_redis()\n db_session = worker.get_session()\n concurrency = get_config(db_session, 'scrape_concurrency', required=True).value\n\n try:\n concurrency = int(concurrency)\n except:\n raise ScrapeException('Value of scrape_concurrency must be an integer')\n\n request_timeout = get_config(db_session, 'scrape_request_timeout', required=True).value\n try:\n request_timeout = int(request_timeout)\n except:\n raise ScrapeException('Value of scrape_request_timeout must be an integer')\n\n splash_url = get_config(db_session, 'splash_url', required=True).value\n\n if group_id is not None:\n group = db_session.query(Group).get(group_id)\n sites = group.sites\n else:\n sites = db_session.query(Site).all()\n\n total = len(sites)\n q = queues.Queue()\n fetching, fetched = set(), set()\n #results = list()\n\n @gen.coroutine\n def scrape_site():\n ''' Worker functions for performing scraping tasks asynchronously. '''\n current_site = yield q.get()\n try:\n if current_site in fetching:\n return\n\n fetching.add(current_site)\n # Scrape the page, get the result\n scrape_result = yield scrape_site_for_username(\n current_site, username, splash_url, request_timeout)\n # Parse result\n result = yield parse_result(scrape_result, total, job.id)\n db_session.add(result)\n db_session.flush()\n\n fetched.add(current_site)\n # Notify clients of the result\n redis.publish('result', json.dumps(result.as_dict()))\n finally:\n q.task_done()\n\n @gen.coroutine\n def async_worker():\n while True:\n yield scrape_site()\n\n for site in sites:\n q.put(site)\n\n # Start workers, then wait for the work queue to be empty.\n for _ in range(concurrency):\n async_worker()\n\n yield q.join(timeout=timedelta(seconds=300))\n assert fetching == fetched\n\n # Save results\n db_session.commit()\n\n\ndef search_username(username, group=None):\n \"\"\"\n Concurrently search username across all sites using an asyncronous loop.\n \"\"\"\n worker.start_job()\n io_loop = ioloop.IOLoop.current()\n io_loop.run_sync(lambda: scrape_sites(username, group))\n # Complete\n worker.finish_job()\n","sub_path":"lib/worker/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"37055299","text":"#separate words by iterating through and checking for white space\n\nwords = input('Enter words separated by spaces, human\\n')\n\ni = 0\n\nwordlist = []\n\nprint(len(words))\n\nprint(len(words) - 1)\n\nwhile i < len(words) - 1: #The loop doesn't break when the condition is met\n #therefore the backup below. 
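`scrape_sites` above drives a fixed pool of workers off a `tornado.queues.Queue`; note that `q.put(site)` returns a Future that is never yielded — it only works because the queue is unbounded, and `put_nowait` would state the intent. For reference, a sketch of the same bounded-concurrency pattern in modern asyncio terms; the names here are illustrative, not the project's code:

```python
import asyncio

async def run_pool(items, handler, concurrency=5):
    """Drain `items` through `handler` with at most `concurrency`
    coroutines in flight -- the asyncio analogue of scrape_sites()."""
    queue = asyncio.Queue()
    for item in items:
        queue.put_nowait(item)

    async def worker():
        while True:
            item = await queue.get()
            try:
                await handler(item)
            finally:
                queue.task_done()

    workers = [asyncio.create_task(worker()) for _ in range(concurrency)]
    await queue.join()        # resolves once every item is task_done()
    for w in workers:
        w.cancel()            # idle workers never exit on their own
```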
Stupid python\n letters = []\n while words[i] != ' ':\n letters.append(words[i])\n i += 1\n if i == len(words):\n break\n word = ''.join(letters)\n wordlist.append(word)\n i += 1\n\nprint(wordlist)","sub_path":"Learning Projects/Word Seperator.py","file_name":"Word Seperator.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"229647760","text":"class Node:\n def __init__(self, data = None):\n self.data = data\n self.next = None\n \n def __repr__(self):\n return str(self.data)\n\nclass Stack:\n def __init__(self):\n self.top = None\n self.size = 0\n\n def push(self, data):\n node = Node(data)\n if self.top is None:\n self.top = node\n \n else:\n node.next = self.top\n self.top = node\n\n self.size += 1\n\n def pop(self):\n if self.top is None:\n print(\"List is Empty !!!\")\n return None\n\n else:\n data = self.top.data\n self.size -= 1\n if self.top.next:\n self.top = self.top.next\n else:\n self.top = None\n return data\n\n def peek(self):\n if self.top:\n return self.top.data\n else:\n return None\n\n'''\n Now let us look at an example application showing how we can \n use our stack implemetation. We are going to write a little \n function that will verify whether a statement contains \n brackets - (, [, { - is balanced, that is, whether the number \n of closng brackets matches the number of opening brackets. \n It will also ensure that one pair of brackets really is \n contained in one another. \n''' \n\ndef check_brackets(statement):\n stack = Stack()\n for ch in statement:\n if ch in ('{', '[', '('):\n stack.push(ch)\n if ch in ('}', ']', ')'):\n last = stack.pop()\n if last is '{' and ch is '}':\n continue\n elif last is '[' and ch is ']':\n continue\n\n elif last is '(' and ch is ')':\n continue\n else:\n return False\n if stack.size > 0:\n return False\n else:\n return True\n","sub_path":"Stacks and queues/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"607502510","text":"import sys\nfrom datetime import datetime, timedelta\nsys.path.insert(0, \"../utils\")\n\nimport time_util\n\nclass SleepEvent():\n def __init__(self, sleep_start, sleep_end, timedelta_slept=None):\n self.sleep_start = sleep_start\n self.sleep_end = sleep_end\n self.timedelta_slept = timedelta_slept\n self.sleep_opportunity = self.sleep_end - self.sleep_start\n\n def __str__(self):\n sleep_opportunity_hr_str = \"%5s\" % (\"%2.2f\"%(self.sleep_opportunity/60/60))\n\n # populate sleep opportunity field if it's been set\n sleep_opportunity_print_str = \"\"\n if self.timedelta_slept:\n timedelta_slept_hr_str = \"%5s\" % (\"%2.2f\"%(self.timedelta_slept.seconds/60/60))\n sleep_opportunity_print_str = \" hours out of %s sleep opportunity.\" % sleep_opportunity_hr_str\n\n retstr = \"\"\n retstr += \"%s -- %s: slept %s%s\" % (\\\n time_util.epoch_to_md_hm(self.sleep_start),\\\n time_util.epoch_to_md_hm(self.sleep_end),\\\n timedelta_slept_hr_str,\\\n sleep_opportunity_print_str)\n return retstr\n\n def to_db_tuple(self):\n return (time_util.epoch_to_yyyymmdd(self.sleep_end), self.sleep_start, self.sleep_end, self.timedelta_slept.seconds)\n","sub_path":"src/oop/sleep_event.py","file_name":"sleep_event.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"155188804","text":"from PIL import Image\nfrom PIL 
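The `check_brackets` helper above compares one-character strings with `is`, which tests object identity; CPython happens to intern these literals, but that is an implementation detail, and CPython 3.8+ emits a `SyntaxWarning` for `is` against a literal. An equality-based sketch with the same behaviour, assuming the `Stack` class above is in scope:

```python
PAIRS = {'}': '{', ']': '[', ')': '('}

def check_brackets(statement):
    stack = Stack()
    for ch in statement:
        if ch in '{[(':
            stack.push(ch)
        elif ch in PAIRS:
            # pop() returns None on an empty stack, which also fails here
            if stack.pop() != PAIRS[ch]:
                return False
    return stack.size == 0
```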
import ImageFile\nimport sys\nimport os\nimport sqlite3\nfrom sqlite3 import Error\n\nImageFile.LOAD_TRUNCATED_IMAGES = True \n\nbase_directory = os.getcwd()\n\nCARD_ART_DIRECTORY = base_directory + \"/MTG_card_art\"\n\ntry:\n data_base_connection = sqlite3.connect(base_directory + \"/card_image_data.db\")\n data_base_cursor = data_base_connection.cursor()\n\n table = \"\"\" CREATE TABLE IF NOT EXISTS card_info (\n card_name text,\n card_set text, \n image_width integer,\n image_height integer,\n image_red integer,\n image_green integer,\n image_blue integer);\"\"\"\n\n data_base_cursor.execute(table)\n\n os.chdir(CARD_ART_DIRECTORY)\n\n card_set_list = os.listdir('.')\n\n for card_set in card_set_list:\n os.chdir(CARD_ART_DIRECTORY + \"/\" + card_set)\n for card_art in os.listdir('.'):\n print(\"Set: \" + card_set + \" -> Card Art: \" + card_art)\n card_image = Image.open(card_art)\n width, height = card_image.size\n pixels = card_image.getcolors(width*height)\n most_frequent_pixel = pixels[0]\n\n for count, color in pixels:\n if count > most_frequent_pixel[0]:\n most_frequent_pixel = (count, color)\n\t\t\t\n image_info = (card_art, card_set, width, height, most_frequent_pixel[1][0], most_frequent_pixel[1][1], most_frequent_pixel[1][2])\n print(image_info)\n print(\"\\n\")\n\n sql = ''' INSERT INTO card_info(card_name, card_set, image_width, image_height, image_red, image_green, image_blue)\n VALUES(?,?,?,?,?,?,?);'''\n data_base_cursor = data_base_connection.cursor()\n data_base_cursor.execute(sql, image_info)\n data_base_connection.commit()\n\n\nexcept Error as e:\n print(e)\n\nfinally:\n data_base_connection.close()\n\t\n\n","sub_path":"color_analyser.py","file_name":"color_analyser.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"481956631","text":"import couchdb\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport re\nimport json\nfrom pprint import pprint\nfrom sAPI.sentimentx import SentimentAnalysis\nimport sys\n\nmypath = os.path.abspath(__file__)\nroot = mypath.split('/')\nroot.pop()\nroot.pop()\nroot = '/'.join(root)\n\nsentimentAPI = SentimentAnalysis()\n\nfrom optparse import OptionParser\nparser = OptionParser()\nparser.add_option(\"-p\", \"--path\", dest=\"path\",\n help=\"choose root directory\", metavar=\"FILE\")\n# parser.add_option(\"-q\", \"--quiet\",\n# action=\"store_false\", dest=\"verbose\", default=True,\n# help=\"don't print status messages to stdout\")\n\n(options, args) = parser.parse_args()\nprint(options, args)\n\npathx = options.__dict__['path']\nprint('indicate path: ')\nprint(pathx)\n\nif not pathx:\n print(mypath)\n print(root)\nelse:\n root = pathx\n print(root)\n\ndef full_path(mpth, f_name):\n return os.path.join(mpth, f_name)\n\ndef explore_dir_for_json(path):\n only_json_files = []\n directories = [path]\n\n while directories:\n path = directories.pop(0)\n for ff in listdir(path):\n if isfile(join(path, ff)):\n if re.search(r'\\.json', ff):\n only_json_files.append(join(path, ff))\n else:\n directories.append(join(path, ff))\n\n pprint(only_json_files)\n print('\\n')\n return only_json_files\n\n\njson_files = explore_dir_for_json(root)\n\n# exit()\n\n\"\"\" ####################### part 2##################### \"\"\"\n\nDB_NAME = 'en_tweets_db'\n\ncouch = couchdb.Server('http://130.56.250.125:5984')\ncouch.resource.credentials = ('team14', 'shadow')\n\n\n\nif DB_NAME not in couch:\n couch.create(DB_NAME)\ndb = 
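In `color_analyser.py` above, `Image.getcolors()` already returns `(count, colour)` pairs, so the manual most-frequent scan collapses to a one-liner; keying `max` on the count keeps the first-seen colour on ties, exactly like the strict `>` loop:

```python
# Drop-in replacement for the scan above. `pixels` comes from
# card_image.getcolors(width * height); with maxcolors = width*height
# the call can never return None.
most_frequent_pixel = max(pixels, key=lambda pair: pair[0])
```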
couch[DB_NAME]\n\n\nexit()\ndb.query()\n\ncc = []\n\n\nlog_file = open('db_insert.log', 'a+')\n\nbad_tweets = open('bad_tweets.log', 'a+')\nprint()\n\nlog_file.seek(0)\nrecord = log_file.readlines()\nff_record = [i for i in record if re.search(r'.json', i)]\nmm = None\nif record:\n mm = re.search(r'BREAK:\\s&(.+)&\\s#([0-9]+)', record[-1])\n print('PPPPPPPPPPPPPPPPPPPPPP', record[-1])\nrecord = []\n\nbreak_point = None\nif mm:\n break_point = mm.group(1), int(mm.group(2))\n\nprint('\\n', '-'*60, '\\n')\nprint(ff_record)\nprint('\\n')\nskip = False\nskip_i = 0\n\nfor f in json_files:\n\n if f + '\\n' in ff_record:\n print('skip file: ', f)\n else:\n instream = open(f)\n i = 0\n\n while True:\n\n if break_point and break_point[0] == f:\n while i < break_point[1] - 1:\n i += 1\n l = instream.readline()\n # if i and i % 10 == 0:\n skip = True\n skip_i += 1\n l = instream.readline()\n i += 1\n if l == '':\n break\n\n try:\n jdic = json.loads(l)\n if 'id_str' in jdic \\\n and 'lang' in jdic \\\n and jdic['lang'] == 'en' \\\n and 'twt' + jdic['id_str'] not in db:\n\n jdic['_id'] = 'twt' + jdic['id_str']\n jdic['sentiment'] = sentimentAPI.sentiment_analysis(jdic['text'])[0]\n db.save(jdic)\n if skip:\n print('skipped duplicated tweets: ', skip_i)\n skip = False\n skip_i = 0\n\n else:\n skip = True\n skip_i += 1\n pass\n except json.decoder.JSONDecodeError:\n # print('json.decoder.JSONDecodeError')\n # log_file.write('JsonError: &' + f + '& #' + str(i) + '\\n')\n pass\n\n # exit()\n except:\n log_file.write('BREAK: &' + f + '& #' + str(i) + '\\n')\n bad_tweets.write(l + '\\n')\n bad_tweets.flush()\n print(\"Unexpected error:\", sys.exc_info()[0])\n print(\"Unexpected error:\", sys.exc_info())\n exit()\n\n # re.search(r'BREAK:\\s&(.+)&\\s#([0-9]+)', rec)\n\n if i and i % 200 == 0:\n print(f, i)\n\n if skip:\n print('skipped duplicated tweets: ', skip_i)\n skip = False\n skip_i = 0\n\n log_file.write(f + '\\n')\n log_file.flush()\n print('COMPLETE: ', f)\n\n\n\nprint('Done')","sub_path":"couchpy/rtrt.py","file_name":"rtrt.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"562871172","text":"import pandas as pd\nimport numpy as np\nimport sys\n\ndf = pd.read_csv('/data/sidana/recnet_draft/'+sys.argv[1]+'/recnet_all/test_all_raw.csv',sep=',',header=0)\n\nusers = set(df['userId'])\nusers = list(users)\nitems_all = set(df['movieId'])\nitems_all = list(items_all)\nuser_item_dict = {}\nnew_df_data = []\nts = 1\n\nfor user in users:\n \n df_user = df[df['userId']==user].sort_values(by='timestamp')#subdataset for each user, sorted by timestamp\n click = df_user['rating']\n click = list(click)\n it_for_u = df_user['movieId']\n it_for_u = list(it_for_u)\n clicks = []\n \n for i in click:\n if (i >= 4):\n clicks = clicks + [1]\n else:\n clicks = clicks + [0]\n \n for n in range(len(clicks)):#saving all negative for each user for the next training \n if (clicks[n] == 1):\n user_item_dict[(user,it_for_u[n])] = 1\n\n for item in items_all:\n if (user, item) in user_item_dict:\n new_df_data.append([user,item,4,ts])\n else:\n new_df_data.append([user,item,1,ts])\n ts += 1 \n \n \ndf2 = pd.DataFrame(new_df_data,columns=['userId', 'movieId', 'rating', 'timestamp'])\ndf2.to_csv('/data/sidana/recnet_draft/'+sys.argv[1]+'/recnet_all/test_all_cart_raw.csv', index = 
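Two observations on `rtrt.py` above: the stray `exit()` right after `db = couch[DB_NAME]` makes everything below it unreachable as written (presumably a leftover debug guard), and the per-tweet `'twt' + id_str not in db` test costs one HTTP round-trip each. A hedged sketch of batching the writes with python-couchdb's bulk `Database.update`; the field handling mirrors the loop in this file, and `db`/`sentimentAPI` are assumed from above:

```python
BATCH_SIZE = 500
batch = []

def queue_tweet(jdic):
    if jdic.get('lang') != 'en' or 'id_str' not in jdic:
        return
    jdic['_id'] = 'twt' + jdic['id_str']
    jdic['sentiment'] = sentimentAPI.sentiment_analysis(jdic['text'])[0]
    batch.append(jdic)
    if len(batch) >= BATCH_SIZE:
        db.update(batch)   # one HTTP request for the whole batch
        batch.clear()
```

The trade-off: `update` reports duplicates back as per-document conflicts rather than skipping them up front, so the sentiment call still runs for tweets that turn out to already exist.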
False)","sub_path":"cart_prod_form.py","file_name":"cart_prod_form.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"14097525","text":"import scrapy\nfrom diseasesdatabase.items import SymptomItem,RelationshipItem\n\nclass DSSpider(scrapy.Spider):\n name = \"relationship\"\n start_urls = [\n 'http://www.diseasesdatabase.com/relationships.asp?glngUserChoice=28856&bytRel=0&blnBW=0&strBB=RL&blnClassSort=0'\n ]\n\n def parse(self, response):\n symptoms_page=response.xpath(\"//*[@id='page_specific_content']/ul/li/strong/a/@href\").extract()\n base_url=\"http://www.diseasesdatabase.com/{0}\"\n for page in symptoms_page:\n yield scrapy.Request(base_url.format(page),dont_filter=True,callback=self.parse_page)\n\n def parse_page(self,response):\n symptomitem=SymptomItem()\n symptomitem['symptom']=response.xpath('//*[@id=\"page_specific_content\"]/p[1]/strong/text()').extract_first()\n caused_url=response.xpath('//*[@id=\"page_specific_content\"]/p[1]/strong/text()').extract_first()\n yield symptomitem","sub_path":"diseasesdatabase/spiders/relationship.py","file_name":"relationship.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"469314282","text":"import torch\nimport os\nimport numpy as np\nfrom controller import CLF_CBF_controller,MPC_controller, MPC_controller_defence,Fast_controller_defence,Fast_Catch\nfrom ra_env import ReachAvoidAgent\nfrom sac import SAC\nimport argparse\n\nparser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')\nparser.add_argument('--env-name', default=\"HalfCheetah-v2\",\n help='Mujoco Gym environment (default: HalfCheetah-v2)')\nparser.add_argument('--policy', default=\"Deterministic\",\n help='Policy Type: Gaussian | Deterministic (default: Gaussian)')\nparser.add_argument('--eval', type=bool, default=True,\n help='Evaluates a policy a policy every 10 episode (default: True)')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward (default: 0.99)')\nparser.add_argument('--tau', type=float, default=0.005, metavar='G',\n help='target smoothing coefficient(τ) (default: 0.005)')\nparser.add_argument('--lr', type=float, default=0.0003, metavar='G',\n help='learning rate (default: 0.0003)')\nparser.add_argument('--alpha', type=float, default=0.2, metavar='G',\n help='Temperature parameter α determines the relative importance of the entropy\\\n term against the reward (default: 0.2)')\nparser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',\n help='Automaically adjust α (default: False)')\nparser.add_argument('--seed', type=int, default=123456, metavar='N',\n help='random seed (default: 123456)')\nparser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='batch size (default: 256)')\nparser.add_argument('--num_steps', type=int, default=1000001, metavar='N',\n help='maximum number of steps (default: 1000000)')\nparser.add_argument('--hidden_size', type=int, default=256, metavar='N',\n help='hidden size (default: 256)')\nparser.add_argument('--updates_per_step', type=int, default=1, metavar='N',\n help='model updates per simulator step (default: 1)')\nparser.add_argument('--start_steps', type=int, default=1000, metavar='N',\n help='Steps sampling random actions (default: 10000)')\nparser.add_argument('--target_update_interval', type=int, default=1, 
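The per-user loops in `cart_prod_form.py` above can be collapsed into a few vectorised pandas operations. A sketch using the same column names and the file's 4-vs-1 rating encoding; `df`, `users`, and `items_all` are the objects built earlier in that script:

```python
import pandas as pd

df['click'] = (df['rating'] >= 4).astype(int)
positives = set(zip(df.loc[df['click'] == 1, 'userId'],
                    df.loc[df['click'] == 1, 'movieId']))

# Full user x item grid, rated 4 for observed positives and 1 otherwise.
grid = pd.MultiIndex.from_product(
    [users, items_all], names=['userId', 'movieId']).to_frame(index=False)
grid['rating'] = [4 if pair in positives else 1
                  for pair in zip(grid['userId'], grid['movieId'])]
```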
metavar='N',\n help='Value target update per no. of updates per step (default: 1)')\nparser.add_argument('--replay_size', type=int, default=1000000, metavar='N',\n help='size of replay buffer (default: 10000000)')\nparser.add_argument('--cuda', action=\"store_true\",default=True,\n help='run on CUDA (default: False)')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n env = ReachAvoidAgent()\n i =0 \n state = env.reset()\n record = []\n \n # agent = SAC(env.observation_space.shape[0], env.action_space, args)\n # agent.load_model(\"models/pretrain\",None)\n # agent.load_model(\"models/save2/mpc_actor\",\"models/save2/mpc_critic\")\n\n p = np.random.rand()\n k=0\n if (p>0.4):\n k=1\n elif (0.4<=p<0.8):\n k=2\n else:\n k=3\n while True:\n # print(env.action_space.shape)\n # act_n = []\n # act_n = np.array([0,0])\n act_n = MPC_controller(state[:3],state[3:])\n # act_n = env.action_space.sample()\n # act_n = agent.select_action(state, evaluate=True)\n\n record.append( np.append(state,np.array([act_n[0],act_n[1]])) )\n\n \n obs_n, reward_n, done_n, _ = env.step(act_n,k)\n state = obs_n\n i=i+1\n # np.save(\"record.npy\",record)\n\n if(done_n == True):\n print(env.times)\n env.reset(True)\n p = np.random.rand()\n k=0\n if (p>0.4):\n k=2\n elif (0.4<=p<0.8):\n k=2\n else:\n k=3\n \n \n # print(done_n)\n # env.render()\n \n","sub_path":"reach_avoid_training/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"176409812","text":"import random\n\n\nclass Board:\n\n def __init__(self, width):\n self.board_width = width\n self.board_height = 5\n self.curr_board = [[0 for x in range(self.board_width)] for y in range(self.board_height)]\n self.top_row = self.curr_board[0]\n self.player = Player(board_width=self.board_width)\n self.score = 0\n self.alive = True\n self.curr_state = []\n self.last_state = []\n self.reward = 0\n\n def print_board(self):\n self.curr_board[0][self.player.get_current_pos()] = \"X\"\n print(\"SCORE: %s\\n\" % self.get_score())\n for row in self.curr_board:\n for val in row:\n print(val, end=\" \")\n print()\n print(\"\\n\")\n\n def next_row(self):\n del self.curr_board[0]\n self.add_row()\n self.check_survive()\n\n def add_row(self):\n next_row = [0 for x in range(self.board_width)]\n next_row[random.randint(0, self.board_width-1)] = 1\n self.curr_board.append(next_row)\n\n def check_survive(self):\n self.alive = self.curr_board[0][self.player.get_current_pos()] == 0\n return self.alive\n\n def update_score(self):\n if self.check_survive():\n if self.player.last_move == 0:\n self.reward = 1\n self.score += 1\n else:\n self.reward = float(1) / (abs(self.player.last_move)+1)\n self.score += float(1) / (abs(self.player.last_move)+1)\n else:\n self.reward = -1\n\n def update_current_state(self):\n self.last_state = self.curr_state\n # Alive/Dead, Current Position, Matrix Values\n state = [self.player.curr_pos]\n for row in self.curr_board:\n for val in row:\n state.append(val)\n self.curr_state = state\n\n def get_score(self):\n return self.score\n\n def get_player(self):\n return self.player\n\n def get_reward(self):\n return self.reward\n\n def get_width(self):\n return self.board_width\n\n def get_state(self):\n return self.curr_state\n\n def get_state_size(self):\n return 1 + self.board_width*self.board_height\n\n\nclass Player:\n\n def __init__(self, board_width):\n self.board_width = board_width\n self.initial_pos = int(board_width / 2)\n 
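In `test.py` above, the mode-selection branches are dead on arrival: `p > 0.4` already captures everything the `elif 0.4 <= p < 0.8` case could match, so `k` is effectively drawn from {1, 3} with weights 0.6/0.4 — and the reset block assigns `k = 2` in two of its three branches. If the intent was a 40/40/20 split (an assumption), `random.choices` makes the weights explicit and leaves no unreachable branch:

```python
import random

# Assumed intended distribution over the three controller modes.
k = random.choices([1, 2, 3], weights=[0.4, 0.4, 0.2])[0]
```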
self.curr_pos = self.initial_pos\n self.last_move = 0\n\n def move(self, direction):\n self.last_move = direction\n self.curr_pos = self.curr_pos + direction\n if self.curr_pos < 0:\n self.curr_pos = 0\n elif self.curr_pos >= self.board_width:\n self.curr_pos = self.board_width - 1\n\n def get_current_pos(self):\n return self.curr_pos\n\n\nif __name__ == \"__main__\":\n board = Board(width=5)\n player = board.get_player()\n while board.check_survive():\n board.print_board()\n player.move(int(input(\"DIR: \")))\n board.next_row()\n board.update_score()\n board.update_current_state()\n\n\n print(\"You Died! \\t Final Score: %s\" % board.get_score())\n\n","sub_path":"simple_game.py","file_name":"simple_game.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"343177135","text":"import os\nfrom flask import Flask, flash, request, redirect, url_for, render_template, send_from_directory\nfrom tsn_predict import TSNPredictor as CelebASpoofDetector\n\nfrom werkzeug.utils import secure_filename\nimport model\nimport fas\n\n\nUPLOAD_FOLDER = './static/images/'\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}\n\ndetector = CelebASpoofDetector()\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.after_request\ndef add_header(response):\n \"\"\"\n Add headers to both force latest IE rendering engine or Chrome Frame,\n and also to cache the rendered page for 10 minutes.\n \"\"\"\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n # if (request.files['file']):\n # filename = 'web.png'\n # request.files['file'].save(os.path.join(\n # app.config['UPLOAD_FOLDER'], filename))\n # return redirect(request.url)\n # if 'file' not in request.files:\n # flash('No file part')\n # return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n # if file.filename == '':\n # flash('No selected file')\n # return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = 'image.png'\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n result = fas.run_test(detector, \"./static/images/image.png\")\n if (result >= .5):\n flag = \"Real\"\n color = \"green\"\n else:\n flag = \"Fake\"\n color = \"red\"\n return redirect(url_for('uploaded_file',\n flag=flag, color=color))\n return render_template(\"index.html\")\n\n\n@app.route('/result//', methods=['GET', 'POST'])\ndef uploaded_file(flag, color):\n return render_template(\"result1.html\", flag=flag, color=color)\n\n\n@app.route('/web_result', methods=['GET', 'POST'])\ndef web_result():\n result = fas.run_test(detector, \"./static/images/image.png\")\n if (result >= .5):\n flag = \"Real\"\n color = \"green\"\n else:\n flag = \"Fake\"\n color = \"red\"\n print(flag, color, \"redirecting\")\n return redirect(url_for('uploaded_file',\n flag=flag, color=color))\n\n\n@app.route('/web', methods=['GET', 'POST'])\ndef web():\n if request.method == 'POST':\n i = request.files['image'] # get the image\n f = \"image.png\"\n i.save('%s/%s' % (\"./static/images/\", f))\n\n # show the form, it 
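A usage sketch for the `Board`/`Player` API above: a headless random-policy rollout, e.g. for collecting transitions before wiring in a learner. The {-1, 0, 1} action set is an assumption (the original reads an arbitrary integer from stdin), and skipping `print_board` also sidesteps its side effect of writing the `"X"` marker into the live grid:

```python
import random

board = Board(width=5)
player = board.get_player()
transitions = []
while board.check_survive():
    action = random.choice([-1, 0, 1])   # left / stay / right (assumed)
    player.move(action)
    board.next_row()
    board.update_score()
    board.update_current_state()
    transitions.append((board.get_state(), action, board.get_reward()))
```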
wasn't submitted\n return render_template('web.html')\n\n# # save the image as a picture\n# @app.route('/image', methods=['GET', 'POST'])\n# def image():\n\n# i = request.files['image'] # get the image\n# f = \"image.png\"\n# i.save('%s/%s' % (\"./upload/\", f))\n# # result = fas.run_test(detector, \"./static/images/image.png\")\n# result =.6\n# if (result >= .5):\n# flag = \"Real\"\n# color = \"green\"\n# else:\n# flag = \"Fake\"\n# color = \"red\"\n# print(flag, color, \"redirecting\")\n# return render_template(\"image.html\", flag=flag)\n\n# @app.route(\"/sub\", methods = [\"POST\"])\n# def submit():\n# if request.method == \"POST\":\n# name = request.form[\"username\"]\n\n# return render_template(\"sub.html\", name= name)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port='5001', debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"472703552","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import View\nfrom .models import Producto\nfrom cart.forms import CartAddProductForm\n\nclass Lista(View):\n def get(self, request):\n productos = Producto.objects.all()\n template_name = 'tienda/lista.html'\n context = {\n 'productos':productos\n }\n return render(request, template_name, context)\n\nclass Detalle(View):\n def get(self, request, producto_id):\n template_name = 'tienda/detalle.html'\n producto = get_object_or_404(Producto,id=producto_id)\n form = CartAddProductForm()\n context = {\n 'producto':producto,\n 'form':form\n }\n return render(request, template_name, context)\n","sub_path":"tienda/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"534742926","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, find_packages, Distribution\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"any\",\n version=\"0.0.2\",\n author=\"Anyscale Inc.\",\n description=(\"Command Line Interface for Anyscale\"),\n packages=find_packages(),\n install_requires=requirements,\n entry_points={\n \"console_scripts\": [\n \"any=any.scripts:main\"\n ]\n },\n include_package_data=True,\n zip_safe=False)\n","sub_path":"pypi_install_script/any-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"494484806","text":"\"\"\"\nThe script for preprocessing data for visual module training.\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport numpy as np\nimport PIL\nfrom PIL import Image\nimport tensorflow as tf\nimport json\nimport argparse\n\ndef read_file(data_dir = \".\", train_fname = \"annotations_train.json\"):\n train = json.load(open(os.path.join(data_dir, train_fname), 'r'))\n #val = json.load(open(os.path.join(data_dir, test_fname), 'r'))\n return train\n\ndef transform_bbox(bbox):\n return [bbox[0], bbox[2], bbox[1], bbox[3]]\n\ndef _get_box(bbox1, bbox2):\n return (min(bbox1[2], bbox2[2]),\\\n min(bbox1[0], bbox2[0]),\\\n max(bbox1[3], bbox2[3]),\\\n max(bbox1[1], bbox2[1]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list = tf.train.BytesList(value = [value]))\n\ndef 
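In the Flask app above, the presence checks around `request.files['file']` are commented out, so a POST without a file field raises a `BadRequestKeyError` instead of flashing a message. A sketch restoring the guard inside the handler, following the canonical Flask upload recipe (`request`, `flash`, and `redirect` are already imported in that file):

```python
file = request.files.get('file')          # None instead of a 400 KeyError
if file is None or file.filename == '':
    flash('No file selected')
    return redirect(request.url)
```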
_int64_feature(value):\n return tf.train.Feature(int64_list = tf.train.Int64List(value = [value]))\n\ndef _get_output_filename(output_dir, basename, idx):\n return '%s/%s_%03d.tfrecord' % (output_dir, basename, idx)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"--data_dir\", help = \"the json_data dir\", default = \"/home/mxy/json_data/\")\n parser.add_argument(\"--train_fname\", help = \"train json\", default = \"annotations_train.json\")\n #parser.add_argument(\"--test_fname\", help = \"test json\", default = \"annotations_test.json\")\n \n parser.add_argument(\"--tfrecord_dir\", help = \"tfrecord directory\", default = \".\")\n parser.add_argument(\"--tfrecord_fname\", help = \"tfrecord basename\", default = \"tf_record\")\n \n parser.add_argument(\"--dataset_dir\", help = \"sp_data dir\", default = \"/home/mxy/json_data/sg_dataset/sg_train_images\")\n \n parser.add_argument(\"--bbox_height\", help = \"bbox height\", default = 224)\n parser.add_argument(\"--bbox_width\", help = \"bbox width\", default = 224)\n\n parser.add_argument(\"--process_bbox\", help = \"store union of two bboxs\", default = \"true\")\n parser.add_argument(\"--tfrecord_num\", help = \"store number of images per tfrecord\", default = 100)\n args = parser.parse_args()\n \n print(\"Reading from {}\".format(os.path.join(args.data_dir, args.train_fname)))\n #print(\"Reading from {}\".format(os.path.join(args.data_dir, args.test_fname)))\n train = read_file(args.data_dir, args.train_fname)\n #record_fname = os.path.join(args.tfrecord_dir, args.tfrecord_fname)\n #record_fname.append(\".tfrecord\")\n \n idx = 0\n record_fname = _get_output_filename(args.tfrecord_dir, args.tfrecord_fname, idx)\n tfrecord_writer = tf.python_io.TFRecordWriter(record_fname)\n print(\"Saving to {}\".format(record_fname))\n \n cnt = 0\n tmp = 0\n if args.process_bbox == \"true\":\n for ii in train:\n \n for jj in range(len(train[ii])):\n # bbox = [Ymin, Ymax, Xmin, Xmax]\n bbox1 = train[ii][jj]['object']['bbox']\n bbox2 = train[ii][jj]['subject']['bbox']\n #bbox1 = transform_bbox(bbox1)\n #bbox2 = transform_bbox(bbox2)\n predicate = train[ii][jj]['predicate']\n fname = os.path.join(args.dataset_dir, ii)\n if not os.path.isfile(fname):\n print(\"file {} does not exist\".format(fname))\n continue\n img = Image.open(fname)\n #img = img.load()\n bbox = _get_box(bbox1, bbox2)\n\n b = np.array(img.crop(bbox).resize([args.bbox_height, args.bbox_width], PIL.Image.BILINEAR))\n\n example = tf.train.Example(features = tf.train.Features(feature = { \\\n 'bbox': _bytes_feature(b.tostring()),\\\n 'predicate' : _int64_feature(predicate)}))\n\n tfrecord_writer.write(example.SerializeToString())\n tmp += 1\n if tmp % args.tfrecord_num == 0:\n tfrecord_writer.close()\n idx += 1\n record_fname = _get_output_filename(args.tfrecord_dir, args.tfrecord_fname, idx)\n print(\"Saving to {}\".format(record_fname))\n tfrecord_writer = tf.python_io.TFRecordWriter(record_fname)\n cnt += 1\n print(\"finish {}\".format(cnt / float(len(train))))\n\n else :\n for ii in train:\n fname = os.path.join(args.dataset_dir, ii)\n if not os.path.isfile(fname):\n print(\"file {} does not exist\".format(fname))\n continue\n img = Image.open(fname)\n # size(height, width)\n size = img.size\n height = size[0]\n width = size[1]\n img = np.array(img)\n bbox1 = []\n bbox2 = []\n predicate = []\n for jj in range(len(train[ii])):\n bbox1.extend(train[ii][jj]['object']['bbox'])\n bbox2.extend(train[ii][jj]['subject']['bbox'])\n 
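For the first branch above (one cropped bbox image plus an integer predicate per `Example`), a hedged sketch of the matching reader. The writer uses the TF 1.x `tf.python_io` API; this sketch assumes a TF 2.x reader, the default 224x224 size, and a 3-channel uint8 image — all assumptions, since the channel count depends on the source files:

```python
import tensorflow as tf

feature_spec = {
    'bbox': tf.io.FixedLenFeature([], tf.string),
    'predicate': tf.io.FixedLenFeature([], tf.int64),
}

def parse_example(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_spec)
    image = tf.io.decode_raw(parsed['bbox'], tf.uint8)   # raw tostring() bytes
    image = tf.reshape(image, [224, 224, 3])
    return image, parsed['predicate']

dataset = tf.data.TFRecordDataset(['tf_record_000.tfrecord']).map(parse_example)
```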
predicate.append(train[ii][jj]['predicate'])\n\n bbox1 = np.array(bbox1)\n bbox2 = np.array(bbox2)\n predicate = np.array(predicate)\n example = tf.train.Example(features = tf.train.Features(feature = {\\\n 'bbox1': _bytes_feature(bbox1.tostring()),\\\n 'bbox2': _bytes_feature(bbox2.tostring()),\\\n 'image': _bytes_feature(img.tostring()),\\\n 'height': _int64_feature(height),\\\n 'width': _int64_feature(width),\\\n 'predicate': _bytes_feature(predicate.tostring())\\\n }))\n tfrecord_writer.write(example.SerializeToString())\n cnt += 1\n print(\"finish {}\".format(cnt / float(len(train))))\n if cnt % args.tfrecord_num == 0:\n tfrecord_writer.close()\n idx += 1\n record_fname = _get_output_filename(args.tfrecord_dir, args.tfrecord_fname, idx)\n print(\"Saving to {}\".format(record_fname))\n tfrecord_writer = tf.python_io.TFRecordWriter(record_fname)\n\n tfrecord_writer.close()\n\n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"prepare_utils/sg_preprocess.py","file_name":"sg_preprocess.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"317192191","text":"# -*- coding:utf8 -*-\n\n###############################\n# Progamme damiers/pions #\n# Auteur: C.WELLER 11/05/15 #\n# Licence: GPL #\n###############################\n\n\n#####################################\n# importations de fonctions externes\nfrom tkinter import *\nfrom random import randrange\n\n\n#####################################\n# définition des fonctions locales\n\ndef damier():\n \"creation d'un damier\"\n x1,x2=0,30\n y1,y2=0,30\n while y2<=300:\n while x2<=300:\n can.create_rectangle(x1, y1, x2, y2, fill='black')\n x1,x2 =x1+60,x2+60\n x1,x2=x1-330,x2-330\n y1,y2=y1+30,y2+30\n\n\ndef pions(coul='blue'):\n \"creation d'un pion\"\n can.create_oval(0, 30, 30, 60, outline=coul,fill=coul)\n \n\n###### PROGRAMME PRINCIPAL ######\n\nfen = Tk()\ncan = Canvas(fen, width=300, height=300, bg='white')\ncan.pack(side=TOP,padx=5,pady=5)\nb1 = Button(fen, text='damier', command=damier)\nb1.pack(side=LEFT, padx=3, pady=3)\nb2 = Button(fen, text='pions', command=pions)\nb2.pack(side=RIGHT, padx=3, pady=3)\nfen.mainloop()\n","sub_path":"exo_damiers.py","file_name":"exo_damiers.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"291023129","text":"import graphics as gr\nimport math\n\nwidth = 500 # int(input('Введите ширину поля... 
')) * 10\nx0 = 250\ny0 = 250\n\nsensors = [[-45, 50], [-15, 50], [15, 50], [45, 50], [-30, 35], [0, 35],\n [30, 35], [-15, 20], [15, 20], [-45, 20], [45, 25],\n [-30, 5], [-5, 5], [-45, -10], [-15, -10], [25, -10], [45, -10],\n [-25, -20], [10, -30], [35, -30], [-45, -40], [-15, -50], [15, -50],\n [45, -50]]\n# sensors = [[-15, -10], [10, -30]]\n\n\nwindow = gr.GraphWin('Picture', width, width)\n\n\ndef drawRectangle(x1, y1, x2, y2, fill='red', outline='red'):\n my_rectangle = gr.Rectangle(gr.Point(x1, y1), gr.Point(x2, y2))\n my_rectangle.setFill(fill)\n my_rectangle.setOutline(outline)\n my_rectangle.draw(window)\n\n\ndef drawCircle(x, y, r, fill='white', outline='black'):\n my_circle = gr.Circle(gr.Point(x, y), r)\n my_circle.setFill(fill)\n my_circle.setOutline(outline)\n my_circle.draw(window)\n\n\ndef drawLine(x1, y1, x2, y2, width=1, fill='red', outline='red'):\n my_line = gr.Line(gr.Point(x1, y1), gr.Point(x2, y2))\n my_line.setFill(fill)\n my_line.setOutline(outline)\n my_line.setWidth(width)\n my_line.draw(window)\n\n\ndef drawSensorRadius(sensor, r=75, fill='white', outline='white'):\n coordX = x0+sensor[0]*5\n coordY = (y0-sensor[1]*5)\n drawCircle(coordX, coordY, r, fill, outline)\n\n\ndef drawErrorSensor(sensor):\n coordX = x0+sensor[0]*5\n coordY = (y0-sensor[1]*5)\n drawCircle(coordX, coordY, 10, 'red', 'red')\n\n\ndef drawSensorInfo(sensor):\n coordX = x0+sensor[0]*5\n coordY = (y0-sensor[1]*5)\n drawCircle(coordX, coordY, 5, 'yellow', 'yellow')\n\n txt = gr.Text(gr.Point(coordX+10, coordY+15), sensor)\n txt.setSize(10)\n txt.draw(window)\n\n\ndef checkPoint(A, r, errors=[]):\n ABmin = 99999\n res = False\n for B in sensors:\n ABLength = math.sqrt((A[0]-B[0])**2+(A[1]-B[1])**2)\n if ABLength <= r:\n return False\n if ABLength < ABmin:\n ABmin = ABLength\n res = B\n\n return res\n\n\ndef showBlindSpots(sensors):\n res = []\n criticals = []\n\n for i in range(-45, 50, 1):\n for j in range(-50, 50, 1):\n A = [i, j]\n crit = checkPoint(A, 15)\n if crit:\n res.append(A)\n criticals.append(crit)\n\n\n for sensor in sensors:\n drawSensorRadius(sensor)\n\n for point in res:\n drawSensorRadius(point, 2, 'green', 'green')\n\n for point in criticals:\n drawErrorSensor(point)\n\n for sensor in sensors:\n drawSensorInfo(sensor)\n\n\nshowBlindSpots(sensors)\ndrawRectangle(0, width/2, width, width/2, 'blue', 'blue')\ndrawRectangle(width/2, 0, width/2, width, 'blue', 'blue')\n\n\nwindow.getMouse()\nwindow.close()\n","sub_path":"Lesson-4/extra/ex-03-original.py","file_name":"ex-03-original.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"598199798","text":"import sys\nimport copy\nimport os\nimport mConfig\nimport mImage\nimport logging\nimport mCase\nimport mFile\nimport mPicLib\nimport mUtils\nfrom PIL import Image,ImageDraw,ImageFont,ImagePath,ImageChops, ImageDraw,ImageFont\n\ndef dumpCrop (size):\n print (size);\n s = \"left:%s top:%s right:%s bottom:%s\"%(size[\"left\"], size[\"top\"], size[\"right\"], size[\"bottom\"]);\n return s;\n\n\n\ndef littleBox (path, widthTarget, heightTarget, ref=\"CC\"):\n im = Image.open (path);\n size = im.size ;\n widthOrg = size [0];\n heightOrg = size [1];\n widthTemp = widthTarget;\n heightTemp = heightTarget;\n logging.info (\"widthTarge:%d heightTarget:%d\"%(widthTarget, heightTarget));\n\n if (widthTarget*1.0)/(widthOrg*1.0) > (heightTarget*1.0)/(heightOrg*1.0):\n heightTemp = int(heightOrg*(widthTarget*1.0/(widthOrg*1.0)));\n im = 
im.resize((widthTarget, heightTemp), Image.LANCZOS);\n else:\n widthTemp = int(widthOrg*(heightTarget*1.0/(heightOrg*1.0)));\n im = im.resize((widthTemp, heightTarget), Image.LANCZOS);\n\n cropSize = getCrop (widthTemp, heightTemp, widthTarget, heightTarget, ref);\n logging.info (dumpCrop(cropSize));\n im = im.crop ((cropSize[\"left\"], cropSize[\"top\"], cropSize[\"right\"], cropSize[\"bottom\"]));\n return im;\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO);\n im = pansan(\"C:/10052.tif\", 400, 200, \"LC\");\n im.save (\"c:/400LC.jpg\");\n\n","sub_path":"mLittleBox.py","file_name":"mLittleBox.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"265915617","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'\ndata = pd.read_csv('winequality-red.csv', sep=';')\nX = data.drop('quality', axis=1)\ny = data.quality\n\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train, y_train)\n\nresult = knn.score(X_test, y_test)\n# print(result)","sub_path":"kNN examples/kNN wine set.py","file_name":"kNN wine set.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"142626542","text":"\"\"\"\nGoodland is a country with 'n' cities, and each city 'ci' is sequentially numbered from '0' to 'n-1'. These cities are connected by 'n-1' roads, and each road connects city 'ci' to its neighboring city, 'ci+1' . The distance between any two cities 'ci' and 'cj' is '|i-j|'.\n\nGoodland's government started a project to improve the country's infrastructure and bring electricity to its citizens. It built at most one electrical tower in every city, but they haven't turned any of them on yet. 
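Two loose ends in `mLittleBox.py` above: `getCrop` is not defined in the file (presumably it lives in one of the `m*` imports), and the `__main__` block calls `pansan(...)`, which does not exist here — likely an older name for `littleBox`. Purely as an assumption about what `getCrop` returns for `ref="CC"`, a centred-window sketch in the left/top/right/bottom dict shape that `littleBox` consumes:

```python
def get_center_crop(width, height, target_w, target_h):
    """Assumed behaviour of getCrop() for ref='CC'; other ref codes
    (e.g. 'LC') presumably anchor the window differently."""
    left = (width - target_w) // 2
    top = (height - target_h) // 2
    return {"left": left, "top": top,
            "right": left + target_w, "bottom": top + target_h}
```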
Once switched on, each tower produces enough power to provide electricity to all neighboring cities at a distance 'n):\n j=n-1\n while (loc<=j[-\\w]+)/$', views.options_view, name='options'),\n url(r'^feature/(?P[-\\w]+)/$', views.features_view, name='features'),\n \n url(r'^i18n/', include('django.conf.urls.i18n')),\n ]\n\nif settings.DEBUG:\n if settings.MEDIA_ROOT:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n \n\n\nurlpatterns += staticfiles_urlpatterns()\n\n\n","sub_path":"uitt_net/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"487650117","text":"from django.shortcuts import render\nfrom .models import ChessBoard\nimport sys, requests, json\n\ndef chess_board(request):\n # new chess board layout\n Board = ChessBoard()\n # show layout if 'fen' was posted\n if request.POST.get('fen'):\n Board.fen = request.POST.get('fen')\n # perform a recommended chess move if 'move' was posted\n if (request.POST.get('move') == 'true'):\n try:\n # get recommended move\n api_response = requests.get('https://syzygy-tables.info/api/v2?fen=' + Board.fen)\n except requests.exceptions.RequestException as e:\n # our api request failed\n Board.api_error = e\n try:\n # convert to api response to json and get the first option in 'moves'\n moves = json.loads(api_response.text)['moves']\n recommended_move = next(iter(moves))\n # move chess pieces\n Board.move(recommended_move)\n except:\n # the api didn't return a move, probably because the FEN is invalid\n Board.moved = 'no recommended move from API'\n # pass the chess board to the view template\n return render(request, 'byucodechallenge/chess.html', {'board': Board})\n ","sub_path":"byucodechallenge/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"549275540","text":"# Paul & Valentin - 16.08.18\n\n# Exercise Selection\nex = int(input(\"Choose the exercise from 1 to 9: \"))\n\n# Exercise 1\n\nif ex == 1:\n a = int(input(\"Number 1: \"))\n b = int(input(\"Number 2: \"))\n c = int(input(\"Number 3: \"))\n\n values = [a,b,c]\n\n min_ = 0;\n max_ = 0;\n\n for value in values:\n if min_ == 0 or value < min_:\n min_ = value\n if max_ == 0 or value > max_:\n max_ = value\n\n print(\"Minimum: \"+str(min_))\n print(\"Maximum: \"+str(max_))\n\n\n# Exercise 2\n\nif ex == 2:\n\n money = int(input(\"Type in your balance: \"))\n wantedMoney = int(input(\"Type in your balance: \"))\n tax = int(input(\"Type in your tax yearly percentage: \"))\n time = int(input(\"Type in your timespan in years: \"))\n\n result = money;\n\n for i in range(time):\n result = result + ((result / 100) * tax);\n\n print(\"Your balance equals \"+str(result))\n\n# Exercise 3\n\nif ex == 3:\n\n number = int(input(\"Type in a number: \"))\n\n result = 1;\n\n for i in range(1, number+1):\n result = result*i;\n\n print(\"The faculty is \"+str(result))\n\n# Exercise 4\n\nif ex == 4:\n\n h = int(input(\"Type in the hours: \"))\n m = int(input(\"Type in the minutes: \"))\n\n time = h+m/60.0 # Hours\n\n print(\"Hour amount \"+str(time))\n\n price = 1.50;\n\n if time >= 3:\n time = time - 3\n price = price + time * 0.75;\n\n print(\"You have to pay \"+str(price)+\" Euro\")\n\n# Exercise 5\n\nif ex == 5:\n\n for x in range(10):\n for y in range(10):\n print(str(x)+\" * \"+str(y)+\" = \"+str(x*y))\n\n\n\n# Exercise 6\n\nif 
ex == 6:\n M = int(input(\"Type in your km: \"))\n P = int(input(\"Type in the amount of students: \"))\n\n tickets = P/3*2\n price = M *0.2\n\n amount = (tickets * price) / P\n\n print(\"You have to pay \"+str(amount)+ \" Euro per person\")\n\n# Exercise 7\n\nif ex == 7:\n print(\"Math is trash\")\n\n# Exercise 8\n\nif ex == 8:\n import collections\n\n money = float(input(\"Type in the amount of money: \"))\n money = money*100;\n\n i = money;\n\n # All available currencies in cents\n currency = {\n 5000:0,\n 2000:0,\n 1000:0,\n 500:0,\n 200:0,\n 100:0,\n 50:0,\n 20:0,\n 10:0,\n 5:0,\n 2:0,\n 1:0\n }\n\n # Ordering Array (Descending)\n currency = collections.OrderedDict(sorted(currency.items(), reverse=True))\n\n # Calculating\n while i != 0:\n for k,v in currency.items():\n if k <= i:\n amount = int(i/k);\n i = i % k;\n currency[k] = currency[k]+amount;\n\n # Outputting\n for k,v in currency.items():\n if v != 0:\n print(str(v)+\"x \"+str(k/100.0)+\" Euro\")\n\n\n# Exercise 9\n\nif ex == 9:\n\n print(\"Type in the values and type in close to continue\")\n\n running = True\n values = [];\n\n while running:\n value = raw_input();\n\n if value == \"close\":\n running = False\n break\n\n try:\n values.append(int(value))\n except:\n print(\"You have to type in a number instead\")\n\n print(\"------------\")\n\n min = values[0];\n max = values[0];\n sum = 0;\n\n for v in values:\n if v < min:\n min = v;\n if v > max:\n max = v;\n\n sum += v;\n\n print(\"Minimum: \"+str(min))\n print(\"Maximum: \"+str(max))\n print(\"Sum: \"+str(sum))\n print(\"Average: \"+str(sum/len(values)))\n\n\n\n\n\n","sub_path":"16.08.18.py","file_name":"16.08.18.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"294581879","text":"import pygame\r\nfrom pygame.draw import *\r\nimport random\r\nfrom random import randint \r\npygame.init()\r\n\r\nFPS = 3\r\nscreen = pygame.display.set_mode((700, 700))\r\n\r\nRED = (255, 0, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nGREEN = (0, 255, 0)\r\nMAGENTA = (255, 0, 255)\r\nCYAN = (0, 255, 255)\r\nBLACK = (0, 0, 0)\r\nCOLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]\r\n\r\ndef new_ball(x, y):\r\n '''\r\n Функция рисует цветной мяч на экране.\r\n Параметры:\r\n x (int): Абсцисса центра мяча\r\n y (int): Ордината центра мяча\r\n Возвращаемые значения:\r\n Pigame.Rect: Выводит мяч на экран\r\n '''\r\n color = COLORS[randint(0, 5)]\r\n circle(screen, color, (x, y), 30)\r\n\r\ndef new_angry(a, b, R):\r\n '''\r\n Функция рисует злой смайлик на экране.\r\n Параметры:\r\n x (int): Абсцисса центра смайлика\r\n y (int): Ордината центра смайлика\r\n R (int): Радиус смайлика \r\n Возвращаемые значения:\r\n Pigame.Rect: Выводит смайлик на экран\r\n '''\r\n N = 150//R\r\n circle(screen, (225, 225, 0), (a, b), 150//N)\r\n circle(screen, (255, 0, 0), (a - 80//N, b - 25//N), 40//N)\r\n circle(screen, (255, 0, 0), (a + 80//N, b - 25//N), 40//N)\r\n circle(screen, (0, 0, 0), (a + 80//N, b - 25//N), 20//N)\r\n circle(screen, (0, 0, 0), (a - 80//N, b - 25//N), 20//N)\r\n rect(screen, (0, 0, 0), (a - 80//N, b + 55//N , 150//N, 30//N))\r\n polygon(screen, (0, 0, 0), [[a + 40//N, b - 35//N], [a + 20//N, b - 55//N], [a + 110//N, b - 95//N], [a + 100//N, b - 75//N]])\r\n polygon(screen, (0, 0, 0,), [[a - 40//N, b - 35//N], [a - 20//N, b - 55//N], [a - 110//N, b - 95//N], [a - 100//N, b - 75//N]])\r\n\r\ndef new_sad(a, b, R):\r\n '''\r\n Функция рисует грустный смайлик на экране.\r\n Параметры:\r\n x 
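A float pitfall in exercise 8 above: `money * 100` on input like `19.29` gives `1928.999…`, and truncating with `int()` silently loses a cent before the greedy loop even starts (the greedy itself is correct for this canonical coin system). A sketch with integer cents and `divmod` replacing the inner scan:

```python
denominations = [5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1]
counts = {d: 0 for d in denominations}

cents = round(19.29 * 100)        # round() first: 19.29 * 100 == 1928.999...
for denom in denominations:       # descending, like the OrderedDict above
    counts[denom], cents = divmod(cents, denom)
```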
(int): Абсцисса центра смайлика\r\n y (int): Ордината центра смайлика\r\n R (int): Радиус смайлика \r\n Возвращаемые значения:\r\n Pigame.Rect: Выводит смайлик на экран\r\n '''\r\n N = 150//R\r\n circle(screen, (225, 225, 0), (a, b), 150//N)\r\n circle(screen, (0, 200, 0), (a - 80//N, b - 25//N), 40//N)\r\n circle(screen, (0, 200, 0), (a + 80//N, b - 25//N), 40//N)\r\n circle(screen, (0, 0, 0), (a + 80//N, b - 25//N), 20//N)\r\n circle(screen, (0, 0, 0), (a - 80//N, b - 25//N), 20//N)\r\n rect(screen, (0, 0, 0), (a - 80//N, b + 55//N, 150//N, 30//N))\r\n polygon(screen, (0, 0, 0,), [[a - 110//N, b - 35//N], [a - 130//N, b - 55//N], [a - 40//N, b - 95//N], [a - 50//N, b - 75//N]])\r\n polygon(screen, (0, 0, 0,), [[a + 110//N, b - 35//N], [a + 130//N, b - 55//N], [a + 40//N, b - 95//N], [a + 50//N, b - 75//N]])\r\n\r\ndef ball_move(x, determination):\r\n '''\r\n Функция управляет движением мячика.\r\n Параметры:\r\n x (int): координата центра мяча\r\n determination {1, 2}: параметр отвечающий за движение мяча 1 - увеличение координаты, 2 - уменьшение\r\n Возвращаемые значения:\r\n list: Список из двух элементов: новой координаты и нового направления\r\n '''\r\n if x < 560:\r\n if determination == 1:\r\n x = x + randint(1, 30)\r\n else:\r\n if x > 30:\r\n x = x - randint(1, 30)\r\n else:\r\n determination = 1\r\n x = x + 31\r\n else:\r\n determination = 2\r\n x = x - 31\r\n return list([x, determination])\r\n \r\ndef move(a):\r\n '''\r\n Функция управляет движением смайликов.\r\n Параметры:\r\n a (int): координата центра смайлика\r\n Возвращаемые значения:\r\n a (int): новая координата центра смайлика\r\n '''\r\n if a < 500 and a > 100: \r\n a = a + int(random.uniform(100, -100))\r\n else:\r\n a = randint(50, 500)\r\n return a\r\n\r\n\r\nprint('Game rules:')\r\nprint('Your main aim is to click to a sad green smile by left mousebutton. If it is suceccessful you get + 10 scores')\r\nprint('However you should not click to the angre smile (big and little), else you would get -1 life. At the beginning you have 10 lives so if it turns 0 you lose.')\r\nprint('If you have 100 points you should click by right mousebutton. When you do it you scores turns 0 and a new small angre smile appear. Do not click on it!')\r\nprint('You have also another opportunity to get points. There is also a colourful ball on the screen. 
So if you click right mousebutton on the moment the main (big) angry smile is situated on the ball, it would be a strike and you get +30 scores!')\r\nprint('You win if there are 3 small angre smiles on the screen and you have 100 scores')\r\n\r\n\r\na0 = randint(15, 585)\r\nb0 = randint(15, 585)\r\nR = randint(30,50)\r\nx = randint(15, 585)\r\ny = randint(15, 585)\r\nr = randint(30,50)\r\nc = randint(15, 585)\r\nd = randint(15, 585)\r\nx_determination = 1\r\ny_determination = 1\r\nt = 1\r\nlives = 10\r\na = []\r\nb = []\r\nfor i in range(1,21):\r\n a.append(randint(15, 585))\r\n b.append(randint(15, 585))\r\nscores = 0\r\nnew_angry(a0, b0, R)\r\nnew_sad(c, d, r)\r\n\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\nfinished = False\r\n\r\nwhile not finished:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n finished = True\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n pos = pygame.mouse.get_pos()\r\n x_mouse = pos[0]\r\n y_mouse = pos[1]\r\n if ((a0 - x_mouse)**2 + (b0 - y_mouse)**2) < R**2:\r\n lives = lives - 1\r\n if t >= 2:\r\n for i in range(1, t):\r\n if ((a[i] - x_mouse)**2 + (b[i] - y_mouse)**2) < 400:\r\n lives = lives - 1\r\n print('scores = ', scores)\r\n print('lives = ', lives)\r\n if ((c - x_mouse)**2 + (d - y_mouse)**2) < r**2:\r\n scores = scores + 10\r\n print('scores = ', scores)\r\n print('lives = ', lives)\r\n if (((a0 - x)**2 + (b0 - y)**2) < (R +30)**2) and (event.button == 3):\r\n scores = scores + 30\r\n print('Strike!!!')\r\n print('scores = ', scores)\r\n print('lives = ', lives)\r\n if (scores >= 100) and (event.button == 3):\r\n t = t + 1;\r\n scores = 0;\r\n if lives <= 0:\r\n f1 = open('You have lost.txt', 'w')\r\n f1.write('You have lost.Better luck next time.\\n')\r\n f1.write('your lives = 0!!!\\n')\r\n if t >= 6:\r\n f1 = open('Congratulations.txt', 'w')\r\n f1.write('Congratulations!!!\\n')\r\n f1.write('You get more than 100 points!!!\\n')\r\n f1.write('You are the winner!!!')\r\n rect(screen, (102, 0, 102), (0, 0, 600, 600), 20)\r\n f = ball_move(x, x_determination)\r\n x = f[0]\r\n x_determination = f[1]\r\n g = ball_move(y, y_determination)\r\n y = g[0]\r\n y_determination = g[1]\r\n new_ball(x, y)\r\n a0 = move(a0)\r\n b0 = move(b0)\r\n R = randint(30,50)\r\n new_angry(a0, b0, R)\r\n c =move(c)\r\n d = move(d)\r\n for i in range(1, t):\r\n a[i] = move(a[i])\r\n b[i] = move(b[i])\r\n new_angry(a[i], b[i], 20)\r\n r = randint(30,50)\r\n new_sad(c, d, r)\r\n pygame.display.update()\r\n screen.fill(BLACK)\r\n\r\npygame.quit()","sub_path":"game_smiles.py","file_name":"game_smiles.py","file_ext":"py","file_size_in_byte":7484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"621411953","text":"import numpy as np\n\nnp.set_printoptions(precision=2) # sólo dos decimales\nnp.set_printoptions(suppress=True) # no usar notación exponencial\n\n\"\"\"\nEjercicio 1: Resolución de un sistema lineal con sustitución progresiva.\n\nFunción sust_prog: Calcula utilizando sustitución progresiva la solución \ndel sistema lineal L*x = b, con L cuadrada triangular inferior no singular.\n\nArgumentos de entrada:\n L: matriz cuadrada triangular inferior no singular del sistema lineal \n L*x = b (array numpy de dos dimensiones).\n b: vector de términos independientes (array numpy de una dimensión).\n \nArgumentos de salida:\n x: solución del sistema L*x = b (array numpy de una dimensión).\n\nEjemplos: \n L = np.array([[2., 0, 0], [1, 2, 0]])\n b = np.array([2., -1, 
0])\n    x = sus_prog(L,b)\n    print('Solución =', x)\n    Salida:\n    Error: La matriz L no es cuadrada\n    Solución = None\n    \n    L = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 0]])\n    b = np.array([2., -1, 0])\n    x = sus_prog(L,b)\n    print('Solución =', x)\n    Salida: \n    Error: La matriz L es singular\n    Solución = None\n    \n    L = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 2]])\n    b = np.array([2., -1, 0])\n    x = sus_prog(L,b)\n    print('Solución =', x)\n    # Comprobamos que L*x = b\n    print('Comprobación: L*x =', np.dot(L,x), 'b = ', b)\n    Salida: \n    Solución = [ 1. -1.  0.]\n    Comprobación: L*x = [ 2. -1.  0.] b =  [ 2. -1.  0.]\n    \n    \n\"\"\"\n\n\ndef sus_prog(L, b):\n    m, n = L.shape\n    if m != n:\n        print('Error: La matriz L no es cuadrada')\n        return None\n    if n != len(b):\n        return None\n    if np.linalg.det(L) == 0:\n        print('Error: La matriz L es singular')\n        return None\n\n    x = np.zeros_like(b)\n    x[0] = b[0] / L[0][0]\n    for i in range(len(b) - 1):\n        index = i + 1\n        x[index] = (1 / L[index][index]) * (b[index] - sum(L[index][:index] * x[:index]))\n    return x\n\n\nL = np.array([[2., 0, 0], [1, 2, 0]])\nb = np.array([2., -1, 0])\nx = sus_prog(L, b)\nprint('Solución =', x)\n\nL = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 0]])\nb = np.array([2., -1, 0])\nx = sus_prog(L, b)\nprint('Solución =', x)\n\nL = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 2]])\nb = np.array([2., -1, 0])\nx = sus_prog(L, b)\nprint('Solución =', x)\nprint('Comprobación: L*x =', np.dot(L, x), 'b = ', b)\n","sub_path":"practica10/Ejercicio1.py","file_name":"Ejercicio1.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"629065964","text":"'''\nCreated on 10 de Mar de 2014\n\n@author: Hugo\n'''\n\n\n#-----------------------------------------------------------------------------------\n# Controllers that allow an operator to administrate its service extensions\n#-----------------------------------------------------------------------------------\n@auth.requires_membership('Operator')\ndef create():\n    \"\"\"\n    This action creates an extension to a tourism operator's service in the database.\n    It creates a form with the fields corresponding to a table row.\n    \"\"\"\n    this_service = db.service(request.args(0,cast=int)) or redirect(URL('operator', 'index'))\n    \n    if this_service.operator_id.owner_id != auth.user.id:\n        \n        session.flash = T(\"Insufficient privileges\")\n        \n        redirect(URL('default','user',args='not_authorized'))\n    \n    db.service_extension.service_id.default = this_service.id\n    \n    db.service_extension.service_id.writable = False\n    \n    db.service_extension.comission.default = 20\n    \n    form = SQLFORM(db.service_extension).process(next=URL('op_services', 'show', args=this_service.id))\n    \n    return dict(form=form, service=this_service)\n\n\n@auth.requires_membership('Operator')\ndef edit():\n    \"\"\"\n    This action modifies or deletes a service extension associated with a tourism service.\n    \"\"\"\n    this_extension = db.service_extension(request.args(0,cast=int)) or redirect(URL('operator', 'index'))\n    \n    this_service = db.service(this_extension.service_id)\n    \n    db.service_extension.service_id.writable = False\n    \n    if this_service.operator_id.owner_id != auth.user.id:\n        \n        session.flash = T(\"Insufficient privileges\")\n        \n        redirect(URL('default','user',args='not_authorized'))\n    \n    form = SQLFORM(db.service_extension, this_extension, deletable=True).process(next=URL('op_services', 'show', args=this_service.id))\n    \n    return dict(form=form, 
service=this_service)\n\n","sub_path":"controllers/op_service_extensions.py","file_name":"op_service_extensions.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"550040909","text":"import socket, pickle, struct, cv2\r\n\r\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nhost_ip = '176.53.65.237' # server IP address\r\nport = 9999 # port to connect to\r\nclient_socket.connect((host_ip, port))\r\ndata = b\"\"\r\npayload_size = struct.calcsize(\"Q\")\r\nwhile True:\r\n    while len(data) < payload_size:\r\n        packet = client_socket.recv(4 * 1024)\r\n        if not packet: break\r\n        data += packet\r\n    packed_msg_size = data[:payload_size]\r\n    data = data[payload_size:]\r\n    msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\r\n\r\n    while len(data) < msg_size:\r\n        data += client_socket.recv(4 * 1024)\r\n    frame_data = data[:msg_size]\r\n    data = data[msg_size:]\r\n    # **** IMPORTANT START ****\r\n    # This part is very important.\r\n    # The frames streamed from the server arrive in the frame variable.\r\n    # After a frame is received, it must be forwarded to the AI unit.\r\n    frame = pickle.loads(frame_data)\r\n    # **** IMPORTANT END ****\r\n    \r\n    # detect_with_ai(frame, camera_id)\r\n    # This function will trigger the AI.\r\n    # It will take a frame and the information about which camera the image came from.\r\n    #********* EXPLANATION START **********\r\n    # The frame itself already comes from the frame variable.\r\n    # The important part here is the camera_id.\r\n    # In the server code as written, one video_server.py file can stream only a single\r\n    # camera image. Therefore only one camera stream can be served per port.\r\n    # The camera_id is also unique for each port.\r\n    # When cameras are registered in the system, they are saved with an id. The parameter we need is camera_id.\r\n    # If, when adding a camera to the system, we enter the camera's id as the port on which its stream is served\r\n    # (i.e. the camera's id in the database = the port on which that camera is streamed on the server),\r\n    # then the port number becomes our camera_id parameter.\r\n    #********* EXPLANATION END **********\r\n    \r\n    cv2.imshow(\"RECEIVING VIDEO\", frame)\r\n    key = cv2.waitKey(1) & 0xFF\r\n    if key == ord('q'):\r\n        break\r\nclient_socket.close()\r\n","sub_path":"video_server_python/video_client.py","file_name":"video_client.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"193450276","text":"#%% \nimport numpy as np\nimport pandas as pd\nimport re\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nfrom statistics import mean \nimport statistics as statistics\nimport matplotlib.pyplot as plt\n\nfrom sklearn import metrics\nimport unicodedata\n\ndataset = pd.read_csv('tweets_database.csv',encoding='cp1252')\ncolunas = [\"texto\",\"classificacao\"]\nlista = [\"usuario\",\"id\",\"texto\",\"classificacao\",\"categoria\"]\n\ndf = pd.read_csv('tweets_database.csv', sep = ',', names = lista, usecols=colunas, encoding=\"utf-8\")\ndf = df.drop(0)\n\ntextoEntradas = df.texto\nlabels = df.classificacao\n\nstopwords = [\"de\",\"a\",\"o\",\"que\",\"e\",\"do\",\n\"da\",\"em\",\"um\",\"para\",\"é\",\"com\",\"não\",\"uma\"\n,\"os\",\"no\",\"se\",\"na\",\"por\",\"mais\",\"as\",\"dos\"\n,\"como\",\"mas\",\"foi\",\"ao\",\"ele\",\"das\",\"tem\",\"à\"\n,\"seu\",\"sua\",\"ou\",\"ser\",\"quando\",\"muito\",\"há\"\n,\"nos\",\"já\",\"está\",\"eu\",\"também\",\"só\",\"pelo\"\n,\"pela\",\"até\",\"isso\",\"ela\",\"entre\",\"era\",\"depois\"\n,\"sem\",\"mesmo\",\"aos\",\"ter\",\"seus\",\"quem\",\"nas\"\n,\"me\",\"esse\",\"eles\",\"estão\",\"você\",\"tinha\",\"foram\"\n,\"essa\",\"num\",\"nem\",\"suas\",\"meu\",\"às\",\"minha\",\"têm\"\n,\"numa\",\"pelos\",\"elas\",\"havia\",\"seja\",\"qual\",\"será\"\n,\"nós\",\"tenho\",\"lhe\",\"deles\",\"essas\",\"esses\",\"pelas\"\n,\"este\",\"fosse\",\"dele\",\"tu\",\"te\",\"vocês\",\"vos\",\"lhes\"\n,\"meus\",\"minhas\",\"teu\",\"tua\",\"teus\",\"tuas\",\"nosso\"\n,\"nossa\",\"nossos\",\"nossas\",\"dela\",\"delas\",\"esta\"\n,\"estes\",\"estas\",\"aquele\",\"aquela\",\"aqueles\",\"aquelas\"\n,\"isto\",\"aquilo\",\"estou\",\"está\",\"estamos\",\"estão\"\n,\"estive\",\"esteve\",\"estivemos\",\"estiveram\",\"estava\",\"estávamos\"\n,\"estavam\",\"estivera\",\"estivéramos\",\"esteja\",\"estejamos\"\n,\"estejam\",\"estivesse\",\"estivéssemos\",\"estivessem\",\"estiver\"\n,\"estivermos\",\"estiverem\",\"hei\",\"há\",\"havemos\",\"hão\",\"houve\",\n\"houvemos\",\"houveram\",\"houvera\",\"houvéramos\",\"haja\",\"hajamos\",\n\"hajam\",\"houvesse\",\"houvéssemos\",\"houvessem\",\"houver\",\"houvermos\",\n\"houverem\",\"houverei\",\"houverá\",\"houveremos\",\"houverão\",\"houveria\",\n\"houveríamos\",\"houveriam\",\"sou\",\"somos\",\"são\",\"era\",\"éramos\",\"eram\",\n\"fui\",\"foi\",\"fomos\",\"foram\",\"fora\",\"fôramos\",\"seja\",\"sejamos\",\"sejam\",\n\"fosse\",\"fôssemos\",\"fossem\",\"for\",\"
formos\",\"forem\",\"serei\",\"será\",\n\"seremos\",\"serão\",\"seria\",\"seríamos\",\"seriam\",\"tenho\",\"tem\",\"temos\",\"tém\",\n\"tinha\",\"tínhamos\",\"tinham\",\"tive\",\"teve\",\"tivemos\",\"tiveram\",\"tivera\",\n\"tivéramos\",\"tenha\",\"tenhamos\",\"tenham\",\"tivesse\",\"tivéssemos\"\n,\"tivessem\",\"tiver\",\"tivermos\",\"tiverem\",\"terei\",\"terá\",\"teremos\"\n,\"terão\",\"teria\",\"teríamos\",\"teriam\"]\n\ndef remover_acentuacao(input_str):\n nfkd_form = unicodedata.normalize('NFKD', input_str)\n return u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])\n\ndef preprocessamento_texto(text): \n for palavra in stopwords:\n text = text.replace(\" \" + palavra + \" \", \" \")\n text = remover_acentuacao(text)\n text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','', text)\n text = re.sub('@[^\\s]+','', text) \n text = text.lower()\n text = re.sub('[^a-zA-Zа-яА-Я1-9]+', ' ', text)\n text = re.sub(' +',' ', text)\n return text.strip()\n\nentradas = [preprocessamento_texto(t) for t in textoEntradas]\n\ntext_clf = Pipeline([('vect', CountVectorizer()), \n ('clf', MultinomialNB())])\n\ntuned_parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2), (2, 2)],\n 'clf__alpha': [1, 1e-1, 1e-2]\n}\n\nscore = 'accuracy'\n\ntSize = 0.2\n\nacuraccies = []\n\nwhile tSize <= 0.35:\n x_train, x_test, y_train, y_test = train_test_split(entradas, labels, test_size=tSize, random_state=42)\n clf = GridSearchCV(text_clf, tuned_parameters, cv=5, scoring=score)\n np.errstate(divide='ignore')\n clf.fit(x_train, y_train)\n print(\"Tamanho amostra de treinamento = %0.3f\" % tSize)\n print(\"Resultados para o CV.\")\n print(\"MÉDIA (+/- DESVIO PADRÃO)\")\n for mean, std, params in zip(clf.cv_results_['mean_test_score'], \n clf.cv_results_['std_test_score'], \n clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n print()\n print(\"Parametros Globais com os melhores resultados:\")\n print()\n print(clf.best_params_)\n print()\n cr = classification_report(y_test, clf.predict(x_test), digits=4, output_dict=True)\n valor = cr['accuracy']\n acuraccies.append(valor)\n print()\n print()\n y_pred = clf.best_estimator_.predict(x_train)\n print()\n tSize += 0.01\n\nprint(acuraccies)\n\ndef media(vect):\n return sum(vect)/(len(vect)) \n\n\n\nprint(\"Media das acuracias = \", statistics.mean(acuraccies))\nprint(\"Maior acuracia = \", max(acuraccies))\nprint(\"Menor acuracia = \", min(acuraccies))\nprint(\"Mediana = \", statistics.median(acuraccies))\n\n#%%","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"429065733","text":"# Import module re\r\nimport re\r\n\r\n# \"^\" Matches the start of the string\r\n\r\ndef beginsWithHello():\r\n # Creating regex object\r\n regex = re.compile(r'^Hello') \r\n # Matching object\r\n mo = regex.search(\"Hello World!\")\r\n print(mo)\r\n print(mo.group())\r\n\r\n# \"$\" Matches the end of the string or just \r\n# before the newline at the of the string\r\n\r\ndef endsWithNumber():\r\n # Creating regex object\r\n regex = re.compile(r'\\d+$')\r\n # Matching object\r\n mo = regex.search(\"Your number is 42\")\r\n print(mo)\r\n print(mo.group())\r\n\r\ndef wholeStringIsNum():\r\n # Creating regex object\r\n regex = re.compile(r'^\\d+$')\r\n # Matching object\r\n mo = regex.search(\"0123456789\")\r\n print(mo)\r\n print(mo.group())\r\n\r\n# Calling 
function\r\nbeginsWithHello()\r\nprint()\r\nendsWithNumber()\r\nprint()\r\nwholeStringIsNum()\r\n","sub_path":"chapter_07/12_at_the_begin_and_end/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"123666252","text":"\"\"\"\nCommands used to check the health of the bot.\n\"\"\"\nimport datetime\nfrom time import monotonic\n\nfrom dog import Cog\n\nfrom discord.ext import commands\n\n\nclass Health(Cog):\n    @commands.command()\n    async def ping(self, ctx):\n        \"\"\" Pong! \"\"\"\n\n        # measure gateway delay\n        before = monotonic()\n        msg = await ctx.send('\\u200b')\n        after = monotonic()\n\n        pong_ws = round(ctx.bot.latency * 1000, 2)\n        pong_rest = round((after - before) * 1000, 2)\n        pong_gateway_lag = round((datetime.datetime.utcnow() - msg.created_at).total_seconds() * 1000, 2)\n\n        pong = f'Pong! WS: {pong_ws}ms, REST: {pong_rest}ms, GW lag: {pong_gateway_lag}ms'\n        await msg.edit(content=pong)\n\n\ndef setup(bot):\n    bot.add_cog(Health(bot))\n","sub_path":"dog/core/ext/health.py","file_name":"health.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"70475893","text":"#!/usr/bin/env python3\nfrom xml.etree import cElementTree\n\n\nwith open(\"data.xml\", \"r\") as f:\n    tree = cElementTree.parse(f)\n\n\nfor path in (\"./child\", \"./child_with_tail\"):\n    node = tree.find(path)\n    print(node.tag)\n    print(\" child node text:\", node.text)\n    print(\" and tail text :\", node.tail)","sub_path":"data_persistence/_xml/elementtree_node_text.py","file_name":"elementtree_node_text.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"425544864","text":"from net import DeepNet, AdvNet\nfrom train import Train\nfrom keras.callbacks import Callback\nimport os\n\nclass Job(object):\n    def describe(self): return self.__class__.__name__\n    def __init__(self, name, problem, nfold, train_fold, epochs, hidden_Nlayer, hidden_Nnode, lr, momentum, output, activation, dropout_rate, para_train=None):\n        para_train = {} if para_train is None else para_train\n        self.nfold = int(nfold)\n        self.problem = int(problem)\n        self.train_fold = int(train_fold)\n        self.epochs = int(epochs)\n        self.hidden_Nlayer = int(hidden_Nlayer)\n        self.hidden_Nnode = int(hidden_Nnode)\n        self.lr = float(lr)\n        self.momentum = float(momentum)\n        self.activation = activation\n        self.dropout_rate = float(dropout_rate)\n        self.name = output if name is None else name\n        self.output = f'job_{self.name}__l{self.hidden_Nlayer}n{self.hidden_Nnode}_lr{self.lr}mom{self.momentum}_{self.activation}_k{self.nfold}_dp{self.dropout_rate}_e{self.epochs}_plb{self.problem}' if output is None else output\n\n        self.para_train = para_train\n        para_train['base_directory'] = self.output\n        print('\\033[92m[INFO]\\033[0m', '\\033[92mJobname \\033[0m', self.output)\n\n    def run(self):\n\n        ''' An instance of Train for data handling '''\n        self.trainer = Train(**self.para_train)\n        self.trainer.split(nfold = self.nfold)\n\n        ''' An instance of DeepNet for network construction and pass it to Train '''\n        self.deepnet = DeepNet(name = self.name, problem = self.problem, build_dis = False, hidden_Nlayer = self.hidden_Nlayer, hidden_Nnode = self.hidden_Nnode, hidden_activation = self.activation, dropout_rate = self.dropout_rate)\n        self.deepnet.build(input_dimension = self.trainer.shape, lr = self.lr, momentum = self.momentum)\n        
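# Export a plot of the freshly built network, then hand the generator to the trainer.\n        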
self.deepnet.plot(base_directory = self.output)\n self.trainer.setNetwork(self.deepnet.generator)\n \n ''' Run the training '''\n self.result = self.trainer.train(mode = 0, epochs = self.epochs, fold = self.train_fold, batch_size = 512)\n self.trainer.plotLoss(self.result)\n self.trainer.plotResults()\n\n def saveModel(self, prefix):\n # serialize model to JSON\n model_json = self.trainer.network.to_json()\n with open(prefix + '.json', 'w') as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n self.trainer.network.save_weights(prefix + '.h5')\n print('Saved', prefix, 'to disk')\n\nclass JobAdv(Job):\n def __init__(self, preTrain_epochs, hidden_auxNlayer, hidden_auxNnode, batch_size, n_iteraction, lam, alr, amomentum, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)\n self.preTrain_epochs = int(preTrain_epochs)\n self.hidden_auxNlayer = int(hidden_auxNlayer)\n self.hidden_auxNnode = int(hidden_auxNnode)\n self.batch_size = int(batch_size)\n self.n_iteraction = int(n_iteraction)\n self.lam = float(lam)\n self.alr = float(alr)\n self.amomentum = float(amomentum)\n self.output = f'{self.output}__E{self.preTrain_epochs}_L{self.hidden_auxNlayer}N{self.hidden_auxNnode}_BS{self.batch_size}_alr{self.alr}mom{self.amomentum}_it{self.n_iteraction}_Loss{self.problem}_lam{self.lam}'\n self.para_train['base_directory'] = self.output\n print('\\033[92m[INFO]\\033[0m', '\\033[92mJobname \\033[0m', self.output)\n \n def run(self):\n\n ''' An instance of Train for data handling '''\n self.trainer = Train(**self.para_train)\n self.trainer.split(nfold = self.nfold)\n\n ''' An instance of AdvNet for network construction and pass it to Train '''\n self.advnet = AdvNet(name = self.name, problem = self.problem, build_dis = True, hidden_Nlayer = self.hidden_Nlayer, hidden_Nnode = self.hidden_Nnode,\n hidden_activation = self.activation, hidden_auxNlayer = self.hidden_auxNlayer, hidden_auxNnode = self.hidden_auxNnode, dropout_rate = self.dropout_rate)\n self.advnet.build(input_dimension = self.trainer.shape, lam = self.lam, lr = self.lr, momentum = self.momentum, alr = self.alr, amomentum = self.amomentum)\n self.advnet.plot(base_directory = self.output)\n\n class Evaluate(Callback):\n def __init__(self, name, trainer, mode):\n self.name = name\n self.trainer = trainer\n self.mode = mode\n def on_epoch_end(self, epoch, logs):\n print('\\033[92m[INFO]\\033[0m', '\\033[92mCheckpoint \\033[0m', self.name, epoch, self.mode, logs)\n# self.trainer.plotResults(self.name + str(epoch), mode)\n \n ''' pre-training '''\n if self.preTrain_epochs != 0:\n prefix = 'pre-gen'\n print('\\033[92m[INFO]\\033[0m', '\\033[92mpre-training generator (1st) with epochs\\033[0m', self.preTrain_epochs)\n AdvNet.make_trainable(self.advnet.discriminator, False)\n AdvNet.make_trainable(self.advnet.generator, True)\n self.trainer.setNetwork(self.advnet.generator)\n self.result = self.trainer.train(mode = 1, epochs = self.preTrain_epochs, fold = self.train_fold, batch_size = self.batch_size, callbacks=[Evaluate(prefix, self.trainer, 'y')])\n self.trainer.plotLoss(self.result, prefix)\n self.trainer.plotResults(prefix, 'y')\n\n prefix = 'pre-dis'\n dis_preTrain_epochs = 1\n print('\\033[92m[INFO]\\033[0m', '\\033[92mpre-training discriminator (2nd) with epochs\\033[0m', dis_preTrain_epochs)\n AdvNet.make_trainable(self.advnet.discriminator, True)\n AdvNet.make_trainable(self.advnet.generator, False)\n self.trainer.setNetwork(self.advnet.discriminator)\n self.result = self.trainer.train(mode = 2, epochs 
= dis_preTrain_epochs, fold = self.train_fold, batch_size = self.batch_size, callbacks=[Evaluate(prefix, self.trainer, 'z')])\n            self.trainer.plotLoss(self.result, prefix, True)\n            self.trainer.plotResults(prefix, 'z')\n        else:\n            print('\\033[91m[INFO]\\033[0m', '\\033[91mpre-training skipped!\\033[0m')\n\n        self.output_path = '/'.join([self.output, self.describe()]) + '/'\n        if not os.path.exists(self.output_path):\n            os.makedirs(self.output_path)\n        ''' Iterative training '''\n        for i in range(1, self.n_iteraction+1):\n\n            prefix = 'iter-gen' + str(i)\n            print('\\033[92m[INFO] Going to train\\033[0m', i, '\\033[92miteration, generator (1st) with epochs\\033[0m', self.epochs)\n            AdvNet.make_trainable(self.advnet.discriminator, False)\n            AdvNet.make_trainable(self.advnet.generator, True)\n            self.trainer.setNetwork(self.advnet.adversary)\n            self.result = self.trainer.train(mode = 3, epochs = self.epochs, fold = self.train_fold, batch_size = self.batch_size, callbacks=[Evaluate(prefix, self.trainer, 'yz')])\n\n            AdvNet.make_trainable(self.advnet.discriminator, True)\n            AdvNet.make_trainable(self.advnet.generator, True)\n            self.trainer.plotIteration(i)\n\n            mode = 'y'\n            print('\\033[92m[DEBUG] Going to inspect prediction by\\033[0m', prefix, '\\033[92mwith mode\\033[0m', mode)\n            self.trainer.setNetwork(self.advnet.generator)\n            if (i % 5 == 0):\n                self.trainer.plotResults(prefix, mode)\n\n            if (i % 5 == 0):\n                self.saveModel(self.output_path + self.trainer.name + '_' + str(i))\n\n            print('\\033[92m[INFO] Going to train\\033[0m', i, '\\033[92miteration, discriminator (2nd) with epochs\\033[0m', 1)\n            AdvNet.make_trainable(self.advnet.discriminator, True)\n            AdvNet.make_trainable(self.advnet.generator, False)\n            self.trainer.setNetwork(self.advnet.discriminator)\n            self.result = self.trainer.train(mode = 2, epochs = 1, fold = self.train_fold, batch_size = self.batch_size, callbacks=[Evaluate(prefix, self.trainer, 'z')])\n\n            prefix = 'iter-dis' + str(i)\n            mode = 'z'\n            print('\\033[92m[INFO] Going to inspect prediction by\\033[0m', prefix, '\\033[92mwith mode\\033[0m', mode)\n            if (i % 5 == 0) or (i<2):\n                self.trainer.plotResults(prefix, mode)\n\n            #self.trainer.setNetwork(self.advnet.adversary)\n            #self.trainer.plotIteration(i+0.5)\n\n        print('\\033[92m[INFO]\\033[0m', self.n_iteraction, '\\033[92mIteration done, storing and plotting results.\\033[0m')\n        self.trainer.setNetwork(self.advnet.adversary)\n        self.trainer.saveLoss()\n        self.trainer.setNetwork(self.advnet.generator)\n        self.trainer.plotResults('iter-genFinal')\n\n","sub_path":"advnn/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"291762884","text":"def maj(nums):\n    if nums is None or len(nums)==0: return None\n    a = nums[0]\n    count = 1\n    for i in range(1, len(nums)):\n        if nums[i]!=a: count-=1\n        else: count+=1\n        if count==0:\n            a = nums[i]\n            count = 1\n    return a\ndef main():\n    a = [0,2,3,1,2,1,2,1,1,1,1]\n    print(maj(a))\nif __name__ == \"__main__\":\n    main()\n","sub_path":"pr/169.py","file_name":"169.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"335670408","text":"from flask import Flask, request, jsonify\nimport json\nfrom fixture.main import Maining\nfrom driver.convertor import Convertor\nimport threading\nfrom driver.loging import Loging\n\n\nserver = Flask(__name__)\nconvertor = Convertor()\nlog = Loging()\nmain = Maining(log=log)\n\n\ndef 
start_testing(data):\n main.set_object_data(data)\n main.start_frontol()\n main.make_document()\n main.exit_frontol()\n\n\n@server.route('/add_task', methods=['GET', 'POST'])\ndef read_json_file():\n file = request.get_json(silent=True)\n flag = convertor.convert_to_test_model(file)\n if flag == 0:\n thread = threading.Thread(target=start_testing, args=(convertor.data, ))\n thread.start()\n return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}\n if flag == -1:\n return json.dumps({'success': False}), 200, {'ContentType': 'application/json'}\n\n@server.route('/get_err', methods=['GET', 'POST'])\ndef get_err():\n return jsonify({'exception': convertor.err})\n\n@server.route('/get_log', methods=['GET', 'POST'])\ndef get_log():\n return jsonify({'log_file': log.log_list})\n\n\n\nif __name__ == '__main__':\n server.run(host=\"127.0.0.1\", port=\"8000\", debug=True)","sub_path":"soft_testing.py","file_name":"soft_testing.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"346506279","text":"import time\r\n\r\ndef pythagchecker(a,b,c):\r\n if a ** 2 + b ** 2 == c ** 2:\r\n return('This is a Pythagorean Triple')\r\n elif a ** 2 + c ** 2 == b ** 2:\r\n return('This is a Pythagorean Triple')\r\n elif b ** 2 + c ** 2 == a ** 2:\r\n return('This is a Pythagorean Triple')\r\n else:\r\n return('This is not a Pythagorean Triple')\r\n\r\nloop = 'yes'\r\nwhile loop == 'yes':\r\n side1 = int(input('Side 1?'))\r\n side2 = int(input('Side 2?'))\r\n side3 = int(input('Side 3?'))\r\n print(pythagchecker(side1,side2,side3))\r\n run_again = input('Do you have another triangle to check? yes or no?: ')\r\n if run_again == 'yes' or run_again == 'Yes':\r\n print('Resetting...')\r\n print()\r\n print()\r\n time.sleep(2)\r\n elif run_again == 'no' or run_again == 'No':\r\n print('Have a nice day!')\r\n loop = 'Stop'\r\n else:\r\n print(\"Couldn't understand your input... 
Sorry.\")\r\n loop = 'Stop'\r\n ","sub_path":"Thonny/PythagoreanProj.py","file_name":"PythagoreanProj.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"394940051","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\ntemp = list(map(int,input().split()))\na = []\nstack1 = []\nfor i in range(len(temp)):\n stack1.append((temp[i],i+1))\nstack2 = []\nans = [0]*n\n\nwhile stack1:\n v,idx = stack1.pop()\n\n if not stack2:\n stack2.append((v,idx))\n else:\n while stack2:\n v2,idx2 = stack2[-1]\n if v>v2:\n ans[idx2-1] = idx\n stack2.pop()\n else:break\n stack2.append((v,idx))\nfor v in ans:\n print(v,end=' ')","sub_path":"Gold5/탑.py","file_name":"탑.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"424787330","text":"from itertools import chain\nfrom optparse import make_option\n\nimport fbuild\nimport fbuild.db\nfrom fbuild.functools import call\nfrom fbuild.path import Path\nfrom fbuild.record import Record\n\nimport buildsystem\nfrom buildsystem.config import config_call\n\n# ------------------------------------------------------------------------------\n\ndef pre_options(parser):\n group = parser.add_option_group('config options')\n group.add_options((\n make_option('--prefix',\n default='/usr/local',\n help='specify the install location (default /usr/local)'),\n make_option('--bindir',\n default=None,\n help='specify the binary install location (default $PREFIX/bin)'),\n make_option('--libdir',\n default=None,\n help='specify the library install location (default $PREFIX/lib)'),\n make_option('-I', '--include',\n dest='includes',\n default=[],\n action='append',\n help='Add this path to the c header search path for all phases'),\n make_option('-L', '--library-path',\n dest='libpaths',\n default=[],\n action='append',\n help='Add this path to the c library search path for all phases'),\n make_option('--c-flag',\n dest='c_flags',\n default=[],\n action='append',\n help='Add this flag to the c compiler'),\n make_option('-g', '--debug',\n default=False,\n action='store_true',\n help='enable debugging for all phases'),\n make_option('--skip-tests',\n default=False,\n action='store_true',\n help='skip running tests'),\n ))\n\n group = parser.add_option_group('build phase options')\n group.add_options((\n make_option('--build-platform',\n help='specify the build phase platform'),\n make_option('--build-cc',\n help='specify the build phase c compiler'),\n make_option('--build-cxx',\n help='specify the build phase c++ compiler'),\n make_option('--build-include',\n dest='build_includes',\n default=[],\n action='append',\n help='Add this path to the c header search path for the build ' \\\n 'phase'),\n make_option('--build-library-path',\n dest='build_libpaths',\n default=[],\n action='append',\n help='Add this path to the c library search path for the build ' \\\n 'phase'),\n make_option('--build-c-flag',\n dest='build_c_flags',\n default=[],\n action='append',\n help='Add this flag to the c compiler for the build phase'),\n make_option('--build-c-debug',\n default=False,\n action='store_true',\n help='turn on c/c++ build phase debugging'),\n ))\n\n group = parser.add_option_group('host phase options')\n group.add_options((\n make_option('--host-platform',\n help='specify the host phase platform'),\n make_option('--host-cc',\n help='specify the host phase c compiler'),\n 
make_option('--host-cxx',\n help='specify the host phase c++ compiler'),\n make_option('--host-include',\n dest='host_includes',\n default=[],\n action='append',\n help='Add this path to the c header search path for the host ' \\\n 'phase'),\n make_option('--host-library-path',\n dest='host_libpaths',\n default=[],\n action='append',\n help='Add this path to the c library search path for the host ' \\\n 'phase'),\n make_option('--host-c-flag',\n dest='host_c_flags',\n default=[],\n action='append',\n help='Add this flag to the c compiler for the host phase'),\n make_option('--host-c-debug',\n default=False,\n action='store_true',\n help='turn on c/c++ host phase debugging'),\n make_option('--host-ocaml-debug',\n default=False,\n action='store_true',\n help='turn on ocaml debugging'),\n make_option('--host-ocamlc',\n help='specify the ocaml bytecode compiler'),\n make_option('--host-ocamlopt',\n help='specify the ocaml native compiler'),\n make_option('--host-ocamllex',\n help='specify the ocaml lexer'),\n make_option('--host-llvm-config',\n help='specify the llvm-config script'),\n ))\n\n group = parser.add_option_group('target phase options')\n group.add_options((\n make_option('--target-platform',\n help='specify the target phase platform'),\n make_option('--target-cc',\n help='specify the target phase c compiler'),\n make_option('--target-cxx',\n help='specify the target phase c++ compiler'),\n make_option('--target-include',\n dest='target_includes',\n default=[],\n action='append',\n help='Add this path to the c header search path for the target ' \\\n 'phase'),\n make_option('--target-library-path',\n dest='target_libpaths',\n default=[],\n action='append',\n help='Add this path to the c library search path for the target ' \\\n 'phase'),\n make_option('--target-c-debug',\n default=False,\n action='store_true',\n help='turn on c/c++ target phase debugging'),\n make_option('--target-c-flag',\n dest='target_c_flags',\n default=[],\n action='append',\n help='Add this flag to the c compiler for the target phase'),\n make_option('--target-sdl-config',\n help='specify the sdl-config script'),\n ))\n\ndef post_options(options, args):\n options.prefix = Path(options.prefix)\n options.bindir = Path(\n options.prefix / 'bin' if options.bindir is None else options.bindir)\n options.libdir = Path(\n options.prefix / 'lib' if options.libdir is None else options.libdir)\n\n if options.debug:\n options.buildroot = Path(options.buildroot, 'debug')\n else:\n options.buildroot = Path(options.buildroot, 'release')\n\n return options, args\n\n# ------------------------------------------------------------------------------\n\ndef make_c_builder(ctx, *args, includes=[], libpaths=[], flags=[], **kwargs):\n flags = list(chain(ctx.options.c_flags, flags))\n\n kwargs['platform_options'] = [\n # GRRR .. 
for clang\n ({'darwin'},\n {'warnings': ['all', 'fatal-errors', \n 'no-constant-logical-operand',\n 'no-array-bounds',\n ],\n 'flags': ['-fno-common'] + flags,\n 'optimize_flags': ['-fomit-frame-pointer']}),\n ({'posix'},\n {'warnings': ['all', 'fatal-errors'],\n 'flags': ['-fno-common', '-fno-strict-aliasing'] + flags,\n 'optimize_flags': ['-fomit-frame-pointer']}),\n ({'windows'}, {\n 'flags': ['/GR', '/MD', '/EHs', '/wd4291'] + flags,\n 'optimize_flags': ['/Ox']}),\n ]\n kwargs['includes'] = list(chain(ctx.options.includes, includes))\n kwargs['libpaths'] = list(chain(ctx.options.libpaths, libpaths))\n\n return Record(\n static=call('fbuild.builders.c.guess_static', ctx, *args, **kwargs),\n shared=call('fbuild.builders.c.guess_shared', ctx, *args, **kwargs))\n\ndef make_cxx_builder(ctx, *args, includes=[], libpaths=[], flags=[], **kwargs):\n flags = list(chain(ctx.options.c_flags, flags))\n\n kwargs['platform_options'] = [\n # GRRR .. for clang++\n ({'darwin'}, {\n 'warnings': ['fatal-errors', \n 'no-invalid-offsetof', \n 'no-logical-op-parentheses',\n 'no-bitwise-op-parentheses',\n 'no-parentheses-equality',\n 'no-parentheses',\n 'no-return-stack-address',\n 'no-tautological-compare',\n 'no-return-type-c-linkage',\n 'no-unused-variable',\n 'no-unused-function',\n 'no-c++11-narrowing',\n 'no-missing-braces',\n 'no-return-type-c-linkage',\n ],\n 'flags': ['-w','-fno-common', '-fno-strict-aliasing', '-std=c++11'] + flags,\n 'optimize_flags': ['-fomit-frame-pointer']}),\n ({'posix'}, {\n 'warnings': ['fatal-errors', 'no-invalid-offsetof','no-parentheses'],\n 'flags': ['-w','-fno-common', '-fno-strict-aliasing'] + flags,\n 'optimize_flags': ['-fomit-frame-pointer']}),\n ({'windows'}, {\n 'flags': ['/GR', '/MD', '/EHs', '/wd4291'] + flags,\n 'optimize_flags': ['/Ox']}),\n ]\n kwargs['includes'] = list(chain(ctx.options.includes, includes))\n kwargs['libpaths'] = list(chain(ctx.options.libpaths, libpaths))\n\n return Record(\n static=call('fbuild.builders.cxx.guess_static', ctx, *args, **kwargs),\n shared=call('fbuild.builders.cxx.guess_shared', ctx, *args, **kwargs))\n\ndef config_build(ctx):\n ctx.logger.log('configuring build phase', color='cyan')\n\n platform = call('fbuild.builders.platform.guess_platform', ctx,\n ctx.options.build_platform)\n return Record(\n ctx=ctx,\n platform=platform,\n c=make_c_builder(ctx, ctx.options.build_cc,\n platform=platform,\n debug=ctx.options.debug or ctx.options.build_c_debug,\n optimize=not (ctx.options.debug or ctx.options.build_c_debug),\n includes=ctx.options.build_includes,\n libpaths=ctx.options.build_libpaths,\n flags=ctx.options.build_c_flags),\n cxx=make_cxx_builder(ctx, ctx.options.build_cxx,\n platform=platform,\n debug=ctx.options.debug or ctx.options.build_c_debug,\n optimize=not (ctx.options.debug or ctx.options.build_c_debug),\n includes=ctx.options.build_includes,\n libpaths=ctx.options.build_libpaths,\n flags=ctx.options.build_c_flags))\n\ndef config_host(ctx, build):\n ctx.logger.log('configuring host phase', color='cyan')\n\n platform = call('fbuild.builders.platform.guess_platform', ctx,\n ctx.options.build_platform)\n\n if platform == build.platform:\n ctx.logger.log(\"using build's c and cxx compiler\", color='cyan')\n phase = build\n else:\n phase = Record(\n ctx=ctx,\n platform=platform,\n c=make_c_builder(ctx, fbuild.builders.host_cc,\n platform=platform,\n debug=ctx.options.debug or ctx.options.host_c_debug,\n optimize=not (ctx.options.debug or ctx.options.host_c_debug),\n includes=ctx.options.host_includes,\n 
libpaths=ctx.options.host_libpaths,\n                flags=ctx.options.host_c_flags),\n            cxx=make_cxx_builder(ctx, fbuild.builders.host_cxx,\n                platform=platform,\n                debug=ctx.options.debug or ctx.options.host_c_debug,\n                optimize=not (ctx.options.debug or ctx.options.host_c_debug),\n                includes=ctx.options.host_includes,\n                libpaths=ctx.options.host_libpaths,\n                flags=ctx.options.host_c_flags))\n\n    phase.ocaml = call('fbuild.builders.ocaml.Ocaml', ctx,\n        debug=ctx.options.debug or ctx.options.host_ocaml_debug,\n        ocamlc=ctx.options.host_ocamlc,\n        ocamlopt=ctx.options.host_ocamlopt,\n        flags=['-w', 'yzex', '-warn-error', 'FPSU'],\n        requires_at_least_version=(3, 11))\n\n    phase.ocamllex = call('fbuild.builders.ocaml.Ocamllex', ctx,\n        ctx.options.host_ocamllex)\n\n    # we prefer the native ocaml as it's much faster\n    if hasattr(phase.ocaml, 'ocamlopt'):\n        phase.ocaml = phase.ocaml.ocamlopt\n    else:\n        phase.ocaml = phase.ocaml.ocamlc\n\n    # We optionally support llvm\n    try:\n        llvm_config = call('fbuild.builders.llvm.LlvmConfig', ctx,\n            ctx.options.host_llvm_config,\n            requires_at_least_version=(2, 7))\n    except fbuild.ConfigFailed:\n        phase.llvm_config = None\n    else:\n        if llvm_config.ocaml_libdir().exists():\n            #phase.llvm_config = llvm_config\n            phase.llvm_config = None\n        else:\n            phase.llvm_config = None\n\n    return phase\n\ndef config_target(ctx, host):\n    ctx.logger.log('configuring target phase', color='cyan')\n\n    platform = call('fbuild.builders.platform.guess_platform', ctx,\n        ctx.options.target_platform)\n\n    if platform == host.platform:\n        ctx.logger.log("using host's c and cxx compiler", color='cyan')\n        phase = host\n    else:\n        phase = Record(\n            ctx=ctx,\n            platform=platform,\n            c=make_c_builder(ctx, ctx.options.target_cc,\n                platform=platform,\n                debug=ctx.options.debug or ctx.options.target_c_debug,\n                optimize=not (ctx.options.debug or ctx.options.target_c_debug),\n                includes=ctx.options.target_includes,\n                libpaths=ctx.options.target_libpaths,\n                flags=ctx.options.target_c_flags),\n            cxx=make_cxx_builder(ctx, ctx.options.target_cxx,\n                platform=platform,\n                debug=ctx.options.debug or ctx.options.target_c_debug,\n                optimize=not (ctx.options.debug or ctx.options.target_c_debug),\n                includes=ctx.options.target_includes,\n                libpaths=ctx.options.target_libpaths,\n                flags=ctx.options.target_c_flags))\n\n    # We optionally support sdl\n    try:\n        phase.sdl_config = call('fbuild.builders.sdl.SDLConfig', ctx,\n            ctx.options.target_sdl_config,\n            requires_at_least_version=(1, 3))\n    except (fbuild.ConfigFailed,OSError):\n        phase.sdl_config = None\n\n    return phase\n\n# ------------------------------------------------------------------------------\n\n@fbuild.db.caches\ndef prefix(ctx):\n    prefix = Path(ctx.options.prefix)\n    ctx.logger.check('install prefix', prefix, color='cyan')\n\n    return prefix\n\n@fbuild.db.caches\ndef src_dir(ctx):\n    return Path(__file__).parent\n\n# ------------------------------------------------------------------------------\n\n@fbuild.target.register()\ndef configure(ctx):\n    \"\"\"Configure Felix.\"\"\"\n\n    build = config_build(ctx)\n    host = config_host(ctx, build)\n    target = config_target(ctx, host)\n\n    # Make sure the config directories exist.\n    #(ctx.buildroot / 'host/config').makedirs()\n\n    # copy the config directory for initial config\n    # this will be overwritten by subsequent steps if\n    # necessary\n    #\n    buildsystem.copy_to(ctx, ctx.buildroot/'host/config', Path('src/config/*.fpc').glob())\n    # most of these ones are actually platform independent\n    # just do the windows EXTERN to dllexport mapping\n    # which is controlled 
by compile time switches anyhow\n # should probably move these out of config directory\n # they're put in config in case there really are any\n # platform mods.\n buildsystem.copy_to(ctx, ctx.buildroot/'host/lib/rtl',\n Path('src/config/target/*.hpp').glob())\n buildsystem.copy_to(ctx, ctx.buildroot/'host/lib/rtl',\n Path('src/config/target/*.h').glob())\n\n types = config_call('fbuild.config.c.c99.types',\n target.platform, target.c.static)\n\n\n # this is a hack: assume we're running on Unix.\n # later when Erick figures out how to fix this\n # we'd copy the win32 subdirectory entries instead\n if \"posix\" in target.platform:\n print(\"COPYING POSIX RESOURCE DATABASE\")\n buildsystem.copy_to(ctx,\n ctx.buildroot / 'host/config', Path('src/config/unix/*.fpc').glob())\n if types.voidp.size == 4:\n print(\"32 bit Unix\")\n buildsystem.copy_to(ctx,\n ctx.buildroot / 'host/config', Path('src/config/unix/unix32/*.fpc').glob())\n else:\n print(\"64 bit Unix\")\n buildsystem.copy_to(ctx,\n ctx.buildroot / 'host/config', Path('src/config/unix/unix64/*.fpc').glob())\n\n\n # enable this on win32 **instead** of the above to copy fpc files \n if \"windows\" in target.platform:\n print(\"COPYING WIN32 RESOURCE DATABASE\")\n buildsystem.copy_to(ctx,\n ctx.buildroot / 'host/config', Path('src/config/win32/*.fpc').glob())\n\n # enable this on solaris to clobber any fpc files \n # where the generic unix ones are inadequate\n #buildsystem.copy_to(ctx,\n # ctx.buildroot / 'config', Path('src/config/solaris/*.fpc').glob())\n\n # enable this on osx to clobber any fpc files \n # where the generic unix ones are inadequate\n if 'macosx' in target.platform:\n buildsystem.copy_to(ctx,\n ctx.buildroot / 'host/config', Path('src/config/macosx/*.fpc').glob())\n\n # extract the configuration\n iscr = call('buildsystem.iscr.Iscr', ctx)\n\n # convert the config into something iscr can use\n call('buildsystem.iscr.config_iscr_config', ctx, build, host, target)\n\n # re-extract packages if any of them changed\n ctx.scheduler.map(iscr, (src_dir(ctx)/'lpsrc/*.pak').glob())\n\n # overwrite or add *.fpc files to the config directory\n call('buildsystem.post_config.copy_user_fpcs', ctx)\n\n # make Felix representation of whole build config\n call('buildsystem.show_build_config.build',ctx)\n\n return Record(build=build, host=host, target=target), iscr\n\n# ------------------------------------------------------------------------------\n\ndef build(ctx):\n \"\"\"Compile Felix.\"\"\"\n\n # configure the phases\n phases, iscr = configure(ctx)\n\n # --------------------------------------------------------------------------\n # Compile the compiler.\n\n compilers = call('buildsystem.flx_compiler.build_flx_drivers', ctx,\n phases.host)\n\n # --------------------------------------------------------------------------\n # Compile the runtime dependencies.\n\n call('buildsystem.judy.build_runtime', phases.host, phases.target)\n call('buildsystem.re2.build_runtime', phases.target)\n\n # --------------------------------------------------------------------------\n # Build the standard library.\n\n # copy files into the library\n buildsystem.copy_dir_to(ctx, ctx.buildroot/'share', 'src/lib',\n pattern='*.{flx,flxh,fsyn,fdoc,files,html,sql,css,js,py,png}')\n \n for module in ( 'flx_stdlib',):\n call('buildsystem.' 
+ module + '.build_flx', phases.target)\n\n # --------------------------------------------------------------------------\n # Compile the runtime drivers.\n\n drivers = call('buildsystem.flx_drivers.build', phases.host, phases.target)\n\n # --------------------------------------------------------------------------\n # Compile the builder.\n\n flx_builder = call('buildsystem.flx.build', ctx,\n compilers.flxg, phases.target.cxx.static, drivers)\n\n flx_pkgconfig = call('buildsystem.flx.build_flx_pkgconfig',\n phases.host, phases.target, flx_builder)\n flx = call('buildsystem.flx.build_flx', phases.host, phases.target, flx_builder)\n\n # --------------------------------------------------------------------------\n # now, try building a file\n\n felix = call('fbuild.builders.felix.Felix', ctx,\n exe=ctx.buildroot / 'host/bin/bootflx',\n debug=ctx.options.debug,\n flags=['--test=' + ctx.buildroot])\n\n call('buildsystem.plugins.build', phases.target, felix)\n\n return phases, iscr, felix\n\n","sub_path":"fbuildroot.py","file_name":"fbuildroot.py","file_ext":"py","file_size_in_byte":19578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"440074921","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport glob\nimport shutil\n\ntry:\n from setuptools import setup, Extension\n from setuptools.command.build_ext import build_ext\nexcept ImportError:\n from distutils.core import setup, Extension\n from distutils.command.build_ext import build_ext\n\ndef invoke_f2py(files, flags=[], wd=None):\n from numpy.f2py import main\n\n olddir = os.path.abspath(os.curdir)\n oldargv = list(sys.argv)\n try:\n if wd is not None:\n os.chdir(wd)\n sys.argv = ['f2py']\n sys.argv.extend(files)\n sys.argv.extend(flags)\n\n main()\n finally:\n sys.argv = oldargv\n os.chdir(olddir)\n\nclass build_fsps(build_ext):\n\n def run(self):\n # Generate the Fortran signature/interface.\n files = ['fsps.f90']\n flags = \" -m _fsps -h fsps.pyf --overwrite-signature\".split()\n print(\"Running f2py on {0} with flags {1}\".format(files, flags))\n invoke_f2py(['fsps.f90'], flags, wd='fsps')\n\n # Find the FSPS source files.\n fsps_dir = os.path.join(os.environ[\"SPS_HOME\"], \"src\")\n fns = [f for f in glob.glob(os.path.join(fsps_dir, \"*.o\"))\n if os.path.basename(f) not in [\"autosps.o\", \"simple.o\",\n \"lesssimple.o\"]]\n\n # Check to make sure that all of the required modules exist.\n flag = len(fns)\n flag *= os.path.exists(os.path.join(fsps_dir, \"sps_utils.mod\"))\n flag *= os.path.exists(os.path.join(fsps_dir, \"sps_vars.mod\"))\n if not flag:\n raise RuntimeError(\"You need to run make in $SPS_HOME/src first\")\n\n # Add the interface source files to the file list.\n fns += [\"fsps.f90\", \"fsps.pyf\"]\n\n # Compile the library.\n flags = '-c -I{0} --f90flags=-cpp --f90flags=-fPIC'.format(fsps_dir)\n if sys.platform.startswith(\"win\"):\n flags += \" --compiler=mingw32\"\n flags = flags.split()\n print(\"Running f2py on {0} with flags {1}\".format(fns, flags))\n invoke_f2py(fns, flags, wd='fsps')\n\n # Move the compiled library to the correct directory.\n infn = os.path.abspath(self.get_ext_filename(\"fsps._fsps\"))\n outfn = os.path.abspath(self.get_ext_fullpath(\"fsps._fsps\"))\n if infn != outfn:\n try:\n os.makedirs(os.path.dirname(outfn))\n except os.error:\n pass\n print(\"Copying {0} to {1}\".format(infn, outfn))\n shutil.copyfile(infn, outfn)\n\n\nif \"publish\" in sys.argv[-1]:\n os.system(\"python setup.py sdist upload\")\n sys.exit()\n\n\n# Hackishly 
inject a constant into builtins to enable importing of the\n# package before the library is built.\nif sys.version_info[0] < 3:\n import __builtin__ as builtins\nelse:\n import builtins\nbuiltins.__FSPS_SETUP__ = True\nfrom fsps import __version__ # NOQA\n\n# This is a fake extension that is used to trick distutils into building our\n# real library using the `build_fsps` function above even when `install` is\n# called.\next = Extension(\"fsps._fsps\", sources=[\"fsps/fsps.f90\"])\n\n# The final setup command. Note: we override the `build_ext` command with our\n# custom version from above.\nsetup(\n name=\"fsps\",\n url=\"https://github.com/dfm/python-fsps\",\n version=__version__,\n author=\"Dan Foreman-Mackey\",\n author_email=\"danfm@nyu.edu\",\n description=\"Python bindings for Charlie Conroy's FSPS.\",\n long_description=open(\"README.rst\").read(),\n packages=[\"fsps\"],\n package_data={\n \"\": [\"README.rst\", \"LICENSE.rst\", \"AUTHORS.rst\"],\n \"fsps\": [\"_fsps.so\"],\n },\n include_package_data=True,\n ext_modules=[ext],\n scripts=glob.glob(\"scripts/*.py\"),\n cmdclass={\n \"build_ext\": build_fsps,\n },\n classifiers=[\n # \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"489550946","text":"\nimport pandas as pd\nfrom pyecharts.charts import Kline\nfrom pyecharts.charts import Line, Bar, Grid, Pie,Scatter\nfrom pyecharts import options as opts\nfrom pyecharts.commons.utils import JsCode\nimport logging\nbsArea = []\n\ndef bspoint(data):\n bspoints = []\n global bsArea \n bsArea = []\n def bs(data):\n if data[\"count\"] > 0:\n bspoints.append(\n opts.MarkPointItem(\n name=\"B\",\n coord = [str(data[\"date\"]), data[\"bprice\"]],\n symbol_size = 12,\n value = str(round(data[\"signal\"],2)),\n itemstyle_opts = opts.ItemStyleOpts(color=\"#ec0000\"),\n )\n )\n print(\"bspoints and b \"+str(data[\"date\"])+\" / \"+str(data[\"bprice\"]))\n bspoints.append(\n opts.MarkPointItem(\n name=\"S\",\n coord = [str(int(data[\"sdate\"])), data[\"sprice\"]],\n symbol_size = 12,\n value = str(round(data[\"signal\"],2)),\n itemstyle_opts = opts.ItemStyleOpts(\n color=\"#00da3c\" if data[\"earnings\"]>0 else \"#f47920\",\n area_color=\"#00da3c\" if data[\"earnings\"]>0 else \"#f47920\"\n ),\n )\n )\n # print(\"bspoints and s \"+str(int(data[\"sdate\"]))+\" / \"+str(data[\"sprice\"]))\n bsArea.append(\n opts.MarkAreaItem(\n x=(str(data[\"date\"]), str(int(data[\"sdate\"]))),\n itemstyle_opts = opts.ItemStyleOpts(\n color=\"#ec0000\" if data[\"earnings\"]>0 else \"#00da3c\",\n opacity=0.1\n ),\n )\n )\n # print(\"bspoints and Area \"+str(data[\"date\"])+\" / \"+str(int(data[\"sdate\"])))\n\n data.apply(bs,axis=1)\n return bspoints\n\ndef maline(data,mas=[5,10,20,30]):\n if data.empty:\n return Line()\n line = Line()\n line.add_xaxis(data[\"date\"].astype('str').values.tolist())\n for item in mas:\n key = \"ma\"+str(item)\n if key not in data.columns.values.tolist():\n continue\n line.add_yaxis(\n series_name=\"ma\"+str(item),\n y_axis=data[\"ma\"+str(item)],\n is_smooth=True,\n is_hover_animation=False,\n linestyle_opts=opts.LineStyleOpts(width=3, opacity=0.5),\n 
label_opts=opts.LabelOpts(is_show=False),\n        )\n    return line\n\ndef kline(data, title = \"K线图\", height = \"250px\"):\n\n    logging.debug(\"kline begin\")\n    # logging.debug(data)\n    if data.empty:\n        return Kline(init_opts=opts.InitOpts(width=\"100%\", height=height))\n\n    xaxis = data[\"date\"].astype('str').values.tolist()\n    yaxis = data[[\"open\", \"close\", \"high\",\n                  \"low\"]].values.tolist()\n    \n    chart = Kline(init_opts=opts.InitOpts(width=\"100%\", height=height))\n    chart.add_xaxis(xaxis)\n    chart.add_yaxis(\n        \"kline\",\n        yaxis,\n        itemstyle_opts=opts.ItemStyleOpts(\n            color=\"#ec0000\",\n            color0=\"#00da3c\",\n            border_color=\"#8A0000\",\n            border_color0=\"#008F28\",\n        ),\n        markline_opts=opts.MarkLineOpts(\n            data=[opts.MarkLineItem(type_=\"max\", value_dim=\"close\")]\n        ),\n        markpoint_opts=opts.MarkPointOpts(\n            data=bspoint(data)\n        ),\n    )\n    chart = chart.overlap(maline(data))\n    \n    chart.set_global_opts(\n        xaxis_opts=opts.AxisOpts(is_scale=True),\n        yaxis_opts=opts.AxisOpts(\n            is_scale=True,\n            splitarea_opts=opts.SplitAreaOpts(\n                is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)\n            ),\n        ),\n        datazoom_opts=[opts.DataZoomOpts(\n            pos_bottom=\"0px\",\n            range_start= 100.00-(5000.00/len(xaxis)),\n            range_end= 100.00,\n        )],\n        title_opts=opts.TitleOpts(title=title),\n    )\n    chart.set_series_opts(\n        markarea_opts=opts.MarkAreaOpts(\n            data=bsArea\n        )\n    )\n    return chart\n\n\nif __name__ == '__main__':\n    import sys\n    sys.path.append('/Users/admin/Documents/GitHub/UGFAFAFA/code')\n    from Analyse.back_trading import back_trading\n    df = pd.read_csv(\"/Users/admin/Documents/GitHub/UGFAFAFA/data/output/damrey/002028.SZ/result.csv\")\n    df = back_trading(df, begin=20200513,end=20210513,signal=\"signal\")\n    print(kline(df).render(\"/Users/admin/Documents/github/UGFAFAFA/data/tem/kline000.html\"))\n","sub_path":"code/chart/kline.py","file_name":"kline.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"244117646","text":"class Dean(object):\n    id_dean = 0\n\n    @staticmethod\n    def set_idx(id_dean):\n        \"\"\"func set var id_dean\n\n        Args:\n            id_dean (int): new idx for class\n        \"\"\"\n        Dean.id_dean = id_dean\n\n    @staticmethod\n    def status_id():\n        \"\"\"func return variable id_dean\n\n        Returns:\n            int: current id_dean in class\n        \"\"\"\n        return Dean.id_dean\n\n    @staticmethod\n    def select_all(db):\n        \"\"\"func return all deans data from db\n\n        Args:\n            db (TableDatabase): database that you want to search\n\n        Returns:\n            List: list of tuples of data\n        \"\"\"\n        cur = db.cursor_conn()\n        cur.execute(\"SELECT * FROM dean\")\n\n        return cur.fetchall()\n\n    @staticmethod\n    def get_lastrowid(db):\n        \"\"\"func return last row id\n\n        Args:\n            db (TableDatabase): database that you want to search\n\n        Returns:\n            int: last row id\n        \"\"\"\n        cur = db.cursor_conn()\n        cur.execute(\"SELECT * FROM dean\")\n\n        return cur.lastrowid\n\n    @staticmethod\n    def create_tab(db):\n        \"\"\"\n        function create table dean\n        \"\"\"\n\n        sql = \"\"\"CREATE TABLE IF NOT 
EXISTS dean (\n id_dean INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n name TEXT NOT NULL,\n lastname TEXT NOT NULL,\n second_name TEXT,\n pesel INTEGER NOT NULL,\n email TEXT,\n place_of_residence TEXT NOT NULL\n );\n \"\"\"\n\n if db.get_conn() is not None:\n db.create_tab(sql)\n else:\n print(\"Error! Cant create dean table\")\n\n def __init__(self, id_dean=0, name='', lastname='', sec_name='', ssn=1000, email='', place_of_residence=''):\n \"\"\"Init Dean\n\n Args:\n id_dean (int, optional): id of dean. Defaults to 0.\n name (str, optional): dean name. Defaults to ''.\n lastname (str, optional): dean lastname. Defaults to ''.\n sec_name (str, optional): dean second name. Defaults to ''.\n ssn (int, optional): dean ssn. Defaults to 1000.\n email (str, optional): dean email. Defaults to ''.\n place_of_residence (str, optional): dean place. Defaults to ''.\n \"\"\"\n Dean.id_dean += 1\n # set id_dean automatically or manual\n if id_dean == 0:\n self.__id_dean = Dean.id_dean\n else:\n self.__id_dean = id_dean\n\n self.__name = name\n self.__lastname = lastname\n self.__sec_name = sec_name\n self.__ssn = ssn # social security number\n self.__email = email\n self.__place_of_residence = place_of_residence\n\n def insert(self, db):\n \"\"\"function insert data to db\n\n Args:\n db (TableDatabase): database that you want to fill\n \"\"\"\n sql = \"\"\"INSERT INTO dean(\n name,\n lastname,\n second_name,\n pesel,\n email,\n place_of_residence\n ) VALUES (?,?,?,?,?,?)\n \"\"\"\n\n values = (\n self.__name,\n self.__lastname,\n self.__sec_name,\n self.__ssn,\n self.__email,\n self.__place_of_residence\n )\n\n if db.get_conn() is not None:\n cur = db.cursor_conn()\n cur.execute(sql, values)\n else:\n print(\"Error! Cant insert in dean table\")\n\n def update(self, db):\n \"\"\"function update data to db\n\n Args:\n db (TableDatabase): database that you want to update\n \"\"\"\n sql = \"\"\"UPDATE dean SET\n name = ?,\n lastname = ?,\n second_name = ?,\n pesel = ?,\n email = ?,\n place_of_residence = ?\n WHERE id_dean = ?\n \"\"\"\n\n values = (\n self.__name,\n self.__lastname,\n self.__sec_name,\n self.__ssn,\n self.__email,\n self.__place_of_residence,\n self.__id_dean\n )\n\n if db.get_conn() is not None:\n cur = db.cursor_conn()\n cur.execute(sql, values)\n else:\n print(\"Error! Cant update in dean table\")\n\n def delete(self, db):\n \"\"\"function delete data to db\n\n Args:\n db (TableDatabase): database that you want to update\n \"\"\"\n sql = \"\"\"DELETE FROM dean WHERE id_dean = ?\"\"\"\n\n if db.get_conn() is not None:\n cur = db.cursor_conn()\n cur.execute(sql, (self.__id_dean,))\n else:\n print(\"Error! 
Cant delete in dean table\")\n\n def get_id(self):\n return self.__id_dean\n\n def get_name(self):\n return self.__name\n\n def get_lastname(self):\n return self.__lastname\n\n def get_sec_name(self):\n return self.__sec_name\n\n def get_ssn(self):\n return self.__ssn\n\n def get_email(self):\n return self.__email\n\n def get_place_of_residence(self):\n return self.__place_of_residence\n\n def set_id(self, id_dean):\n self.__id_dean = id_dean\n\n def set_name(self, name):\n self.__name = name\n\n def set_lastname(self, lastname):\n self.__lastname = lastname\n\n def set_sec_name(self, sec_name):\n self.__sec_name = sec_name\n\n def set_ssn(self, ssn):\n self.__ssn = ssn\n\n def set_email(self, email):\n self.__email = email\n\n def set_place_of_residence(self, place_of_residence):\n self.__place_of_residence = place_of_residence\n","sub_path":"Tables/Dean.py","file_name":"Dean.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"579967603","text":"from torch import load\nfrom doom_environment import DoomEnvironment\nfrom utils import watch_agent\nfrom models import agent\nfrom time import sleep\nfrom hyperparameters import hp_basic_test as hp\n# from hyperparameters import hp_d_cor_test as hp\n# from hyperparameters import hp_def_c_test as hp\n# from hyperparameters import hp_h_gth_test as hp\n\nif __name__ == '__main__':\n print('---------------------------- vizDoom watching script ---------------------------')\n\n test_env = DoomEnvironment('scenarios/' + hp.scenario + '.cfg', False, hp.test_skiprate)\n test_env.make_visible()\n policy_net = agent[hp.agent](hp.scenario, 2 ** test_env.get_n_buttons(), hp.epsilon)\n policy_net.load_state_dict(load(\n 'logs/' + hp.scenario + '/' + hp.agent + '/model.pth',\n map_location=lambda storage, loc: storage)['policy_net_state'])\n policy_net.eval()\n print('scenario: {}, agent: {}'.format(hp.scenario, hp.agent))\n print('loaded model: {}'.format('logs/' + hp.scenario + '/' + hp.agent + '/model.pth'))\n print('agent\\'s epsilon: {}'.format(hp.epsilon))\n\n print('------------------------------- watch the model --------------------------------')\n print('n_episodes: {}'.format(hp.n_episodes))\n for _ in range(hp.n_episodes):\n reward, shaped = watch_agent(hp.scenario, policy_net, test_env)\n print('Episode {} done, reward: {}, shaped: {}'.format(_, reward, shaped))\n sleep(1.0)\n # if _ != hp.n_episodes - 1:\n # test_env.reset()\n print('Exit')\n","sub_path":"watching.py","file_name":"watching.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"482540113","text":"#Task 1\nuser_name = input(\"Enter your name: \")\nuser_age = input(\"Enter your age: \")\nuser_age = int(user_age)\nprint(user_name,\"to 100 years left\", 100 - user_age,\"years: \")\n#Task 2\nfirst_input = input(\"Enter first number: \")\nsecond_input = input(\"Enter second number: \")\nthird_input = input(\"Enter third number: \")\nfirst_input = int(first_input)\nsecond_input = int(second_input)\nthird_input = int(third_input)\nprint(\"Are entered numbers equal: \", first_input == second_input and first_input == third_input)\nprint(\"Are two numbers equal: \", first_input == second_input or first_input == third_input or second_input ==third_input)\n#Task 3\nfirst_number = input(\"Enter first number: \")\nsecond_number = input(\"Enter second number: \")\nfirst_number = 
int(first_number)\nsecond_number = int(second_number)\nprint(\"Sum is greater than 5: \", first_number + second_number > 5)\nprint(\"Sum is less than 5: \", first_number + second_number < 5)\nprint(\"Sum is equal to 5: \", first_number + second_number == 5)\n#Task 4\nuser_mark = input(\"Enter your mark: \")\nuser_mark = int(user_mark)\nprint(\"You pass the exam: \", user_mark > 35)","sub_path":"homework_lesson_5.py","file_name":"homework_lesson_5.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"110647451","text":"from block import Block\nfrom utils import Database\nimport pickle\n\nclass Blockchain():\n def __init__(self):\n self.db = Database()\n if self.db.get('latest'):\n self.height = int(self.db.get('latest'))\n self.prev_block = pickle.loads(self.db.get(int(self.height))) # load the latest block from the db\n else:\n genesis_block = Block(0, 'This is the Genesis block baeeeee', '')\n genesis_block.time = '0'\n self.prev_block = genesis_block.pow_of_block()\n self.db.put(0, pickle.dumps(self.prev_block))\n self.db.put('latest', 0)\n self.height = 0\n\n def add_block(self, data):\n new_block = Block(self.height + 1, data, self.prev_block.hash).pow_of_block()\n self.prev_block = new_block \n self.height = new_block.height\n self.db.put(self.prev_block.height, pickle.dumps(self.prev_block)) #\n self.db.put('latest', self.prev_block.height) #\n\n def print_chain(self):\n for num_of_block in range(self.height + 1):\n self.print_block(num_of_block)\n\n def print_block(self, num_of_block):\n if num_of_block <= self.height:\n block = pickle.loads(self.db.get(num_of_block))\n print('#{}'.format(block.height))\n print('Establish time: {}'.format(block.time))\n print('Previous hash: {}'.format(block.prev_block_hash))\n print('Data: {}'.format(block.data))\n print('Hash: {}'.format(block.hash))\n else:\n print('Error: the block {} does not EXIST!!!'.format(num_of_block))\n","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"522410152","text":"# Time Complexity : O((N)*(N-3)*(N-6)...) so, approx. = O(N! - C)\n# Space Complexity : O(N*M) because we made the matrix, but stack-> O(N)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n# Your code here along with comments explaining your approach\n#Approach and intuition:\n#0. each row needs to fit at least one queen[ N queens on N*N board ], so we start placing queens in first cell of first row and then move to next row to find valid queen positions. If we can't find a valid spot in a row, backtrack and check the next valid row value for the queen in previous row.\n#1. check row, col and diagonal for valid positions\n#2. check position of other queens\n#3. How to validate a position? we need to check only for 3 cells(dirns: up, upper left diagonal, upper right diagonal) as we have placed valid queens till row-1!\n#4. Initiate matrix with \".\"s\n#5. When you want to insert a queen, replace \".\" with \"Q\" and vice versa to remove it after backtracking\n#6. 
do o/p modification once a valid solution exists\n\nclass Solution:\n \n def __init__(self):\n self.output = []\n \n def solveNQueens(self, n: int) -> List[List[str]]:\n \n board = [[\" \" for _ in range(n)] for _ in range(n)]\n \n for i in range(n):\n for j in range(n):\n board[i][j] = '.'\n \n self.backtracking(board, n, 0)\n return self.output\n \n \n def backtracking(self, board, queensLeft, i):\n \n #base case\n if queensLeft <= 0:\n self.output.append(self.makeOutput(board))\n return\n \n #place queen, make recursive calls\n for j in range(len(board)):\n if(self.isValid(board, i, j)):\n board[i][j] = 'Q'\n self.backtracking(board, queensLeft-1, i+1)\n board[i][j] = '.'\n \n def makeOutput(self, board):\n nQList = []\n \n for i in range(len(board)):\n temp = \"\"\n for j in range(len(board)):\n temp += board[i][j]\n \n nQList.append(temp)\n \n return nQList\n \n def isValid(self, board, i, j):\n row = i\n col = j\n \n #upper column\n while row>=0:\n if board[row][col] == 'Q':\n return False\n row -= 1\n \n #left diagonal\n row = i\n col = j\n while row>=0 and col >= 0:\n if board[row][col]=='Q':\n return False\n row -= 1\n col -= 1\n \n #right diagonal\n row = i\n col = j\n while row>=0 and col < len(board):\n if board[row][col]=='Q':\n return False\n row -= 1\n col += 1\n \n return True\n \n \n \n ","sub_path":"nQueens_51.py","file_name":"nQueens_51.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"40430527","text":"from .blocks import Block, Input\nfrom .components import Component\nfrom .fields import ArrayField, BooleanField, StringField, TextField, ValidationError\n\n\nclass Message(Component):\n text = StringField()\n blocks = ArrayField(Block)\n attachments = ArrayField()\n thread_ts = StringField()\n mrkdwn = BooleanField()\n\n def __init__(\n self, text=None, blocks=None, attachments=None, thread_ts=None, mrkdwn=None\n ):\n super().__init__(text, blocks, attachments, thread_ts, mrkdwn)\n\n\nclass Modal(Component):\n type = StringField()\n title = TextField(max_length=24, plain=True)\n blocks = ArrayField(Block)\n close = TextField(max_length=24, plain=True)\n submit = TextField(max_length=24, plain=True)\n private_metadata = StringField(max_length=3000)\n callback_id = StringField(max_length=255)\n clear_on_close = BooleanField()\n notify_on_close = BooleanField()\n external_id = StringField()\n\n def __init__(\n self,\n title,\n blocks,\n close=None,\n submit=None,\n private_metadata=None,\n callback_id=None,\n clear_on_close=None,\n notify_on_close=None,\n external_id=None,\n ):\n if not submit and any(isinstance(b, Input) for b in blocks):\n raise ValidationError(\"You have to provide submit.\")\n\n super().__init__(\n \"modal\",\n title,\n blocks,\n close,\n submit,\n private_metadata,\n callback_id,\n clear_on_close,\n notify_on_close,\n external_id,\n )\n\n\nclass Home(Component):\n type = StringField()\n blocks = ArrayField(Block)\n private_metadata = StringField(max_length=3000)\n callback_id = StringField(max_length=255)\n external_id = StringField()\n\n def __init__(\n self, blocks, private_metadata=None, callback_id=None, external_id=None,\n ):\n super().__init__(\n \"home\", blocks, private_metadata, callback_id, external_id,\n )\n","sub_path":"blockkit/surfaces.py","file_name":"surfaces.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"445089984","text":"import 
sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nfrom container_example import Ui_MainWindow\n\n\nclass ContainerWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.setupUi(self)\n\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n win = ContainerWindow()\n win.show()\n sys.exit(app.exec_())\n","sub_path":"01_Qt_Designer使用/03_ 使用容器进行布局/container_example_run.py","file_name":"container_example_run.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"392170057","text":"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)\n# @author Neil Vaytet\n\nfrom .. import config\nfrom ..core import concatenate, values, dtype, units, nanmin, nanmax, histogram, \\\n full_like\nfrom ..core import Variable, DataArray\nfrom ..core import abs as abs_\nimport numpy as np\nfrom copy import copy\nimport io\n\n\ndef get_line_param(name=None, index=None):\n \"\"\"\n Get the default line parameter from the config.\n If an index is supplied, return the i-th item in the list.\n \"\"\"\n param = getattr(config.plot, name)\n return param[index % len(param)]\n\n\ndef to_bin_centers(x, dim):\n \"\"\"\n Convert array edges to centers\n \"\"\"\n return 0.5 * (x[dim, 1:] + x[dim, :-1])\n\n\ndef to_bin_edges(x, dim):\n \"\"\"\n Convert array centers to edges\n \"\"\"\n idim = x.dims.index(dim)\n if x.shape[idim] < 2:\n one = 1.0 * x.unit\n return concatenate(x[dim, 0:1] - one, x[dim, 0:1] + one, dim)\n else:\n center = to_bin_centers(x, dim)\n # Note: use range of 0:1 to keep dimension dim in the slice to avoid\n # switching round dimension order in concatenate step.\n left = center[dim, 0:1] - (x[dim, 1] - x[dim, 0])\n right = center[dim, -1] + (x[dim, -1] - x[dim, -2])\n return concatenate(concatenate(left, center, dim), right, dim)\n\n\ndef parse_params(params=None, defaults=None, globs=None, array=None):\n \"\"\"\n Construct the colorbar settings using default and input values\n \"\"\"\n from matplotlib.colors import Normalize, LogNorm, LinearSegmentedColormap\n from matplotlib import cm\n\n parsed = dict(config.plot.params)\n if defaults is not None:\n for key, val in defaults.items():\n parsed[key] = val\n if globs is not None:\n for key, val in globs.items():\n # Global parameters need special treatment because by default they\n # are set to None, and we don't want to overwrite the defaults.\n if val is not None:\n parsed[key] = val\n if params is not None:\n if isinstance(params, bool):\n params = {\"show\": params}\n for key, val in params.items():\n parsed[key] = val\n\n if parsed[\"norm\"] == \"log\":\n norm = LogNorm\n elif parsed[\"norm\"] == \"linear\":\n norm = Normalize\n else:\n raise RuntimeError(\"Unknown norm. 
Expected 'linear' or 'log', \"\n \"got {}.\".format(parsed[\"norm\"]))\n vmin = parsed[\"vmin\"]\n vmax = parsed[\"vmax\"]\n parsed[\"norm\"] = norm(vmin=vmin.value if vmin is not None else None,\n vmax=vmax.value if vmax is not None else None)\n\n # Convert color into custom colormap\n if parsed[\"color\"] is not None:\n parsed[\"cmap\"] = LinearSegmentedColormap.from_list(\n \"tmp\", [parsed[\"color\"], parsed[\"color\"]])\n else:\n parsed[\"cmap\"] = copy(cm.get_cmap(parsed[\"cmap\"]))\n\n if parsed[\"under_color\"] is None:\n parsed[\"cmap\"].set_under(parsed[\"cmap\"](0.0))\n else:\n parsed[\"cmap\"].set_under(parsed[\"under_color\"])\n if parsed[\"over_color\"] is None:\n parsed[\"cmap\"].set_over(parsed[\"cmap\"](1.0))\n else:\n parsed[\"cmap\"].set_over(parsed[\"over_color\"])\n\n return parsed\n\n\ndef vars_to_err(v):\n \"\"\"\n Convert variances to errors.\n \"\"\"\n with np.errstate(invalid=\"ignore\"):\n v = np.sqrt(v)\n np.nan_to_num(v, copy=False)\n return v\n\n\ndef find_log_limits(x):\n \"\"\"\n To find log scale limits, we histogram the data between 1.0e-30\n and 1.0e+30 and include only bins that are non-zero.\n \"\"\"\n from .. import flatten, ones\n volume = np.product(x.shape)\n pixel = flatten(values(x.astype(dtype.float64)), to='pixel')\n weights = ones(dims=['pixel'], shape=[volume], unit='counts')\n hist = histogram(DataArray(data=weights, coords={'order': pixel}),\n bins=Variable(dims=['order'],\n values=np.geomspace(1e-30, 1e30, num=61),\n unit=x.unit))\n # Find the first and the last non-zero bins\n inds = np.nonzero((hist.data > 0.0 * units.counts).values)\n ar = np.arange(hist.data.shape[0])[inds]\n # Safety check in case there are no values in range 1.0e-30:1.0e+30:\n # fall back to the linear method and replace with arbitrary values if the\n # limits are negative.\n if len(ar) == 0:\n [vmin, vmax] = find_linear_limits(x)\n if vmin.value <= 0.0:\n if vmax.value <= 0.0:\n vmin = full_like(vmin, 0.1)\n vmax = full_like(vmax, 1.0)\n else:\n vmin = 1.0e-3 * vmax\n else:\n vmin = hist.coords['order']['order', ar.min()]\n vmax = hist.coords['order']['order', ar.max() + 1]\n return [vmin, vmax]\n\n\ndef find_linear_limits(x):\n \"\"\"\n Find variable min and max.\n \"\"\"\n return [\n values(nanmin(x).astype(dtype.float64)),\n values(nanmax(x).astype(dtype.float64))\n ]\n\n\ndef find_limits(x, scale=None, flip=False):\n \"\"\"\n Find sensible limits, depending on linear or log scale.\n \"\"\"\n if scale is not None:\n if scale == \"log\":\n lims = {\"log\": find_log_limits(x)}\n else:\n lims = {\"linear\": find_linear_limits(x)}\n else:\n lims = {\"log\": find_log_limits(x), \"linear\": find_linear_limits(x)}\n if flip:\n for key in lims:\n lims[key] = np.flip(lims[key]).copy()\n return lims\n\n\ndef fix_empty_range(lims, replacement=None):\n \"\"\"\n Range correction in case xmin == xmax\n \"\"\"\n dx = 0.0 * lims[0].unit\n if lims[0].value == lims[1].value:\n if replacement is not None:\n dx = 0.5 * replacement\n elif lims[0].value == 0.0:\n dx = 0.5 * lims[0].unit\n else:\n dx = 0.5 * abs_(lims[0])\n return [lims[0] - dx, lims[1] + dx]\n\n\ndef fig_to_pngbytes(fig):\n \"\"\"\n Convert figure to png image bytes.\n We also close the figure to prevent it from showing up again in\n cells further down the notebook.\n \"\"\"\n import matplotlib.pyplot as plt\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n plt.close(fig)\n buf.seek(0)\n return buf.getvalue()\n\n\ndef to_dict(meta):\n \"\"\"\n Convert a coords, meta, attrs or masks object to a python dict.\n 
\"\"\"\n return {name: var for name, var in meta.items()}\n","sub_path":"src/scipp/plotting/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"592487781","text":"# -*- coding: utf-8 -*-\n# Python wrapper for the Reaxys API\n#\n# Version: 1.1.0-beta.2\n#\n# Author: Dr. Sebastian Radestock, Elsevier\n# Author: Dr. Alexander Riemer, Elsevier\n# Author: Dr. Markus Fischer, Elsevier\n# Date: July 26th, 2019\n# Change Log 1.1.0-beta.1, July 26th, 2019\n# A. Support for Python 3\n# B. get_field_content modifications\n# B.1. returns values for elements with highlights\n# B.2. new method argument highlight_only. If True will return only a value if field contains highlights\n#\n# Change Log 1.1.0-beta.2, July 26th, 2019\n# A. Method retrieve now supports clustering hitsets\n# A.1. Added optional arguments dbname and context, that are required to formulate group by statements\n\nimport http.cookiejar, xml.dom.minidom, re\nfrom urllib.request import Request, urlopen\nimport socks\nimport socket\n\n\nclass Reaxys_API:\n\n def __init__(self, proxy=None, port=None):\n \n self.url = \"\"\n self.headers = {'Content-type' : 'text/xml; charset=\"UTF-8\"'}\n self.callername = \"\"\n self.sessionid = \"\"\n self.resultname = \"\"\n self.resultsize = \"\"\n self.citationset = \"\"\n self.citationcount = \"\"\n self.proxy = proxy\n self.port = port\n\n if proxy and port:\n socks.setdefaultproxy(\n socks.PROXY_TYPE_SOCKS5, self.proxy, self.port)\n socket.socket = socks.socksocket\n\n # Set True for verbose output:\n self.debug = False\n\n def _get_resultname(self, response_dom):\n \n #response_dom = xml.dom.minidom.parseString(response_xml)\n \n\n # Length of response_dom.getElementsByTagName(\"resultname\") should always be 1.\n # Node resultname should not contain subnodes.\n try:\n resultname = response_dom.getElementsByTagName(\"resultname\")[0].childNodes[0].nodeValue\n except IndexError:\n resultname = None\n return resultname\n\n def _get_resultsize(self, response_dom):\n \n #response_dom = xml.dom.minidom.parseString(response_xml)\n\n # Length of response_dom.getElementsByTagName(\"resultsize\") should always be 1.\n # Node resultsize should not contain subnodes.\n try:\n resultsize = response_dom.getElementsByTagName(\"resultsize\")[0].childNodes[0].nodeValue\n except IndexError:\n resultsize = None\n\n return resultsize\n\n def _get_citationset(self, response_dom):\n \n #response_dom = xml.dom.minidom.parseString(response_xml)\n\n # Length of response_dom.getElementsByTagName(\"citationset\") should always be 1.\n # Node citationset should not contain subnodes. \n return response_dom.getElementsByTagName(\"citationset\")[0].childNodes[0].nodeValue\n\n def _get_citationcount(self, response_dom):\n \n #response_dom = xml.dom.minidom.parseString(response_xml)\n\n # Length of response_dom.getElementsByTagName(\"citationcount\") should always be 1.\n # Node citationcount should not contain subnodes. 
\n return response_dom.getElementsByTagName(\"citationcount\")[0].childNodes[0].nodeValue\n\n def get_facts_availability(self, response_dom, field):\n\n facts_availability = \"0\"\n \n #response_dom = xml.dom.minidom.parseString(response_xml)\n\n facts = response_dom.getElementsByTagName(\"facts\")[0]\n for fact in facts.childNodes:\n if 'name=\"' + field + '\"' in fact.toxml():\n facts_availability = fact.childNodes[0].nodeValue.split(\"(\")[0]\n\n return facts_availability\n\n def get_field_content(self, response_dom, field, highlight_only=False):\n \n field_content = []\n \n #response_dom = xml.dom.minidom.parseString(response_xml)\n \n for element in response_dom.getElementsByTagName(field):\n\n # Concatenate text values if highlight is present\n if element.getAttribute('highlight') == 'true':\n field_content.append(\n ''.join([e.data\n if type(e) == xml.dom.minidom.Text\n else e.childNodes[0].data for e in element.childNodes]))\n\n # If node contains further sub-nodes: return full xml.\n elif len(element.childNodes) > 1 and highlight_only is False:\n field_content.append(element.toxml())\n\n # If node does not contain further sub-nodes: return node value.\n elif len(element.childNodes) == 1 and highlight_only is False:\n field_content.append(element.childNodes[0].nodeValue)\n \n return field_content\n\n def connect(self, url, url_main, username, password, callername):\n \n self.url = url\n self.callername = callername\n cookies = http.cookiejar.CookieJar()\n \n connect_template = \"\"\"\n \n \n \n \n \\n\"\"\"\n payload = connect_template % (callername, username, password)\n data = payload.encode()\n\n # Header reset.\n self.headers = {'Content-type' : 'text/xml; charset=\"UTF-8\"'}\n\n # ELSAPI support\n self.headers['X-ELS-APIKey'] = callername\n self.headers['Accept'] = \"*/*\"\n request = Request(self.url, data=data, headers=self.headers)\n \n if self.debug:\n print('-----------------------\\nQuery headers from connect:')\n print(self.headers)\n print('-----------------------\\nQuery from connect:')\n print(payload)\n\n response = urlopen(request)\n response_xml = response.read()\n response_dom = xml.dom.minidom.parseString(response_xml)\n \n if self.debug:\n print('-----------------------\\nResponse headers from connect:')\n print(response.info())\n print('-----------------------\\nResponse from connect:')\n print(response_xml)\n\n # Get sessionid.\n \n element = response_dom.getElementsByTagName(\"sessionid\")\n self.sessionid = element[0].childNodes[0].nodeValue\n \n # Cookies are read from the response and stored in self.header\n # which is used as a request header for subsequent requests.\n cookies.extract_cookies(response, request)\n \n # Cookie handling 3.0: Simply store and resend ALL cookies received from server\n self.headers['Cookie'] = \"; \".join(re.findall(r\"(?<=Cookie ).*?=\\S*\", str(cookies)))\n\n def disconnect(self):\n \n disconnect_template = \"\"\"\n \n \n \n \n \\n\"\"\"\n payload = disconnect_template%(self.callername, self.sessionid)\n data = payload.encode()\n\n request = Request(self.url, data=data, headers=self.headers)\n\n if self.debug:\n print('-----------------------\\nQuery headers from disconnect:')\n print(self.headers)\n print('-----------------------\\nQuery from disconnect:')\n print(payload)\n\n response = urlopen(request)\n response_xml = response.read()\n \n if self.debug:\n print('-----------------------\\nResponse headers from disconnect:')\n print(response.info())\n print('-----------------------\\nResponse from disconnect:')\n 
print(response_xml)\n\n def select(self, dbname, context, where_clause, order_by, options):\n \n select_template = \"\"\"\n \n \n \n \n \n \n \n \n %s\n %s\n %s\n \n \\n\"\"\"\n payload = select_template%(self.callername, dbname, context, where_clause, order_by, options)\n data = payload.encode()\n request = Request(self.url, data=data, headers=self.headers)\n\n if self.debug:\n print('-----------------------\\nQuery headers from select:')\n print(self.headers)\n print('-----------------------\\nQuery from select:')\n print(payload)\n\n response = urlopen(request)\n response_xml = response.read()\n response_dom = xml.dom.minidom.parseString(response_xml)\n \n if self.debug:\n print('-----------------------\\nResponse headers from select:')\n print(response.info())\n print('-----------------------\\nResponse from select:')\n print(response_xml)\n\n \n self.resultname = self._get_resultname(response_dom)\n self.resultsize = self._get_resultsize(response_dom)\n \n if (\"NO_CORESULT\" not in options) and (\"C\" not in context):\n self.citationset = self._get_citationset(response_dom)\n self.citationcount = self._get_citationcount(response_dom)\n\n return response_dom\n\n def expand(self, dbname, first_item, last_item, where_clause):\n \n select_template = \"\"\"\n \n \n \n \n \n %s\n \n \\n\"\"\"\n payload = select_template%(self.callername, self.sessionid, dbname, first_item, last_item, where_clause)\n data = payload.encode()\n request = Request(self.url, data=data, headers=self.headers)\n\n if self.debug:\n print('-----------------------\\nQuery headers from expand:')\n print(self.headers)\n print('-----------------------\\nQuery from expand:')\n print(payload)\n \n response = urlopen(request)\n response_xml = response.read()\n response_dom = xml.dom.minidom.parseString(response_xml)\n \n if self.debug:\n print('-----------------------\\nResponse headers from expand:')\n print(response.info())\n print('-----------------------\\nResponse from expand:')\n print(response_xml)\n\n return response_dom\n\n def post(self, payload):\n\n data = payload.encode()\n request = Request(self.url, data=data, headers=self.headers)\n\n if self.debug:\n print('-----------------------\\nQuery headers from post:')\n print(self.headers)\n print('-----------------------\\nQuery from post:')\n print(payload)\n \n response = urlopen(request)\n response_xml = response.read()\n \n if self.debug:\n print('-----------------------\\nResponse headers from post:')\n print(response.info())\n print('-----------------------\\nResponse from post:')\n print(response_xml)\n\n def retrieve(self, resultname, select_items, first_item, last_item, order_by, group_by, group_item, options,\n dbname=None, context=None):\n # if group_by is given, please provide group_item, e.g. 
\"1\" or \"1,2\"\n \n if group_by != '':\n grouplist = 'grouplist=\"' + group_item + '\"'\n else:\n grouplist = ''\n\n db_template = ''\n if dbname is not None:\n db_template = 'dbname=\"%s\"' % dbname\n\n context_template = ''\n if context is not None:\n context_template = 'context=\"%s\"' % context\n\n\n select_item_template = \"\"\" %s\\n\"\"\"\n select_template = \"\"\"\n \n \n \n \\n\"\"\"\n for index in range (0,len(select_items)):\n select_template = select_template + select_item_template%(select_items[index])\n select_template = select_template + \"\"\" \n \n \n %s\n %s\n %s\n \n \\n\"\"\"\n payload = select_template % (\n self.callername, self.sessionid, db_template, context_template, resultname, grouplist,\n first_item, last_item, order_by, group_by, options)\n data = payload.encode()\n \n request = Request(self.url, data=data, headers=self.headers)\n\n if self.debug:\n print('-----------------------\\nQuery headers from retrieve:')\n print(self.headers)\n print('-----------------------\\nQuery from retrieve:')\n print(payload)\n \n response = urlopen(request)\n response_xml = response.read().decode()\n \n if self.debug:\n print('-----------------------\\nResponse headers from retrieve:')\n print(response.info())\n print('-----------------------\\nResponse from retrieve:')\n print(response_xml)\n\n return xml.dom.minidom.parseString(response_xml)\n #return response_xml\n","sub_path":"Reaxys API Python/src/Reaxys_API.py","file_name":"Reaxys_API.py","file_ext":"py","file_size_in_byte":13720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"422539323","text":"#!/usr/bin/python\nfrom __future__ import print_function\nimport os\nimport shutil\nimport bencode\nimport argparse\n\n\nargs = object()\n\ndef check_if_single_file_torrent(torrent_file_path):\n #return (path in download_dirs) # old version\n # new version\n with open (torrent_file_path, \"r\") as f:\n content = f.read()\n decoded_content = bencode.decode(content)\n result = not (\"files\" in decoded_content[\"info\"])\n return result\n\ndef get_dir_or_file_size(path):\n result = 0\n if os.path.isdir(path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n result = total_size\n elif os.path.isfile(path):\n result = os.path.getsize(path) \n return result\n\ndef delete_path(path):\n debug(\"deleting \" + path)\n if os.path.isdir(path): \n shutil.rmtree(path) \n elif os.path.isfile(path):\n os.remove(path)\n\n\ndef format_size(size):\n if size >= 1024**3: # GB\n return str(size/(1024**3)) + \"GB\"\n if size >= 1024**2: # MB\n return str(size/(1024**2)) + \"MB\"\n if size >= 1024**1: # KB\n return str(size/(1024**1)) + \"KB\"\n return str(size) + \"B\"\n\ndef debug(msg):\n if args.debug_flag:\n print(msg)\n if args.pause_on_debug:\n print(\"Continue? 
\", end = \"\")\n raw_input()\n\ndef main():\n # parse arguments\n # TODO option to also look into the folders and delete unreferenced files there\n # TODO option to confirm every deletion\n # TODO quiet option to just run without any confirmations\n # TODO option for save mode that just moves all unreferenced files into a target directory\n parser = argparse.ArgumentParser(description='Deletes files from rtorrent download directories that are not referenced in rtorrent', epilog='Github: github.com/ntv1000')\n parser.add_argument('--debug', dest='debug_flag', action='store_true', default=False, help='Debugging information will be displayed')\n parser.add_argument('--pause_on_debug', dest='pause_on_debug', action='store_true', default=False, help='Pause for confirmation after each debug message')\n parser.add_argument('--dry', dest='dryrun_flag', action='store_true', default=False, help='All files that would be deleted will be listed, but not deleted')\n parser.add_argument('rtorrent_working_dir', metavar='WORKING_DIR', help='The working directory of your rtorrent instance')\n parser.add_argument('rtorrent_download_dirs', metavar='DOWNLOAD_DIR', nargs='+', help='The download directories that should be cleaned up')\n global args\n args = parser.parse_args()\n debug('debug_flag=' + str(args.debug_flag))\n debug('dryrun_flag=' + str(args.dryrun_flag))\n debug('rtorrent_working_dir=' + args.rtorrent_working_dir)\n debug('rtorrent_download_dirs=' + str(args.rtorrent_download_dirs))\n\n rtorrent_files = [os.path.join(args.rtorrent_working_dir, f) for f in os.listdir(args.rtorrent_working_dir) if os.path.isfile(os.path.join(args.rtorrent_working_dir, f)) and os.path.splitext(f)[1] == \".rtorrent\"]\n\n downloads = list() \n referenced = list()\n\n for dir in args.rtorrent_download_dirs:\n downloads += [os.path.join(dir, x) for x in os.listdir(dir)]\n\n print(\"found \" + str(len(downloads)) + \" downloaded files\")\n print(\"found \" + str(len(rtorrent_files)) + \" rtorrent files\")\n\n for rtorrent_file in rtorrent_files:\n #debug(\"rtorrent_file: \" + rtorrent_file)\n content = \"\"\n with open (rtorrent_file, \"r\") as f:\n content = f.read()\n if content != \"\":\n path = bencode.decode(content)[\"directory\"]\n # extract the path to the tied torrent file (not really tied, rather the torrent file with the same name)\n torrent_file = os.path.splitext(rtorrent_file)[0]\n if os.path.exists(torrent_file):\n if check_if_single_file_torrent(torrent_file):\n # for \"single-file\"-torrent the filename has to be taken from the tied torrent_file\n with open (torrent_file, \"r\") as f:\n content = f.read()\n single_file_name = bencode.decode(content)[\"info\"][\"name\"]\n assert os.path.isfile(os.path.join(path, single_file_name))\n debug(\"single-file torrent: \" + single_file_name)\n referenced.append(os.path.join(path, single_file_name))\n else: \n debug(\"multi-file torrent: \" + path)\n referenced.append(path)\n else:\n print(\"ERROR - missing torrent file: '\" + torrent_file + \"' for rtorrent file '\" + rtorrent_file + \"'\")\n else:\n print(\"ERROR - empty file\")\n\n print(\"found \" + str(len([x for x in referenced])) + \" files that were referenced\")\n print(\"found \" + str(len(set(downloads) - set(referenced))) + \" files that were not referenced\")\n\n not_referenced = list(set(downloads) - set(referenced))\n #not_referenced = [x for x in downloads if x not in referenced]\n\n if len(not_referenced) > 0:\n sizes = [get_dir_or_file_size(x) for x in not_referenced]\n total_size = sum(sizes)\n print 
(\"deleting all unreferenced files will free up \" + format_size(total_size) + \" of storage\")\n\n if args.dryrun_flag:\n print(\"Not referenced files:\")\n for path in not_referenced:\n print(path)\n else:\n print(\"unreferenced files will now be deleted (WARNING: DELETED FILES ARE NOT RECOVERABLE) continue? (yes/no) \",end=\"\")\n input = raw_input()\n if input == \"yes\":\n for path in not_referenced:\n delete_path(path)\n else:\n print(\"there are no files that are not referenced in rtorrent - exiting...\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"delete_unused_files.py","file_name":"delete_unused_files.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"642351565","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom xScrapyBots.config.AsstrConfig import *\n\n\nclass AsstrPipeline(object):\n\n def process_item(self, item, spider):\n if spider.name != \"asstrSpider\":\n return item\n name = item[NAME_ITEM]\n name = name + \".txt\"\n story = item[STORY_ITEM]\n path = item[PATH_ITEM]\n path = os.path.join(path, name)\n file = open(path, 'w')\n file.write(story)\n file.close()\n return item","sub_path":"xScrapyBots/pipelines/AsstrPipeline.py","file_name":"AsstrPipeline.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"375764700","text":"from flask import Flask, render_template, request\nfrom flaskext.babel import Babel\nimport time\n\napp = Flask(__name__)\napp.config.from_object('config.Settings')\nbabel = Babel(app)\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"home/index.html\");\n\n#@login_required\n@app.route(\"/add\", methods = ['POST', 'GET'])\ndef share():\n\tdata = {}\n\tdata['seed'] = time.time()\n\n\tif request.method == 'GET':\n\t\tdata['url'] = request.args.get('url')\n\t\tdata['text'] = request.args.get('text')\n\n\treturn render_template(\"share/index.html\", **data)\n\n@app.route(\"/<short>\")\ndef redirect(short):\n\treturn \"redirect %s\" % short\n\nif __name__ == \"__main__\":\n    app.run(**app.config['SERVER_CONFIG'])","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"194846914","text":"#!/usr/bin/python\nfrom os import listdir\nfrom os.path import isfile, join\n\nfilenames = [f for f in listdir(\"./\") if isfile(join(\"./\", f))]\n\nfor filename in filenames:\n with open(filename) as file:\n lines = file.readlines()\n with open(\"../\"+filename,'w') as out:\n out.write(\"NAME : \" + filename.split('.')[0] + \"\\n\" );\n out.write(\"COMMENT : \" + str(len(lines)) + \" locations in Brazil\\n\")\n out.write(\"COMMENT : Derived from IBGE datasets\\n\")\n out.write(\"TYPE : TSP\\n\")\n out.write(\"DIMENSION : \" + str(len(lines)) + \"\\n\")\n out.write(\"EDGE_WEIGHT_TYPE : EUC_2D\\n\")\n out.write(\"NODE_COORD_SECTION\\n\")\n for line in lines:\n out.write(line.replace(',',' '))\n out.write(\"EOF\")\n\n\n","sub_path":"instances/brazil/proccess.py","file_name":"proccess.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"228579409","text":"from setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nimport xchange\n\n\nclass PyTest(TestCommand):\n    user_options = [('pytest-args=', 'a', \"Arguments to pass 
to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = [\"--cov\", \"xchange\", \"tests/\"]\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import sys, pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='xchange',\n version=xchange.__version__,\n description=(\"Many cryptocurrency exchange APIs, a single and unified API client\"),\n url='https://github.com/martinzugnoni/xchange',\n download_url=(\n \"https://github.com/martinzugnoni/xchange/tarball/{version}\".format(\n version=xchange.__version__)),\n author='Martin Zugnoni',\n author_email='martin.zugnoni@gmail.com',\n license='MIT',\n packages=[\n 'xchange',\n 'xchange.clients',\n 'xchange.constants',\n 'xchange.models'\n ],\n maintainer='Martin Zugnoni',\n install_requires=[\n 'requests==2.18.4',\n 'cached-property==1.4.2',\n ],\n tests_require=[\n 'requests==2.18.4',\n 'cached-property==1.4.2',\n 'pytest==3.5.1',\n 'pytest-cov==2.5.1',\n 'responses==0.9.0',\n ],\n zip_safe=False,\n cmdclass={'test': PyTest},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"209793276","text":"# @copyright@\n# Copyright (c) 2006 - 2018 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n# @rocks@\n# Copyright (c) 2000 - 2010 The Regents of the University of California\n# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org\n# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt\n# @rocks@\n\nimport stack.commands\nfrom stack.exception import ArgRequired\n\n\nclass Command(stack.commands.set.host.command):\n\t\"\"\"\n\tSets the mac address for named interface on host.\n\n\t<arg type='string' name='host'>\n\tHost name.\n\t</arg>\n\n\t<param type='string' name='interface'>\n\tName of the interface.\n\t</param>\n\n\t<param type='string' name='mac'>\n\tThe mac address of the interface. Usually of the form dd:dd:dd:dd:dd:dd\n\twhere d is a hex digit. This format is not enforced. 
Use mac=NULL to\n\tclear the mac address.\n\t</param>\n\n\t<example cmd='set host interface mac backend-0-0 interface=eth1 mac=00:11:22:33:44:55'>\n\tSets the MAC Address for the eth1 device on host backend-0-0.\n\t</example>\n\t\"\"\"\n\t\n\tdef run(self, params, args):\n\n\t\t(interface, mac) = self.fillParams([\n\t\t\t('interface', None, True),\n\t\t\t('mac', None, True)\n\t\t\t])\n\n\t\tif not len(args):\n\t\t\traise ArgRequired(self, 'host')\n\n\t\tfor host in self.getHostnames(args):\n\t\t\tself.db.execute(\"\"\"\n\t\t\t\tupdate networks, nodes set \n\t\t\t\tnetworks.mac=NULLIF('%s','NULL') where\n\t\t\t\tnodes.name='%s' and networks.node=nodes.id and\n\t\t\t\tnetworks.device like '%s'\n\t\t\t\t\"\"\" % (mac, host, interface))\n\n","sub_path":"common/src/stack/command/stack/commands/set/host/interface/mac/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"228651565","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nclass sliderdemo(QWidget):\n def __init__(self,min, parent = None):\n super(sliderdemo, self).__init__(parent)\n self.min = min\n layout = QVBoxLayout()\n self.l1 = QLabel(\"Hello\")\n self.l1.setAlignment(Qt.AlignCenter)\n layout.addWidget(self.l1)\n\n self.sl = QSlider(Qt.Horizontal)\n self.sl.setMinimum(self.min)\n self.sl.setMaximum(30)\n self.sl.setValue(20)\n self.sl.setTickPosition(QSlider.TicksBelow)\n self.sl.setTickInterval(5)\n\n layout.addWidget(self.sl)\n self.sl.valueChanged.connect(self.valuechange)\n self.setLayout(layout)\n self.setWindowTitle(\"SpinBox demo\")\n\n def valuechange(self):\n size = self.sl.value()\n self.l1.setText('{}'.format(size))\n # self.l1.setFont(QFont(\"Arial\",size))\n\ndef main(min):\n app = QApplication(sys.argv)\n ex = sliderdemo(min)\n ex.show()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main(5)\n","sub_path":"lab8/testslider.py","file_name":"testslider.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"466748390","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D\nfrom keras.utils import np_utils\n\n\ndef data_mnist():\n \"\"\"\n Preprocess MNIST dataset\n :return:\n \"\"\"\n\n # These values are specific to MNIST\n img_rows = 28\n img_cols = 28\n nb_classes = 10\n\n # the data, shuffled and split between train and test sets\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n if keras.backend.image_dim_ordering() == 'th':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n else:\n X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n X_train /= 255\n X_test /= 255\n print('X_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_test = np_utils.to_categorical(y_test, nb_classes)\n return X_train, Y_train, X_test, Y_test\n\n\ndef 
model_mnist(logits=False, input_ph=None, img_rows=28, img_cols=28,\n nb_filters=64, nb_classes=10):\n \"\"\"\n Defines MNIST model using Keras sequential model\n :param logits: If set to False, returns a Keras model, otherwise will also\n return logits tensor\n :param input_ph: The TensorFlow tensor for the input\n (needed if returning logits)\n (\"ph\" stands for placeholder but it need not actually be a placeholder)\n :return:\n \"\"\"\n model = Sequential()\n\n if keras.backend.image_dim_ordering() == 'th':\n input_shape = (1, img_rows, img_cols)\n else:\n input_shape = (img_rows, img_cols, 1)\n\n layers = [Dropout(0.2, input_shape=input_shape),\n Convolution2D(nb_filters, 8, 8,\n subsample=(2, 2),\n border_mode=\"same\"\n ),\n Activation('relu'),\n Convolution2D(nb_filters * 2, 6, 6, subsample=(2, 2),\n border_mode=\"valid\"),\n Activation('relu'),\n Convolution2D(nb_filters * 2, 5, 5, subsample=(1, 1)),\n Activation('relu'),\n Dropout(0.5),\n Flatten(),\n Dense(nb_classes)]\n for layer in layers:\n model.add(layer)\n if logits:\n logits_tensor = model(input_ph)\n model.add(Activation('softmax'))\n\n if logits:\n return model, logits_tensor\n else:\n return model\n","sub_path":"cleverhans/utils_mnist.py","file_name":"utils_mnist.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"39847848","text":"\"\"\"App for google flow.\r\n\r\nThis application supports the google flow with the following use-cases:\r\n- Reads relevant temperatures in house\r\n- car heater\r\n - Sets time\r\n - Turn on/off\r\n\r\n\"\"\"\r\nfrom enum import Enum\r\nfrom typing import Tuple\r\nimport re\r\n\r\nfrom globals import HouseModes\r\nimport appdaemon.plugins.hass.hassapi as hass\r\n\r\nclass Intent(Enum):\r\n \"\"\"Provide enum for supported intents.\"\"\"\r\n TEMPERATURE = 'temperature'\r\n CAR_HEATER_TIME = 'car_heater_time'\r\n HOUSE_STATUS = 'house_status'\r\n TURN_ON = 'turn_on'\r\n TURN_OFF = 'turn_off'\r\n\r\nclass DialogFlow(hass.Hass):\r\n \"\"\"Provide dialog flow use-cases.\"\"\"\r\n def initialize(self) -> None:\r\n self._temperatur_sensorer = self.args.get('temperatur', {})\r\n self._heater_switch = self.args.get('heater_switch', {})\r\n self._rooms = self.args.get('rooms', {})\r\n self._tv = self.args.get('tv', {})\r\n \r\n self.register_endpoint(self.__api_call, 'dialogflow')\r\n\r\n def __api_call(self, data: dict) -> Tuple[dict, int]:\r\n \"\"\"Define endpoint for google dialog flow api to call\"\"\"\r\n intent = dlgflow_get_intent(data)\r\n self.log(\"INTENT : {}\".format(intent))\r\n \r\n if intent is None:\r\n self.log(\"DialogFlow error encountered: Result is empty\")\r\n return \"\", 201\r\n \r\n response = dlgflow_response(self.__respond_intent(intent, data))\r\n return response, 200\r\n\r\n def __respond_intent(self, intent: str, data: dict) -> str:\r\n \"\"\"Choose the correct action depending on intent type.\"\"\"\r\n if intent == Intent.TEMPERATURE.value:\r\n return self.__respond_temperature(data)\r\n elif intent == Intent.CAR_HEATER_TIME.value:\r\n return self.__respond_car_heater_set_time(data)\r\n elif intent == Intent.HOUSE_STATUS.value:\r\n return self.__respond_house_status(data)\r\n elif intent == Intent.TURN_ON.value:\r\n return self.__respond_turn_on(data)\r\n elif intent == Intent.TURN_OFF.value:\r\n return self.__respond_turn_off(data)\r\n else:\r\n return \"
<s>Känner inte igen kommandot.</s> <s>Snälla försök igen.</s>
\"\r\n\r\n def __respond_turn_on(self, data: dict) -> str:\r\n \"\"\"Turn on device.\"\"\"\r\n device = dlgflow_get_parameter(data, 'devices')\r\n if not device:\r\n return \"Vad vill du slå på?\"\r\n device = device.replace(\" \", \"-\")\r\n self.log(\"Turn on device {}\".format(device))\r\n \r\n if device == \"Tv\":\r\n self.log_to_logbook('DialogFlow', \"Slår på TV {}\".format(self.friendly_name(self._tv)))\r\n self.turn_on(entity_id=self._tv)\r\n elif device == \"motorvärmare\":\r\n self.log_to_logbook('DialogFlow', \"Slår på motorvärmaren {}\".format(self.friendly_name(self._heater_switch)))\r\n self.turn_on(entity_id=self._heater_switch)\r\n elif device in self._rooms:\r\n self.log_to_logbook('DialogFlow', \"Slår på {}\".format(self.friendly_name(self._rooms[device])))\r\n self.turn_on(entity_id=self._rooms[device])\r\n else:\r\n return \"Vet inte hur jag slår på {}\".format(device)\r\n \r\n return \"Slår på {}\".format(device)\r\n\r\n def __respond_turn_off(self, data: dict) -> str:\r\n \"\"\"Turn off device.\"\"\"\r\n device = dlgflow_get_parameter(data, 'devices')\r\n if not device:\r\n return \"Vad vill du stänga av?\"\r\n device = device.replace(\" \", \"-\")\r\n self.log(\"Turn off device {}\".format(device))\r\n \r\n if device == \"Tv\":\r\n self.log_to_logbook('DialogFlow', \"Slår av TV {}\".format(self.friendly_name(self._tv)))\r\n self.turn_off(entity_id=self._tv)\r\n elif device == \"motorvärmare\":\r\n self.log_to_logbook('DialogFlow', \"Slår av motorvärmaren {}\".format(self.friendly_name(self._heater_switch)))\r\n self.turn_off(entity_id=self._heater_switch)\r\n elif device in self._rooms:\r\n self.log_to_logbook('DialogFlow', \"Slår av {}\".format(self.friendly_name(self._rooms[device])))\r\n self.turn_off(entity_id=self._rooms[device])\r\n else:\r\n return \"Vet inte hur jag stänger av {}\".format(device)\r\n \r\n return \"Stänger av {}\".format(device)\r\n\r\n def __respond_temperature(self, data: dict) -> str:\r\n \"\"\"Respond with temperatures around the house.\"\"\"\r\n temp_outside = round(float(self.get_state(self._temperatur_sensorer['ute'])))\r\n temp_inside = round(float(self.get_state(self._temperatur_sensorer['inne'])))\r\n return \"
<s>Temperaturen ute är {} grader.</s> <s>Innetemperaturen är {} grader.</s>
\".format(temp_outside, temp_inside)\r\n\r\n def __respond_car_heater_set_time(self, data: dict) -> str:\r\n \"\"\"Sets time for car heater depending on parameter.\"\"\"\r\n date_time_parameter = dlgflow_get_parameter(data, 'date-time')\r\n if date_time_parameter:\r\n self.log(\"TIME: {}\".format(date_time_parameter['date_time']))\r\n time = self.convert_utc(date_time_parameter['date_time'])\r\n self.set_state(entity_id='input_number.car_heater_dep_time_hour', state=time.hour)\r\n self.set_state(entity_id='input_number.car_heater_dep_time_minutes', state=time.minute)\r\n return \"Sätter motorvärmare till tiden {}:{}\".format(time.hour, time.minute)\r\n else:\r\n return \"
<s>Förstår inte tiden.</s> <s>Försök igen.</s>
\"\r\n\r\n def __respond_car_heater_status(self, data: dict) -> str:\r\n \"\"\"Turn on/off heater.\"\"\"\r\n command = dlgflow_get_parameter(data, 'command')\r\n if command:\r\n if command == 'Stäng av':\r\n self.turn_off(entity_id=self._heater_switch)\r\n return \"Stänger av motorvärmaren.\"\r\n elif command == 'Sätt på':\r\n self.turn_on(entity_id=self._heater_switch)\r\n return \"Sätter på motorvärmaren i tre timmar.\"\r\n else:\r\n return \"
<s>Förstår inte kommandot.</s> <s>Försök igen.</s>
\"\r\n\r\n def __respond_house_status(self, data: dict) -> str:\r\n command = dlgflow_get_parameter(data, 'house_status')\r\n if command:\r\n if command == 'god natt':\r\n self.set_state(entity_id='input_select.house_mode_select', state=HouseModes.night.value)\r\n return \"Sov gott\"\r\n elif command == 'god morgon':\r\n self.set_state(entity_id='input_select.house_mode_select', state=HouseModes.morning.value)\r\n return \"God morgon.\"\r\n elif command == 'god kväll':\r\n self.set_state(entity_id='input_select.house_mode_select', state=HouseModes.evening.value)\r\n return \"God kväll.\"\r\n elif command == 'god dag':\r\n self.set_state(entity_id='input_select.house_mode_select', state=HouseModes.day.value)\r\n return \"Goddag, ha det bra.\"\r\n else:\r\n return \"
<s>Förstår inte kommandot {} för husstatus.</s> <s>Försök igen.</s>
\".format(command)\r\n else:\r\n return \"
<s>Förstår inte kommandot för husstatus.</s> <s>Försök igen.</s>
\"\r\n\r\n \r\n\r\ndef clean_tags(text_with_tags):\r\n \"\"\"Clean all tags from the ssml.\"\"\"\r\n cleanr = re.compile('<.*?>')\r\n cleantext = re.sub(cleanr, '', text_with_tags)\r\n return cleantext\r\n\r\ndef dlgflow_get_parameter(data, parameter: str):\r\n \"\"\"Return parameter from webhook api v2.0\"\"\"\r\n return data['queryResult']['parameters'].get(parameter, None)\r\n\r\ndef dlgflow_get_intent(data) -> str:\r\n \"\"\"Return the intent from webhook api v2.0\"\"\"\r\n return data['queryResult']['action']\r\n\r\ndef dlgflow_response(message):\r\n \"\"\"Return dialogflow fulfillment response v2.0 API using ssml.\r\n \r\n ssml is a way to get more natural sounding voice. See\r\n https://developers.google.com/actions/reference/ssml\r\n to view what it can do in detail.\r\n \"\"\"\r\n return \\\r\n {\r\n 'fulfillmentText': clean_tags(message),\r\n 'fulfillmentMessages': [\r\n {\r\n 'platform': \"ACTIONS_ON_GOOGLE\",\r\n 'simpleResponses': {\r\n 'simpleResponses': [\r\n { \r\n 'ssml': \"<speak>{}</speak>\".format(message)\r\n }\r\n ] \r\n } \r\n \r\n \r\n }\r\n ],\r\n \"source\": \"appdaemon\"\r\n }","sub_path":"appdaemon/apps/google/dialogflow.py","file_name":"dialogflow.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"333530823","text":"# Simple demo of the PCA9685 PWM servo/LED controller library.\n# This will move channel 0 from min to max position repeatedly.\n# Author: Kareem Elzeftawy\n\n#set_pwm(channel, on, off)\n#channel: The channel that should be updated with the new values (0..15)\n#on: The tick (between 0..4095) when the signal should transition from low to high\n#off: the tick (between 0..4095) when the signal should transition from high to low\n\n#set_pwm_freq(freq)\n#freq: A number representing the frequency in Hz, between 40 and 1000\n\nfrom __future__ import division\nimport time\n\n# Import the PCA9685 module.\nimport Adafruit_PCA9685\n\n# Uncomment to enable debug output.\n#import logging\n#logging.basicConfig(level=logging.DEBUG)\n\n# Initialise the PCA9685 using the default address (0x40).\npwm = Adafruit_PCA9685.PCA9685()\n\n# Alternatively specify a different address and/or bus:\n#pwm = Adafruit_PCA9685.PCA9685(address=0x41, busnum=2)\n\n# Configure min and max servo pulse lengths\nservo_min = 150 # Min pulse length out of 4096\nservo_max = 600 # Max pulse length out of 4096\n\n# Set frequency to 60hz, good for servos.\npwm.set_pwm_freq(60)\n\ndef Motor1():\n # Move servo on channel 0 between extremes.\n \n for pulselen in range (servo_min, servo_max, +1):\n pwm.set_pwm(0, 0, pulselen)\n\n time.sleep(500)\n \n for pulselen in range (servo_max, servo_min, -1):\n pwm.set_pwm(0, 0, pulselen)\n\n time.sleep(500)\n\ndef Motor2():\n # Move servo on channel 15 between extremes.\n \n for pulselen in range (servo_min, servo_max, +1):\n pwm.set_pwm(15, 0, pulselen)\n\n time.sleep(500)\n \n for pulselen in range (servo_max, servo_min, -1):\n pwm.set_pwm(15, 0, pulselen)\n\n time.sleep(500)\n\nwhile True:\n print('Moving servo1 on channel 0, press Ctrl-C to quit...')\n Motor1()\n print('Moving servo2 on channel 15, press Ctrl-C to quit...')\n Motor2()\n","sub_path":"Firmware/Test_two_Servo_Motors_using_Adafruit_driver.py","file_name":"Test_two_Servo_Motors_using_Adafruit_driver.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"545449664","text":"# Create a function that takes a list as a 
parameter,\n# and returns a new list with every second element from the original list\n# It should raise an error if the parameter is not a list\n# example: [1, 2, 3, 4, 5] should produce [2, 4]\n\ndef get_every_second(basic_list):\n if type(basic_list) is list:\n selected_numbers = list()\n for i in range(len(basic_list)):\n if i % 2 != 0:\n selected_numbers.append(basic_list[i])\n return selected_numbers\n else:\n raise TypeError('Give me a list')\n\nmy_list = [1, 2, 3, 4, 5]\nprint(get_every_second(my_list))\n","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"449982309","text":"import numpy as np\n\nclass Augment:\n def __init__(self,rotate_range):\n self.rotate=[]\n \n def __call__(self,batch_imgs):\n return batch_imgs\n\ndef AugFunc(b_imgs):\n # b_imgs[b,h,w,c]\n # extend 4 edges in h/w\n expand_imgs=np.pad(b_imgs,((0,0),(4,4),(4,4),(0,0)),mode='constant')\n new_imgs=np.zeros_like(b_imgs)\n b=b_imgs.shape[0]\n w_rand=np.random.randint(0,8,b)\n h_rand=np.random.randint(0,8,b)\n f_rand=np.random.randint(0,2,b)\n for i in range(b): \n temp=expand_imgs[i,h_rand[i]:h_rand[i]+32,w_rand[i]:w_rand[i]+32,:]\n if f_rand[i] == 1:\n new_imgs[i,:]=np.flip(temp,1)\n else:\n new_imgs[i,:]=temp\n return new_imgs","sub_path":"Augment.py","file_name":"Augment.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"262876841","text":"from __future__ import division\n\nimport Tkinter\nimport datetime\nimport urllib\n\ndef connected(): # prueft, ob Internetverbindung vorhanden (google.de erreichbar)\n try:\n urllib.urlopen(\"http://www.google.de\")\n except IOError:\n return False\n else:\n return True\n \n \nclass Programmdurchlauf(object):\n\n def __init__(self, logdatei):\n\n self._stop = False\n self._logdatei = logdatei\n \n with open(self._logdatei, \"a\") as datei: # schreibt Datum und Uhrzeit des Programmstarts in Datei, ausserdem, ob Internetverbindung vorhanden\n jetzt = datetime.datetime.now()\n datei.write(jetzt.strftime(\"%d.%m.%Y\\n\"))\n self._inet = connected()\n if self._inet:\n datei.write(jetzt.strftime(\"%H:%M:%S - Internet da\\n\"))\n self._online = 1\n self._offline = 0\n else:\n datei.write(jetzt.strftime(\"%H:%M:%S - Internet weg\\n\"))\n self._online = 0\n self._offline = 1\n\n def gui_erstellen(self): # erstellt die GUI\n self._root = Tkinter.Tk() \n self._root.title(\"inetconnection\")\n self._stop_button = Tkinter.Button(self._root, text=\"Quit\", command = self.ende)\n self._start_button = Tkinter.Button(self._root, text=\"Start\", command = self.check_inet_connection)\n self._start_button.pack()\n self._stop_button.pack()\n self._label = Tkinter.Label(self._root, text=\"bitte auf Start klicken\")\n self._label.pack()\n self._root.mainloop()\n \n def check_inet_connection(self): # prueft jede Minute die Internetverbindung, schreibt bei Aenderung Ergebnis in Datei und zaehlt Online- und Offline-Minuten\n if not self._stop:\n self._label.config(text = \"Programm lauft...\")\n with open(self._logdatei, \"a\") as datei:\n jetzt = datetime.datetime.now()\n inet_neu = connected()\n if inet_neu:\n if inet_neu != self._inet:\n datei.write(jetzt.strftime(\"%H:%M:%S - Internet da\\n\"))\n self._online = self._online + 1\n else:\n if inet_neu != self._inet:\n datei.write(jetzt.strftime(\"%H:%M:%S - Internet weg\\n\"))\n self._offline = self._offline + 1\n self._inet = 
inet_neu\n self._root.after(60000, self.check_inet_connection)\n\n def ende(self): # beendet die Schleife zur Pruefung der Interntverbindung, schreibt gesamte Online- und Offline-Zeit in Datei\n self._stop = True\n jetzt = datetime.datetime.now()\n with open(self._logdatei, \"a\") as datei:\n datei.write(jetzt.strftime(\"%H:%M:%S - Ende\\n\"))\n self._onlineprozent = round(self._online / (self._online + self._offline) * 100, 1)\n self._offlineprozent = round(self._offline / (self._online + self._offline) * 100, 1)\n datei.write(\"online: {} Minuten ({}%), offline: {} Minuten ({}%)\\n\".format(self._online, self._onlineprozent, self._offline, self._offlineprozent))\n self._label.config(text = \"Programm kann geschlossen werden\")\n\n\ndef main():\n a = Programmdurchlauf(\"inetconnection.txt\")\n a.gui_erstellen()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"inetconnection.pyw","file_name":"inetconnection.pyw","file_ext":"pyw","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"194749448","text":"from os import system\n\n#definition d'une fonction qui enleve tout le texte du terminal\ndef clear():\n '''clear console'''\n return system('cls')\n\nw, h = 7, 6\n#w : largeur du tableau\n#h : hauteur du tableau\ncase_vide = '.'\nespacement_cases_v = ' '\n\nMatrix = [[case_vide for x in range(w)] for y in range(h)] \npion = 'X'\n\n\nwhile True:\n\n colonne_choisit = int(input(\"quelle colonne ? \\nentrer un nombre de 1 à 7: \"))-1\n clear()\n\n for i in range(0,6):\n\n #print(\" le i est \" , i, 'et le matrix[i][col est ]',Matrix[i][colonne_choisit])\n if Matrix[i][colonne_choisit] == case_vide and i == 5:\n Matrix[5][colonne_choisit] = pion\n break\n \n if Matrix[0][colonne_choisit] == pion: \n print('you can\\'t overflow the board, that\\'s rude >:(')\n break\n \n if Matrix[i][colonne_choisit] != case_vide:\n Matrix[i-1][colonne_choisit] = pion\n break\n \n\n\n #recherche de gagné sur les colonnes\n for x in range(2):\n for y in range(6):\n if pion == Matrix[x][y] == Matrix[x+1][y] == Matrix[x+2][y] == Matrix[x+3][y]:\n print(\"pion:{} ={} = {} = {} = {}\".format(pion,Matrix[x][y],Matrix[x+1][y],Matrix[x+2][y],Matrix[x+3][y]))\n print(\"collonne\")\n gagné = True\n\n #recherche de gagné sur les lignes\n for x in range(6):\n for y in range(3):\n if pion == Matrix[x][y] == Matrix[x][y+1] == Matrix[x][y+2] == Matrix[x][y+3]:\n print(\"pion:{} ={} = {} = {} = {}\".format(pion,Matrix[x][y],Matrix[x][y+1],Matrix[x][y+2],Matrix[x][y+3]))\n print(\"ligne \")\n gagné = True\n #if pion == Matrix[5][0] == Matrix[4][0] == Matrix[3][0] == Matrix[2][0]:\n # print(pion,Matrix[5][0],Matrix[4][0])\n # print(\"ok cuumer\")\n \n#affiche le tableau \n for i in range(6):\n print(espacement_cases_v.join(map(str, Matrix[i])),) #convertie chaque caractere de matrix en str et le regroupe en une ligne séparé par 2espaces \n \n","sub_path":"puissance4_v3-test.py","file_name":"puissance4_v3-test.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"361108284","text":"import struct\n\nimport numpy as np\n\n\nDEFAULT_BLOCK_SIZE = 1 << 22 #: Decompressed block size in bytes, 4MiB\n\n\ndef validate(compression):\n \"\"\"\n Validate the compression string.\n\n Parameters\n ----------\n compression : str, bytes or None\n\n Returns\n -------\n compression : str or None\n In canonical form.\n\n Raises\n ------\n ValueError\n \"\"\"\n if not 
compression or compression == b'\\0\\0\\0\\0':\n return None\n\n if isinstance(compression, bytes):\n compression = compression.decode('ascii')\n\n compression = compression.strip('\\0')\n if compression not in ('zlib', 'bzp2', 'lz4', 'blsc', 'input'):\n raise ValueError(\n \"Supported compression types are: 'zlib', 'bzp2', 'lz4', 'blsc', or 'input'\")\n\n return compression\n\n\nclass Lz4Compressor:\n def __init__(self, block_api):\n self._api = block_api\n\n def compress(self, data):\n output = self._api.compress(data, mode='high_compression')\n header = struct.pack('!I', len(output))\n return header + output\n\n\nclass Lz4Decompressor:\n def __init__(self, block_api):\n self._api = block_api\n self._size = 0\n self._pos = 0\n self._buffer = b''\n\n def decompress(self, data):\n if not self._size:\n data = self._buffer + data\n if len(data) < 4:\n self._buffer += data\n return b''\n self._size = struct.unpack('!I', data[:4])[0]\n data = data[4:]\n self._buffer = bytearray(self._size)\n if self._pos + len(data) < self._size:\n self._buffer[self._pos:self._pos + len(data)] = data\n self._pos += len(data)\n return b''\n else:\n offset = self._size - self._pos\n self._buffer[self._pos:] = data[:offset]\n data = data[offset:]\n self._size = 0\n self._pos = 0\n output = self._api.decompress(self._buffer)\n self._buffer = b''\n return output + self.decompress(data)\n\n\nclass BloscCompressor:\n def __init__(self, blosc, typesize=1, clevel=1, shuffle='shuffle', cname='zstd', nthreads=1, blocksize=512*1024):\n self.blosc = blosc\n self.typesize = typesize # dtype size in bytes, e.g. 8 for int64\n self.clevel = clevel # compression level, usually only need lowest for zstd\n self.cname = cname # compressor name, default zstd, good performance/compression tradeoff\n if shuffle == 'shuffle':\n self.shuffle = blosc.SHUFFLE\n elif shuffle == 'bitshuffle':\n self.shuffle = blosc.BITSHUFFLE\n else:\n self.shuffle = blosc.NOSHUFFLE\n\n # These could someday be user-configurable\n blosc.set_nthreads(nthreads)\n blosc.set_blocksize(blocksize)\n\n #print(f'blosc configured with typesize {typesize}, shuffle {shuffle}, blocksize {blocksize/(1<<20):.2f} MB')\n\n\n def compress(self, data):\n if data.nbytes > 2147483631: # ~2 GB\n # This should never happen, because we compress in blocks that are 4 MiB\n raise ValueError(\"data blocks must be smaller than 2147483631 bytes due to internal blosc limitations\")\n if self.typesize == 'auto':\n this_typesize = data.itemsize\n else:\n this_typesize = self.typesize\n assert this_typesize != 1\n compressed = self.blosc.compress(data, typesize=this_typesize, clevel=self.clevel, shuffle=self.shuffle, cname=self.cname)\n header = struct.pack('!I', len(compressed))\n # TODO: this probably triggers a data copy, feels inefficient. 
Probably have to add output array arg to blosc to fix\n return header + compressed # bytes type\n\nimport time\nclass BloscDecompressor:\n tottime = 0.\n def __init__(self, blosc, nthreads=1):\n self.blosc = blosc\n self._size = 0\n self._pos = 0\n self._buffer = None\n self._partial_len = b''\n\n blosc.set_nthreads(nthreads)\n\n def decompress_into(self, data, out):\n bytesout = 0\n data = memoryview(data) # don't copy on slice\n while len(data):\n if not self._size:\n # Don't know the (compressed) length of this block yet\n if len(self._partial_len) + len(data) < 4:\n self._partial_len += data\n break # we've exhausted the data\n if self._partial_len:\n # If we started to fill a len key, finish filling it\n remaining = 4-len(self._partial_len)\n if remaining:\n self._partial_len += data[:remaining]\n data = data[remaining:]\n self._size = struct.unpack('!I', self._partial_len)[0]\n self._partial_len = b''\n else:\n # Otherwise just read the len key directly\n self._size = struct.unpack('!I', data[:4])[0]\n data = data[4:]\n \n if len(data) < self._size or self._buffer is not None:\n # If we have a partial block, or we're already filling a buffer, use the buffer\n if self._buffer is None:\n self._buffer = np.empty(self._size, dtype=np.byte) # use numpy instead of bytearray so we can avoid zero initialization\n self._pos = 0\n newbytes = min(self._size - self._pos, len(data)) # don't fill past the buffer len!\n self._buffer[self._pos:self._pos+newbytes] = np.frombuffer(data[:newbytes], dtype=np.byte)\n self._pos += newbytes\n data = data[newbytes:]\n \n if self._pos == self._size:\n start = time.perf_counter()\n n_thisout = self.blosc.decompress_ptr(memoryview(self._buffer), out.ctypes.data + bytesout)\n BloscDecompressor.tottime += time.perf_counter() - start\n bytesout += n_thisout\n self._buffer = None\n self._size = 0\n else:\n # We have at least one full block\n start = time.perf_counter()\n n_thisout = self.blosc.decompress_ptr(memoryview(data[:self._size]), out.ctypes.data + bytesout)\n BloscDecompressor.tottime += time.perf_counter() - start\n bytesout += n_thisout\n data = data[self._size:]\n self._size = 0\n\n return bytesout\n\n\ndef _get_decoder(compression, **kwargs):\n if compression == 'zlib':\n try:\n import zlib\n except ImportError:\n raise ImportError(\n \"Your Python does not have the zlib library, \"\n \"therefore the compressed block in this ASDF file \"\n \"can not be decompressed.\")\n return zlib.decompressobj()\n elif compression == 'bzp2':\n try:\n import bz2\n except ImportError:\n raise ImportError(\n \"Your Python does not have the bz2 library, \"\n \"therefore the compressed block in this ASDF file \"\n \"can not be decompressed.\")\n return bz2.BZ2Decompressor()\n elif compression == 'lz4':\n try:\n import lz4.block\n except ImportError:\n raise ImportError(\n \"lz4 library in not installed in your Python environment, \"\n \"therefore the compressed block in this ASDF file \"\n \"can not be decompressed.\")\n return Lz4Decompressor(lz4.block)\n elif compression == 'blsc':\n try:\n import blosc\n except ImportError:\n raise ImportError(\n 'blosc library not installed in your Python environment, '\n 'therefore the compressed block in this ASDF file '\n 'can not be decompressed. 
Install with: \"pip install python-blosc\"')\n return BloscDecompressor(blosc, **kwargs)\n else:\n raise ValueError(\n \"Unknown compression type: '{0}'\".format(compression))\n\n\ndef _get_encoder(compression, **kwargs):\n '''\n `compression` is the name of the compression,\n `typesize` is the size in bytes of the data type. This information is used\n to increase the effectiveness of the compression. Presently only used for `blosc`.\n '''\n if compression == 'zlib':\n try:\n import zlib\n except ImportError:\n raise ImportError(\n \"Your Python does not have the zlib library, \"\n \"therefore the block in this ASDF file \"\n \"can not be compressed.\")\n return zlib.compressobj()\n elif compression == 'bzp2':\n try:\n import bz2\n except ImportError:\n raise ImportError(\n \"Your Python does not have the bz2 library, \"\n \"therefore the block in this ASDF file \"\n \"can not be compressed.\")\n return bz2.BZ2Compressor()\n elif compression == 'lz4':\n try:\n import lz4.block\n except ImportError:\n raise ImportError(\n \"lz4 library in not installed in your Python environment, \"\n \"therefore the block in this ASDF file \"\n \"can not be compressed.\")\n return Lz4Compressor(lz4.block)\n elif compression == 'blsc':\n try:\n import blosc\n except ImportError:\n raise ImportError(\n \"blosc library not installed in your Python environment, \"\n \"therefore the block in this ASDF file \"\n 'can not be compressed. Install with: \"pip install python-blosc\"')\n return BloscCompressor(blosc, **kwargs)\n else:\n raise ValueError(\n \"Unknown compression type: '{0}'\".format(compression))\n\n\ndef to_compression_header(compression):\n \"\"\"\n Converts a compression string to the four byte field in a block\n header.\n \"\"\"\n if not compression:\n return b''\n\n if isinstance(compression, str):\n return compression.encode('ascii')\n\n return compression\n\n\ndef decompress(fd, used_size, data_size, compression):\n \"\"\"\n Decompress binary data in a file\n\n Parameters\n ----------\n fd : generic_io.GenericIO object\n The file to read the compressed data from.\n\n used_size : int\n The size of the compressed data\n\n data_size : int\n The size of the uncompressed data\n\n compression : str\n The compression type used.\n\n Returns\n -------\n array : numpy.array\n A flat uint8 containing the decompressed data.\n \"\"\"\n buffer = np.empty((data_size,), np.uint8)\n\n compression = validate(compression)\n decoder = _get_decoder(compression, **global_decompression_options)\n\n i = 0\n for block in fd.read_blocks(used_size):\n if hasattr(decoder, 'decompress_into'):\n i += decoder.decompress_into(block, out=buffer[i:])\n else:\n decoded = decoder.decompress(block)\n if i + len(decoded) > data_size:\n raise ValueError(\"Decompressed data too long\")\n buffer.data[i:i+len(decoded)] = decoded\n i += len(decoded)\n\n if hasattr(decoder, 'flush'):\n decoded = decoder.flush()\n if i + len(decoded) > data_size:\n raise ValueError(\"Decompressed data too long\")\n buffer[i:i+len(decoded)] = decoded\n i += len(decoded)\n \n if hasattr(decoder, '_buffer'):\n assert decoder._buffer is None\n if i != data_size:\n raise ValueError(\"Decompressed data wrong size\")\n #print(BloscDecompressor.tottime)\n\n return buffer\n\n\nglobal_compression_options = {}\ndef set_compression_options(**kwargs):\n global global_compression_options\n global_compression_options = kwargs.copy()\n\nglobal_decompression_options = {}\ndef set_decompression_options(**kwargs):\n global global_decompression_options\n 
global_decompression_options = kwargs.copy()\n\n\ndef compress(fd, data, compression, block_size=DEFAULT_BLOCK_SIZE):\n \"\"\"\n Compress array data and write to a file.\n\n Parameters\n ----------\n fd : generic_io.GenericIO object\n The file to write to.\n\n data : buffer\n The buffer of uncompressed data.\n\n compression : str\n The type of compression to use.\n\n block_size : int, optional\n Input data will be split into blocks of this size (in bytes) before compression.\n \"\"\"\n compression = validate(compression)\n #if type(data) is np.memmap:\n # raise NotImplementedError(\"memmap doesn't know about itemsize!\") # TODO\n #print(type(data), data.itemsize)\n #if data.itemsize == 1:\n # raise ValueError(\"itemsize detection failed\") # TODO: propagate this information down in all cases!\n\n #typesize = data.itemsize\n #shuffle = 'bitshuffle'\n #if typesize == 8:\n # shuffle = 'bitshuffle'\n #print(f'using typesize {typesize}, shuffle {shuffle}')\n\n block_size = global_compression_options.pop('asdf_block_size', block_size)\n encoder = _get_encoder(compression, **global_compression_options)\n\n # We can have numpy arrays here. While compress() will work with them,\n # it is impossible to split them into fixed size blocks without converting\n # them to bytes.\n if isinstance(data, np.ndarray):\n #data = data.tobytes()\n data = memoryview(data.reshape(-1)) # TODO: is it okay to use a view instead of a copy here?\n\n nelem = block_size // data.itemsize\n for i in range(0, len(data), nelem):\n fd.write(encoder.compress(data[i:i+nelem]))\n if hasattr(encoder, \"flush\"):\n fd.write(encoder.flush())\n\n\ndef get_compressed_size(data, compression, block_size=DEFAULT_BLOCK_SIZE):\n \"\"\"\n Returns the number of bytes required when the given data is\n compressed.\n\n Parameters\n ----------\n data : buffer\n\n compression : str\n The type of compression to use.\n\n block_size : int, optional\n Input data will be split into blocks of this size (in bytes) before the compression.\n\n Returns\n -------\n bytes : int\n \"\"\"\n compression = validate(compression)\n encoder = _get_encoder(compression, typesize=data.itemsize)\n\n l = 0\n for i in range(0, len(data), block_size):\n l += len(encoder.compress(data[i:i+block_size]))\n if hasattr(encoder, \"flush\"):\n l += len(encoder.flush())\n\n return l\n","sub_path":"asdf/compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":14505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"425027768","text":"import os\nimport sqlite3\nfrom urllib.parse import urlparse\n\nimport psycopg2\nfrom sqlalchemy import create_engine\n\nimport pandas as pd\n\n\ndef create_tables():\n commands = (\n '''\n BEGIN;\n DROP TABLE IF EXISTS review;\n DROP TABLE IF EXISTS job;\n DROP TABLE IF EXISTS employer;\n CREATE TABLE employer (\n id SERIAL,\n name character varying,\n description text,\n address character varying,\n city character varying,\n state character varying,\n zip integer,\n CONSTRAINT employer_pkey PRIMARY KEY (id)\n );\n CREATE TABLE job (\n id SERIAL,\n title character varying,\n description text,\n salary integer,\n employer_id integer,\n CONSTRAINT job_pkey PRIMARY KEY (id),\n CONSTRAINT fk_employer_id FOREIGN KEY (employer_id)\n REFERENCES public.employer (id) MATCH SIMPLE\n ON UPDATE NO ACTION\n ON DELETE NO ACTION\n );\n CREATE TABLE review\n (\n id SERIAL,\n review text,\n rating integer,\n title character varying,\n date date,\n status character varying,\n employer_id 
integer,\n CONSTRAINT review_pkey PRIMARY KEY (id),\n CONSTRAINT fk_employer_id FOREIGN KEY (employer_id)\n REFERENCES public.employer (id) MATCH SIMPLE\n ON UPDATE NO ACTION\n ON DELETE NO ACTION\n );\n '''\n )\n\n\n conn = None\n try:\n uri = os.environ['HEROKU_DB_URI']\n uri = urlparse(uri)\n db = f'dbname={uri.path[1:]} user={uri.username} password={uri.password} host={uri.hostname}'\n conn = psycopg2.connect(db)\n cur = conn.cursor()\n cur.execute(commands)\n cur.close()\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\ndef insert_data():\n con = sqlite3.connect('db/jobs.sqlite')\n tables = ['employer', 'job', 'review']\n #uri = os.environ['HEROKU_DB_URI']\n uri = 'postgres://cgmmupxrjlnuan:8dc73c9cdd9fdff80dbbabbe9688cd3f6978c0e6122cc506be3f251e835af66d@ec2-54-197-48-79.compute-1.amazonaws.com:5432/d3pqa056b627vd'\n for table in tables:\n # Import table in a DataFrame\n query = f\"SELECT * FROM {table}\"\n df = pd.read_sql(query, con)\n \n # Discard id column of each table\n df = df.iloc[:, 1:]\n \n # Export DataFrame to table in postgresql database\n engine = create_engine(uri)\n df.to_sql(table, engine, index=False, if_exists='append')\n\n con.close()\n\nif __name__ == '__main__':\n create_tables()\n insert_data()\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"145219122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 19 16:39:57 2015\n\n@author: adrien Bufort\n\"\"\"\n\n## Ok this time .. we save the script\n## We import the dataset\n\nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\nimport matplotlib.pyplot as plt\nfrom sklearn import ensemble\nfrom sklearn import preprocessing\n \n#%%\n# We import the data into proper format\ntrain = pd.read_csv(\"/Volumes/Allen/R/KaggleCrime/train.csv\")\ntest = pd.read_csv(\"/Volumes/Allen/R/KaggleCrime/test.csv\")\n\n#%%\n\n## We look at some value\nprint(train.head())\nprint(train.dtypes)\n\ndef getDays(i):\n Days = (float(i[5:7])-1)*30 + float(i[8:10])\n return(Days)\n \ndef getTimeInDays(i):\n TimeInDays = float(i[11:13])\n return(TimeInDays)\n \ndef getStreet(Adress):\n return(Adress.split()[len(Adress.split())-2])\n \ndef getTypeOfStreet(Adress):\n return(Adress.split()[len(Adress.split())-1])\n \n \ndef deleteNoStreet(trainStreet,unique,counts):\n uniqueToDelete = unique[counts < 1000]\n \n for i in uniqueToDelete:\n trainStreet[trainStreet == i] = 'other3'\n \n return(trainStreet)\n#%% \ndef deleteNameStreet(trainStreet,unique,counts):\n uniqueToDelete = unique[counts < 1000]\n \n for i in uniqueToDelete:\n trainStreet[trainStreet == i] = 'other2'\n \n return(trainStreet)\n#%%\nprint(train['Address'][1].split()[len(train['Address'][1].split())-1])\n\ntrainStreet = np.asarray(map(getTypeOfStreet,train.Address))\ntrainNameStreet = np.asarray(map(getStreet,train.Address))\n\nunique, counts = np.unique(trainStreet, return_counts=True)\n\ntrainStreet = deleteNoStreet(trainStreet,unique,counts)\n\n#%%\nunique, counts = np.unique(trainNameStreet, return_counts=True)\n\ntrainNameStreet = deleteNameStreet(trainNameStreet,unique,counts)\nprint(len(np.unique(trainNameStreet)))\n\n\n#%%\n\nprint(np.unique(trainNameStreet))\n\n#%%\n## We get some information about the period\ntrain_Days = np.asarray(map(getDays,train.Dates))\ntrain_TimeInDays = np.asarray(map(getTimeInDays,train.Dates)) 
\n\n\n\n#%%\n\n# Work on the data\ntrain_TimeInDaysCos = np.cos(train_TimeInDays*2*np.pi/23)\ntrain_TimeInDaysSin = np.sin(train_TimeInDays*2*np.pi/23)\n\ntrain_DaysCos = np.cos(train_Days*2*np.pi/365)\ntrain_DaysSin = np.sin(train_Days*2*np.pi/365)\n\ntest_Days = np.asarray(map(getDays,test.Dates))\ntest_TimeInDays = np.asarray(map(getTimeInDays,test.Dates)) \n\ntest_TimeInDaysCos = np.cos(test_TimeInDays*2*np.pi/23)\ntest_TimeInDaysSin = np.sin(test_TimeInDays*2*np.pi/23)\n\ntest_DaysCos = np.cos(test_Days*2*np.pi/365)\ntest_DaysSin = np.sin(test_Days*2*np.pi/365)\n\n#%%\n\ntrain['DaysCos'] = train_DaysCos\ntrain['TimeInDaysCos'] = train_TimeInDaysCos\ntrain['DaysSin'] = train_DaysSin\ntrain['TimeInDaysSin'] = train_TimeInDaysSin\n\ntest['DaysCos'] = test_DaysCos\ntest['TimeInDaysCos'] = test_TimeInDaysCos\ntest['DaysSin'] = test_DaysSin\ntest['TimeInDaysSin'] = test_TimeInDaysSin\n\nprint(train.dtypes)\n\n#%% Some functon here\n\ndef plot_value(train_vl):\n train_vl.value_counts().plot(kind='bar')\n\ndef plotting_all(train_Location):\n train_Location_Cat = train_Location.Category.unique()\n plt.plot()\n \n for i in range(len(train_Location_Cat)):\n plt.plot(train_Location.X[train_Location.Category == train_Location_Cat[i]],train_Location.Y[train_Location.Category == train_Location_Cat[i]],'o')\n\ndef converToInt(trainCat):\n Encode = preprocessing.LabelEncoder()\n valuesEncode = Encode.fit_transform(trainCat)\n return(valuesEncode)\n \ndef postprocessing(pred):\n\n nrow = pred.shape[0]\n \n for i in range(nrow):\n pred[i,] = normalize(pred[i,])\n \n return(pred)\n \ndef normalize(row):\n row[row < 0.001] = 0.001 \n row = row / sum(row) # The initiale value\n return(row)\n \n\n#%%\n\nplot_value(train.PdDistrict)\n\n\n#%%\n\ntrain_new = train\ntest_new = test\n\n#plt.plot(train_new.X,train_new.Y,'o')\n#plt.plot()\nd = {'X' : train_new.X,'Y' : train_new.Y,'Category' : train_new.Category}\ntrain_Location = pd.DataFrame(d)\n\n#%% Ok now we are getting serious : we will use xgboost and randomForest to\n# make prediction\n\ntrainForTraining = train_new.drop(['Dates','Descript','Resolution','Address'],axis=1)\n\n#inter_Districts = pd.get_dummies(train_new[['PdDistrict']])\n#inter_DayOfWeek = pd.get_dummies(train_new[['DayOfWeek']])\n#inter_Street = pd.get_dummies(trainStreet)\ninter_NameStreet = converToInt(trainNameStreet)\n\n#trainForTraining['DayOfWeek'] = converToInt(trainForTraining['DayOfWeek'])\ntrainForTraining['PdDistrictX'] = converToInt(trainForTraining['PdDistrict'])\ntrainForTraining['Category'] = converToInt(trainForTraining['Category'])\ntrainForTraining['NameStreet'] = converToInt(trainNameStreet)\ntrainForTraining['Street'] = converToInt(trainStreet)\ntrainForTraining['DayOfWeek'] = converToInt(train_new[['DayOfWeek']])\n\n#%%\n#trainForTraining = pd.concat([trainForTraining, inter_Districts], axis=1)\n#trainForTraining = pd.concat([trainForTraining, inter_DayOfWeek], axis=1)\n#trainForTraining = pd.concat([trainForTraining, inter_Street], axis=1,join='inner')\n\nprint(trainForTraining.dtypes)\n#%%\n\ntrainForTraining = trainForTraining.drop(['PdDistrict','DayOfWeek'],axis=1)\n\n#testForTraining = test_new.drop(['Dates','Address','Id'],axis=1)\n#testForTraining['DayOfWeek'] = converToInt(testForTraining['DayOfWeek'])\n#testForTraining['PdDistrict'] = converToInt(testForTraining['PdDistrict'])\n\nprint(trainForTraining.dtypes)\n\nprint(trainForTraining.shape)\n## We add the information about the days\n#%%\n\n## We make a resampling here\n\ntrain_X = 
trainForTraining.drop('Category',axis=1)\ntrain_Y = trainForTraining.Category.values \n\n\n\n#test_X = testForTraining\n\nprint(train_X.dtypes)\n#%%\n\nprint(np.where(np.isnan(train_X.as_matrix()) == True))\n\n\n#%%\nxg_train = xgb.DMatrix( train_X, label=train_Y)\n\n\n#%%\n# setup parameters for xgboost\nparam = {}\n# use softmax multi-class classification\nparam['objective'] = 'multi:softprob'\n# scale weight of positive examples\nparam['eta'] = 0.5\nparam['max_depth'] = 6\nparam['silent'] = 1\nparam['nthread'] = 2\nparam['num_class'] = 39\nparam['eval_metric'] = 'mlogloss'\nparam['subsample'] = 0.5\nparam['colsample_bytree'] = 0.5\n\n\nnum_round = 10\n\nxgb.cv(param, xg_train, num_round,nfold = 2,seed = 0)\n\n#%%\nbst_train = xgb.train(param,xg_train,num_round)\n\n#%%\npred = bst_train.predict(xg_test)\n\n#%%\nprint(pred)\npred = postprocessing(pred)\n## In this part we estimate out error for this data set\n\n#%% We know what's going on here\n\nd = {columns[i] : pred[:,i] for i in range(pred.shape[1])}\nd['Id'] = test_new['Id']\n\ntest_sample = pd.DataFrame(d)\n\ntest_sample.to_csv(\"xgboostSubmission.csv\")\n\n#%% Here we try a random forest classifer\n\n# Initialization\nrf = ensemble.RandomForestClassifier(n_jobs=-1, n_estimators=20,max_depth=6)\n\n# Now we try something new\nrf.fit(train_X,train_Y)\n\n#%%\npred = rf.predict_proba(train_X)\n\n#%%\n\n# We have to modify the train_Y (to see the order)\nprint(train_Y)\nprint(train_new.Category)\nY = pd.get_dummies(train_new[['Category']])\n\nprint(Y.shape)\nprint(pred.shape)\n\n#%%\ndef mLogErrorLine(x1):\n p = np.abs(x1)\n\n return(np.sum(p*np.log(p)))\n \n\ndef mLogError(pred,Y):\n diff = (pred - Y).as_matrix()\n diff[diff < 0.001] = 0.001\n mError = 0\n\n for i in range(pred.shape[0]):\n\n mError = mError + mLogErrorLine(diff[i,:])\n\n mError_tot = np.sum(mError) / pred.shape[0]\n print(mError_tot)\n \n return(mError_tot)\n \nmLogError(pred,Y)\n\n#%% Now we can do ... I have no idea what to do next ...\n\n\n\n\n\n\n","sub_path":"SF_crime/XgboostTesting.py","file_name":"XgboostTesting.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"594036326","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport logging\n\nfrom p2p0mq.constants import HEART_BEAT_INTERVAL, HEART_BEAT_MAX_INTERVAL, HEART_BEAT_SLOW_DOWN\n\nlogger = logging.getLogger('p2p0mq.peer')\n\n\n# The peer was created but no connection attempt has ben made.\nINITIAL = 1\n# We connected the socket and we have send the hello message.\nCONNECTING = 2\n# The hello packet was acknowledged.\n# The heart beat is returned in a timely fashion.\nCONNECTED = 3\n# We cannot connect to this peer directly but we can\n# send messages to it by using the via parameter.\nROUTED = 4\n# The heart beat was not returned in time. We were at some\n# point in the past connected to this peer.\nUNREACHABLE = -1\n# We attempted a connection to this peer and we failed.\n# Unreachable peers will decay to this state after some time.\nNO_CONNECTION = -2\n\n\nclass Peer(object):\n \"\"\"\n A peer represented in our application.\n\n Attributes:\n uuid:\n The unique identification for this peer.\n host (str):\n The host where this peer resides.\n port (int):\n The port of this peer on the host.\n db_id:\n The id of the peer in the database.\n conn_state:\n The state of the connection with this peer. 
Can be;\n\n            * *INITIAL* (1): the state of the peer upon creation;\n            * *CONNECTING* (2): after the socket was connected and a\n              hello message has been sent to the peer;\n            * *CONNECTED* (3): the client has returned the hello message\n              and heart-beats are returned in a timely fashion;\n            * *ROUTED* (4): we can reach this peer but we need to use\n              the via option;\n            * *UNREACHABLE* (-1): The heart beat was not returned in time.\n              We were at some point in the past connected to this peer;\n            * *NO_CONNECTION* (-2): We attempted a connection to this peer and\n              we failed. Unreachable peers will decay to this state after\n              some time.\n\n        via:\n            When the state is *ROUTED* the messages destined to this peer\n            will be sent to this address.\n\n        next_heart_beat_time:\n            The time when next heart beat has been scheduled.\n        last_heart_beat_time:\n            Last time we have received a heart beat from this peer.\n        slow_heart_beat_down:\n            When we're not seeing replies from a peer we gradually\n            increase the time between heart beats. This parameter\n            holds the number of seconds to increase at the next failure.\n        next_ask_around_time:\n            Next time when we're going to ask peers about this peer.\n        last_ask_around_time:\n            Last time we have asked about this peer.\n\n    \"\"\"\n    def __init__(self, uuid=None, host=None, port=None, db_id=None):\n        \"\"\"\n        Constructor.\n\n        Arguments:\n            uuid:\n                The unique identification for this peer.\n            host (str):\n                The host where this peer resides.\n            port (int):\n                The port of this peer on the host.\n            db_id:\n                The id of the peer in the database.\n        \"\"\"\n        super(Peer, self).__init__()\n        self.uuid = uuid\n        self.host = host\n        self.port = port\n        self.db_id = db_id\n        self.conn_state = INITIAL\n        self.via = None\n\n        # Indicates the responsiveness of the peer.\n        self.next_heart_beat_time = None\n        self.last_heart_beat_time = None\n        self.slow_heart_beat_down = 0\n\n        # Don't ask too often about missing peers.\n        self.next_ask_around_time = None\n        self.last_ask_around_time = None\n\n    def __str__(self):\n        return \"Peer(%r)\" % self.uuid\n\n    def __repr__(self):\n        return \"Peer(uuid=%r, host=%r, port=%r, db_id=%r, nhbt=%r, lhbt=%r, \" \\\n               \"state=%r, via=%r)\" % (\n                   self.uuid, self.host, self.port, self.db_id,\n                   self.next_heart_beat_time, self.last_heart_beat_time,\n                   self.state, self.via\n               )\n\n    def __hash__(self):\n        return hash(self.uuid)\n\n    @property\n    def address(self):\n        return self.host if self.port is None else 'tcp://%s:%d' % (\n            self.host, self.port)\n\n    @property\n    def state_initial(self):\n        \"\"\" The peer was created but no connection attempt has been made. \"\"\"\n        return self.conn_state == INITIAL\n\n    @state_initial.setter\n    def state_initial(self, value):\n        \"\"\" The peer was created but no connection attempt has been made. \"\"\"\n        self.conn_state = INITIAL\n\n    @property\n    def state_connecting(self):\n        \"\"\" We connected the socket and we have sent the hello message. \"\"\"\n        return self.conn_state == CONNECTING\n\n    @state_connecting.setter\n    def state_connecting(self, value):\n        \"\"\" We connected the socket and we have sent the hello message. \"\"\"\n        self.conn_state = CONNECTING\n\n    @property\n    def state_connected(self):\n        \"\"\" The hello packet was acknowledged. The heart beat is returned in\n        a timely fashion. \"\"\"\n        return self.conn_state == CONNECTED\n\n    @state_connected.setter\n    def state_connected(self, value):\n        \"\"\" The hello packet was acknowledged. The heart beat is returned in\n        a timely fashion. 
\"\"\"\n self.conn_state = CONNECTED\n\n @property\n def state_routed(self):\n \"\"\" We cannot connect to this peer directly but we can\n send messages to it by using the via parameter. \"\"\"\n return self.conn_state == ROUTED\n\n @state_routed.setter\n def state_routed(self, value):\n \"\"\" We cannot connect to this peer directly but we can\n send messages to it by using the via parameter. \"\"\"\n self.conn_state = ROUTED\n\n @property\n def state_unreachable(self):\n \"\"\" The heart beat dis not returned in time. We were at some\n point in the past connected to this peer. \"\"\"\n return self.conn_state == UNREACHABLE\n\n @state_unreachable.setter\n def state_unreachable(self, value):\n \"\"\" The heart beat dis not returned in time. We were at some\n point in the past connected to this peer. \"\"\"\n self.conn_state = UNREACHABLE\n\n @property\n def state_no_connection(self):\n \"\"\" We attempted a connection to this peer and we failed.\n Unreachable peers will decay to this state after some time. \"\"\"\n return self.conn_state == NO_CONNECTION\n\n @state_no_connection.setter\n def state_no_connection(self, value):\n \"\"\" We attempted a connection to this peer and we failed.\n Unreachable peers will decay to this state after some time. \"\"\"\n self.conn_state = NO_CONNECTION\n\n @property\n def state(self):\n return Peer.state_to_string(self.conn_state)\n\n @staticmethod\n def state_to_string(state):\n if state == INITIAL:\n return 'INITIAL'\n elif state == CONNECTED:\n return 'CONNECTED'\n elif state == ROUTED:\n return 'ROUTED'\n elif state == UNREACHABLE:\n return 'UNREACHABLE'\n elif state == NO_CONNECTION:\n return 'NO CONNECTION'\n else:\n raise ValueError\n\n @property\n def needs_reconnect(self):\n \"\"\" Tell if this peer should be reconnected.\"\"\"\n return self.conn_state in (INITIAL, NO_CONNECTION, UNREACHABLE)\n\n @property\n def does_heart_beat(self):\n \"\"\" Tell if this peer is a valid destination for a\n heart-beat based on its state.\"\"\"\n return self.conn_state in (CONNECTED, ROUTED, UNREACHABLE)\n\n def reset_heart_beat(self, app):\n self.next_heart_beat_time = app.tick + HEART_BEAT_INTERVAL\n self.slow_heart_beat_down = 0\n self.last_heart_beat_time = app.tick\n\n def schedule_heart_beat(self, app):\n self.next_heart_beat_time = \\\n app.tick + HEART_BEAT_INTERVAL + self.slow_heart_beat_down\n self.slow_heart_beat_down = \\\n min(self.slow_heart_beat_down + HEART_BEAT_SLOW_DOWN,\n HEART_BEAT_MAX_INTERVAL)\n\n def become_connected(self, message, app):\n \"\"\"\n Sets the status of the peer based on the data in the message.\n\n The method is used by the heart-beat and connector concerns\n to update the state of the peer as part of the processing of\n incoming messages. 
We set the state to either CONNECTED or ROUTED\n        based on the path the message arrived on and will reset\n        the heart-beat timer.\n\n        Arguments:\n            message (Message):\n                The message to inspect.\n            app (LocalPeer):\n                Manager instance.\n        \"\"\"\n        assert message.source == self.uuid\n        if message.source == message.previous_hop:\n            self.state_connected = True\n            self.via = None\n            logger.debug(\"%s is now a direct connection\", self)\n        else:\n            self.state_routed = True\n            self.via = message.previous_hop\n            logger.debug(\"%s is now a proxied connection\", self)\n        self.reset_heart_beat(app)\n","sub_path":"p2p0mq/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":9171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"323421496","text":"import time\nfrom functools import wraps\n\"\"\" Includes several widgets that are often used.\n\n@display_time: A decorator that shows a function's running time.\n\"\"\"\n\n\ndef display_time(function):\n    @wraps(function)\n    def function_timer(*args, **kwargs):\n        t0 = time.time()\n        result = function(*args, **kwargs)\n        t1 = time.time()\n        print(\"%s took %s ms\" %\n              (function.__name__, str((t1 - t0) * 1000)))\n        return result\n    return function_timer\n","sub_path":"Generate/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"525306494","text":"class Person(object):\n    def __init__(self, name, gender):\n        self.name = name\n        self.gender = gender\n    def whoAmI(self):\n        return 'I am a Person,my name is %s'%self.name\n\nclass Student(Person):\n    def __init__(self, name, gender, score):\n        super(Student, self).__init__(name, gender) # initialize the parent class; otherwise Student, which inherits from Person, would have no name and gender\n        self.score = score\n    def whoAmI(self):\n        return 'I am a Person,my name is %s'%self.name\n\nclass Teacher(Person):\n    def __init__(self, name, gender, course):\n        super(Teacher, self).__init__(name, gender)\n        self.course = course\n    def whoAmI(self):\n        return 'I am a Person,my name is %s'%self.name\n\np = Person('Tom', 'Male')\ns = Student('Bob', 'Male', 88)\nt = Teacher('Alice', 'Female', 'English')\nprint(t.name)\nprint(t.course)\nprint(isinstance(p, Person))\nprint(isinstance(p, Student))\nprint(isinstance(p, Teacher))\nprint(isinstance(t, object))\nprint(type(s)) # get the var type\nprint(dir(s)) # get the var attr\n\ngetattr(s, 'name', 'not exist')\nsetattr(s, 'name', 'Adam') #set the new name\n    \n","sub_path":"SPIDER/Project_bak/autotest/apitest/practice/extend_test.py","file_name":"extend_test.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"398588346","text":"import random\nimport sys\nimport time\nimport pygame\n\n# Check for initializing errors\ncheck_error = pygame.init()\nif check_error[1] > 0:\n    print('(!) 
had {0} initializing errors, exiting...'.format(check_error[1]))\n    sys.exit(-1)\nelse:\n    print('(+) Pygame successfully initialized!')\n\nplaySurface = pygame.display.set_mode((720, 460)) # Screen size\npygame.display.set_caption('Snake Game') # Window title (game name)\n\n# colors\nred = pygame.Color(255, 0, 0) # Game over\ngreen = pygame.Color(0, 255, 0) # Snake\nblack = pygame.Color(0, 0, 0) # Score\nwhite = pygame.Color(255, 255, 255) # Background\nbrown = pygame.Color(165, 42, 42) # Food\n\n# Frames per Second\nfpsController = pygame.time.Clock()\n\n# Important variables (snake position)\nsnakePos = [100, 50] # initial snake position [X, Y]\nsnakeBody = [[100, 50], [90, 50], [80, 50]]\n# Random food position\nfoodPos = [random.randrange(1, 72)*10, random.randrange(1, 46)*10]\nfoodSpawn = True\n\ndirection = 'RIGHT' # Initialize to Right\nchange_to = direction # Pending direction change\n\nscore = 0\n\n\n# Game over function\ndef game_over():\n\n    my_font = pygame.font.SysFont('monaco', 72)\n    GOsurf = my_font.render('Game Over', True, red)\n    GOrect = GOsurf.get_rect()\n    GOrect.midtop = (360, 15) # red 'Game Over' text at the middle top\n    playSurface.blit(GOsurf, GOrect)\n    show_score(0)\n    pygame.display.update()\n    time.sleep(5)\n    pygame.quit() # Quit pygame\n    sys.exit() # Exit to console\n\n\n# Score display function\ndef show_score(choice=1):\n\n    sfont = pygame.font.SysFont('monaco', 24)\n    ssurf = sfont.render('Score: {}'.format(score), True, black)\n    srect = ssurf.get_rect()\n\n    if choice == 1:\n        srect.midtop = (80, 10)\n    else:\n        srect.midtop = (360, 120)\n    playSurface.blit(ssurf, srect)\n\n\n# Main logic of game\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit()\n\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_RIGHT or event.key == ord('d'):\n                change_to = 'RIGHT'\n            if event.key == pygame.K_LEFT or event.key == ord('a'):\n                change_to = 'LEFT'\n            if event.key == pygame.K_UP or event.key == ord('w'):\n                change_to = 'UP'\n            if event.key == pygame.K_DOWN or event.key == ord('s'):\n                change_to = 'DOWN'\n            if event.key == pygame.K_ESCAPE:\n                pygame.event.post(pygame.event.Event(pygame.QUIT))\n\n    # Validation of direction (validate direction prevents you\n    # from going right and left at the same time and so on)\n    if change_to == 'RIGHT' and not direction == 'LEFT':\n        direction = 'RIGHT'\n    if change_to == 'LEFT' and not direction == 'RIGHT':\n        direction = 'LEFT'\n    if change_to == 'UP' and not direction == 'DOWN':\n        direction = 'UP'\n    if change_to == 'DOWN' and not direction == 'UP':\n        direction = 'DOWN'\n\n    # Update snake position [X, Y] (Every food grows 10)\n    if direction == 'RIGHT':\n        snakePos[0] += 10\n    if direction == 'LEFT':\n        snakePos[0] -= 10\n    if direction == 'UP':\n        snakePos[1] -= 10\n    if direction == 'DOWN':\n        snakePos[1] += 10\n\n# Snake body mechanism\n    snakeBody.insert(0, list(snakePos))\n    if snakePos[0] == foodPos[0] and snakePos[1] == foodPos[1]:\n        score += 1\n        foodSpawn = False\n    else:\n        snakeBody.pop()\n\n    # Food Spawn\n    if foodSpawn is not True:\n        foodPos = [random.randrange(1, 72)*10, random.randrange(1, 46)*10]\n        foodSpawn = True\n\n    # background color\n    playSurface.fill(white)\n\n    # Draw Snake\n    for pos in snakeBody:\n        pygame.draw.rect(playSurface, green, pygame.Rect(pos[0], pos[1], 10, 10))\n\n    # Draw Food\n    pygame.draw.rect(playSurface, brown, pygame.Rect(foodPos[0], foodPos[1], 10, 10))\n\n    # Game over when the snake passes the wall\n    if snakePos[0] > 710 or snakePos[0] < 0:\n        game_over()\n    if 
snakePos[1] > 460 or snakePos[1] < 0:\n        game_over()\n\n    # Self hit\n    for block in snakeBody[1:]:\n        if snakePos[0] == block[0] and snakePos[1] == block[1]:\n            game_over()\n\n    # Common stuff\n    show_score()\n    pygame.display.update()\n    fpsController.tick(23) # Speed of snake\n","sub_path":"PygameProjects/pyGame_cursos/Udemy - Python Game Development : Creating a Snake Game/Snake_Game.py","file_name":"Snake_Game.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"516525139","text":"#encoding: utf-8\n'''\nCreated on 14 oct. 2016\n\n@author: andre\n'''\nimport numpy as np\nfrom neurons.neuronal import *\n\n# Example: XOR function\ndef funcionXOR(nhs=[4],w=None,N=0.5,M=0.2,imprimir=True):\n    conjunto_entrenamiento_xor = [\n        [[0,0], [0]],\n        [[0,1], [1]],\n        [[1,0], [1]],\n        [[1,1], [0]]\n    ]\n    # Essential attributes\n    ni = len(conjunto_entrenamiento_xor[0][0])\n    no = len(conjunto_entrenamiento_xor[0][1])\n\n    # Create a neural network\n    n = RedNeuronal(ni,nhs,no,w)\n    # Train the neural network with the backpropagation algorithm\n    n.entrenamiento(conjunto_entrenamiento_xor, 1000, N, M, imprimir)\n    # Test the result\n    salidas = n.test(conjunto_entrenamiento_xor, imprimir)\n    errores = n.testError(conjunto_entrenamiento_xor, imprimir)\n    \n    return salidas, errores\n\n\n# Example: SUM function\ndef funcionSUM(nhs=[5],w=None,N=0.1,M=0.1,imprimir=True):\n    # Two random summands\n    sumandos = np.random.uniform(-0.5,0.5, (10,2))\n    objetivo = (sumandos[:,0] + sumandos[:,1]).reshape(10,1)\n    conjunto_entrenamiento_sum = list(zip(sumandos,objetivo))\n    \n    # Essential attributes\n    ni = len(conjunto_entrenamiento_sum[0][0])\n    no = len(conjunto_entrenamiento_sum[0][1])\n\n    # Create a neural network\n    n = RedNeuronal(ni,nhs,no,w)\n    # Train the neural network with the backpropagation algorithm\n    n.entrenamiento(conjunto_entrenamiento_sum, 1000, N, M, imprimir,tangente=True)\n    \n    # Test the result\n    sumandos2 = np.random.uniform(-0.5,0.5, (10,2))\n    objetivo2 = (sumandos2[:,0] + sumandos2[:,1]).reshape(10,1)\n    conjunto_pruebas_sum = list(zip(sumandos2,objetivo2))\n    salidas = n.test(conjunto_pruebas_sum, imprimir)\n    errores = n.testError(conjunto_pruebas_sum, imprimir)\n    \n    return salidas, errores\n\n\n\nif __name__ == '__main__':\n    funcionXOR()\n    funcionSUM()","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"383629269","text":"from flaskblog import create_app\nfrom flaskblog.models import Tag\nfrom flaskblog import db\n\napp = create_app()\n\n@app.context_processor\ndef context_processor():\n    tags = Tag.query.all()\n    for tag in tags:\n        if not tag.posts:\n            db.session.delete(tag)\n    db.session.commit()\n    return dict(tags=tags)\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"384588405","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef plot_stop_curves(early,early_err,late,late_err,uniform,uniform_err,title=\"\",observed=None,save=False,path=None):\n\tssds = [200,250,300,350,400]\n\tplt.xlabel(\"Stop Signal Delay (ms)\")\n\n\tplt.ylabel(\"P(Stop)\")\n\tplt.title(title)\n\tplt.xlim(200,400)\n\tplt.ylim(0,)\n\n\tif 
not(observed):\n\t\tplt.errorbar(ssds, early, yerr=early_err,color='g')\n\t\tplt.errorbar(ssds, late, yerr=late_err,color='b')\n\t\tplt.errorbar(ssds, uniform, yerr=uniform_err,color='r')\n\t\t\n\t\tplt.plot(ssds,early, label = \"early\", color='g')\n\t\tplt.plot(ssds,uniform, label = \"uniform\",color='r')\n\t\tplt.plot(ssds,late, label = \"late\", color='b')\n\n\tif observed:\n\t\t#err = [0.012, 0.024, 0.036, 0.022, 0.022] #uniform\n\t\t#sim_err = [0.016287, 0.02001158, 0.02256082, 0.01706919, 0.01224172] #uniform\n\t\t#err = [0.013,0.032, 0.028, 0.018, 0.014] #early\n\t\t#sim_err = [0.024, 0.020, 0.025, 0.014, 0.012] #early \n\t\tplt.errorbar(ssds, observed, yerr=err,color='g')\n\t\tplt.errorbar(ssds,early, yerr=sim_err,color='b')\n\t\tplt.plot(ssds,observed, label = 'observed', color='g')\n\t\tplt.plot(ssds,uniform, label = 'uniform', color='b')\n\t\n\tplt.xticks(ssds)\n\n\tplt.legend(loc='upper right')\n\n\tif save:\n\t\tplt.savefig(path)\n\t\n\tplt.show()\n\ndef plot_vs_observed(simulated,sim_err,observed,observed_err,save=False,path=None,name=\"Model\",title=\"\"):\n\tssds = [200,250,300,350,400]\n\tplt.xlabel(\"Stop Signal Delay (ms)\")\n\n\tplt.ylabel(\"P(Stop)\")\n\n\tplt.title(title)\n\n\tplt.xlim(200,400)\n\tplt.ylim(0,)\n\n\n\tplt.errorbar(ssds, observed, yerr=observed_err,color='g')\n\tplt.errorbar(ssds,simulated, yerr=sim_err,color='b')\n\tplt.plot(ssds,observed, label = 'observed', color='g')\n\tplt.plot(ssds,simulated, label = name, color='b')\n\t\n\tplt.xticks(ssds)\n\n\tplt.legend(loc='upper right')\n\n\tif save:\n\t\tplt.savefig(path)\n\t\n\tplt.show()\n\n\ndef plot_all(sim_e,sim_e_err,e,e_err,sim_u,sim_u_err,u,u_err,sim_l,sim_l_err,l,l_err):\n\tssds = [200,250,300,350,400]\n\tplt.xlabel(\"Stop Signal Delay (ms)\")\n\n\tplt.ylabel(\"P(Stop)\")\n\n\tplt.xlim(200,400)\n\tplt.ylim(0,)\n\n\n\tplt.plot(ssds,sim_e, color=\"blue\", label=\"Early\")\n\tplt.errorbar(ssds,sim_e, yerr=sim_e_err,color=\"blue\")\n\n\tplt.plot(ssds,e,color=\"blue\",linestyle=\"--\")\n\tplt.errorbar(ssds, e, yerr=e_err, linestyle=\"--\", color=\"blue\")\n\n\tplt.plot(ssds,sim_u,color=\"red\",label=\"Uniform\")\n\tplt.errorbar(ssds,sim_u, yerr=sim_u_err,color=\"red\")\n\n\tplt.plot(ssds,u,color=\"red\",linestyle=\"--\")\n\tplt.errorbar(ssds, u, yerr=u_err, linestyle=\"--\", color=\"red\")\n\n\tplt.plot(ssds,sim_l, color=\"black\", label=\"Late\")\n\tplt.errorbar(ssds,sim_l, yerr=sim_l_err,color=\"black\")\n\n\tplt.plot(ssds,l,color=\"black\",linestyle=\"--\")\n\tplt.errorbar(ssds, l, yerr=l_err, linestyle=\"--\", color=\"black\")\n\t#plt.errorbar(ssds, sim_u, yerr=sim_u_err, label = \"Uniform (model)\")\n\t#plt.errorbar(ssds, u, yerr=u_err, label = \"Uniform (observed)\")\n\t#plt.errorbar(ssds, sim_l, yerr=sim_l_err, label = \"Late (model)\")\n\t#plt.errorbar(ssds, l, yerr=l_err, label = \"Late (observed)\")\n\n\t#plt.plot(ssds,simulated, label = name, color='b')\n\t\n\tplt.xticks(ssds)\n\n\tplt.legend(loc='upper right')\n\n\t#if save:\n\tplt.savefig('graphs/stops_all_no_learning.png')\n\t\n\tplt.show()\n\n\nif __name__ == \"__main__\":\n\tsim_unif = [0.892,0.739,0.527,0.162,0.058]#[0.90197238, 0.76628698, 0.51113624, 0.15731108, 0.06667802] #uniform\n\tobserved_unif = [0.9725, 0.87, 0.365, 0.105, 0.0425] #uniform\n\terr_uniform = [0.012, 0.024, 0.036, 0.022, 0.022] #uniform\n\tsim_err_uniform = [0.009,0.014,0.015,0.009,0.006]#[0.016287, 0.02001158, 0.02256082, 0.01706919, 0.01224172] #uniform\n\n\tobserved_early = [.955,.805,.295,.07,.0375] #early\n\tsim_early = [0.8803, 0.729, 0.478, 0.159, 0.0642] 
#early\n\terr_early = [0.013,0.032, 0.028, 0.018, 0.014] #early\n\tsim_err_early = [0.017,0.019,0.024,0.0186,0.013]#[0.009,0.016,0.014,0.006,0.005] #early \n\n\tobserved_late = [0.967, 0.893, 0.516, 0.220, 0.121]\n\terr_late = [0.012, 0.027, 0.048, 0.040, 0.029]\n\tsim_late = [0.91478651,0.827,0.562,0.207,0.117]#[0.924,0.819,0.632,0.263,0.129]\n\tsim_err_late = [0.0117,0.0208,0.030,0.01788,0.0177]#[0.008,0.012,0.024,0.015,0.009] \n\n\n\t#plot_vs_observed(sim_late,sim_err_late,observed_late,err_late,save=True,path=\"graphs/late_stops.png\",name=\"model\",title=\"Late\")\n\n\t#plot_all(sim_early,sim_err_early,observed_early,err_early,sim_unif,sim_err_uniform,observed_unif,err_uniform,sim_late,sim_err_late,observed_late,err_late)\n\tplot_stop_curves(sim_early,sim_err_early,sim_late,sim_err_late,sim_unif,sim_err_uniform,title=\"Stop Curves (models)\", save=True,path=\"graphs/stop_curves_models_learning_only.png\")","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"232233750","text":"import boto3\nimport botocore.exceptions\nfrom sys import stderr\n\n\n_CLIENTS = {}\n\n\ndef get_client(service, region=None, arn=None, session_name=None):\n \"\"\"Return (cached) boto3 clients for this service and this region\"\"\"\n if arn is not None and session_name is not None:\n try:\n credentials = boto3.Session(region_name=region).client('sts').assume_role(RoleArn=arn, RoleSessionName=session_name)\n if (service, region) not in _CLIENTS:\n _CLIENTS[(service, region)] = boto3.Session(region_name=region,\n aws_access_key_id=credentials[\"Credentials\"][\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"Credentials\"][\"SecretAccessKey\"],\n aws_session_token=credentials[\"Credentials\"][\"SessionToken\"]).client(service)\n return _CLIENTS[(service, region)]\n except botocore.exceptions.ClientError as err:\n print(err, file=stderr)\n if (service, region) not in _CLIENTS:\n _CLIENTS[(service, region)] = boto3.Session(region_name=region).client(service)\n return _CLIENTS[(service, region)]\n","sub_path":"aws_list_all/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"174302569","text":"#!usr/bin/env python \n#-*- coding:utf-8 _*- \n\"\"\" \n@author:Administrator \n@file: pick.py \n@time: 2018/07/30 \n\"\"\"\nimport json\n\nd = dict(name='Bob', age=20, score=80)\nstr1 = json.dumps(d)\nprint('序列化后%s'% str1)\n\nstr2 = json.loads(str1)\nprint('反序列化后%s' % str2)\n\nobj = dict(name='小明明', age=18)\ns = json.dumps(obj, ensure_ascii=False)\nprint(s)","sub_path":"pick.py","file_name":"pick.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"125984367","text":"import json\nimport pytest\n\nfrom pathlib import Path\nfrom ..utils import provit_uri, load_jsonld, walk_up\n\n\ndef test_provit_uri():\n assert (\n provit_uri(\"test123abc\") == \"http://vocab.ub.uni-leipzig.de/provit/test123abc\"\n )\n\n\ndef test_load_jsonld(tmp_path):\n test_file = tmp_path / \"test_123\"\n assert load_jsonld(test_file) == (None, None)\n test_file.touch()\n assert load_jsonld(test_file) == (None, None)\n with open(test_file, \"w\") as tfile:\n json.dump([], tfile)\n assert load_jsonld(test_file) == (None, None)\n with open(test_file, \"w\") as tfile:\n json.dump([1, 2, 3], 
tfile)\n with pytest.raises(IOError):\n load_jsonld(test_file)\n\n\ndef test_walk_up(tmp_path):\n str_path = str(tmp_path.resolve()).split(\"/\")\n for p, path in enumerate(walk_up(tmp_path)):\n if p != len(str_path) - 1:\n assert path == \"/\".join(str_path[: len(str_path) - p])\n else:\n assert path == \"/\"\n","sub_path":"provit/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"84310354","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef benchi_f(x):\n n=int(x.shape[0]) #xの次元\n out=0\n for i in range(n):\n out = out + x[i]**2\n return out\n\n\nx = np.arange(-5,5,0.01)\ny = np.arange(-5,5,0.01) \n#X = np.c_[x, y].T\nX = np.meshgrid(x,y)\nZ=np.zeros(1000)\nfor i in range(1000):\n Z[i] = benchi_f(X[:,i])\nfig = plt.figure()\nax = Axes3D(fig)\nax.plot_wireframe(X[0,:],X[1,:],Z)\nplt.show()","sub_path":"連続/PSO/test_plot.py","file_name":"test_plot.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"357548237","text":"class Solution(object):\n def longest_common_prefix(self, a,b):\n i = 0\n while i < len(a) and i < len(b) and a[i] == b[i]:\n i += 1\n\n return i\n def wordsAbbreviation(self, words):\n \"\"\"\n :type dict: List[str]\n :rtype: List[str]\n \"\"\"\n\n\n from collections import defaultdict\n groups = defaultdict(list)\n\n abbr_words = [\"\"] * len(words)\n\n for index, word in enumerate(words):\n key = (word[0] , len(word) , word[-1])\n groups[key].append((word, index))\n\n for key, value in groups.items():\n first, size, last = key\n\n value.sort()\n\n # find max lcp\n lcp = [0] * len(value)\n\n for i in range(1, len(value)):\n curr_group_word = value[i][0]\n prev_group_word = value[i-1][0]\n lcp[i] = self.longest_common_prefix(curr_group_word, prev_group_word)\n lcp[i-1] = max(lcp[i-1],lcp[i])\n\n # abbreviate\n for i, (group_word, index) in enumerate(value):\n if size - lcp[i] <= 3:\n abbr_words[index] = group_word\n else:\n abbr_words[index] = group_word[:lcp[i]+1] + str(len(group_word) - lcp[i] -2) + group_word[-1]\n\n\n return abbr_words\n\n\nwords = [\"like\", \"god\", \"internal\", \"me\", \"internet\", \"interval\", \"intension\", \"face\", \"intrusion\"]\nS = Solution()\nprint(words)\nprint(S.wordsAbbreviation(words))\n","sub_path":"datastructures_algorithms/String/String-WordAbbreviation.py","file_name":"String-WordAbbreviation.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"187445352","text":"import os, re\nfrom utilities3 import basic\npath = os.path.dirname(basic.__file__)\nfrom Bio import SeqIO\nfrom Bio.Alphabet import IUPAC\nfrom Bio.Data.CodonTable import unambiguous_dna_by_id as codon_table\n\n## reference feature table\nf_in = os.path.join( path, 'reference.ft')\ndata = basic.readcsv(f_in,sep='\\t')[1:]\nfeature_table = [ i for i in data if i[0]=='CDS' and i[-1]!='pseudo']\n\n## list of reference genes with no additional site to the reference genome\n# the sequences include the stop codon\nf_in = os.path.join( path, 'noadditionalsitegenes.txt')\nwith open(f_in) as flob:\n genes_wo_additionalsites = [ i.strip('\\n') for i in flob]\n\n## list of genes with same site in all refseq strain as the reference \n# the sequences include the stop codon\nf_in = os.path.join( path, 
'samesitegenes.txt')\nwith open(f_in) as flob:\n genes_w_samesites = [ i.strip('\\n') for i in flob]\n\n## list of core genes from refseq strains with high nucleotide diversity\nf_in = os.path.join( path, 'highdivgenes.txt')\nwith open(f_in) as flob:\n refhdgenes = [ i.strip('\\n') for i in flob]\n\n## list of core genes from wgs strains with high nucleotide diversity\nf_in = os.path.join( path, 'raw_highdivgenes.txt')\nwith open(f_in) as flob:\n rawhdgenes = [ i.strip('\\n') for i in flob]\n\n## dict of sequence records of genes from the reference genome\nf_in = os.path.join( path, 'reference.fna')\nseqrecs = SeqIO.parse(f_in,'fasta',alphabet=IUPAC.unambiguous_dna)\n# regular expression object to extract gene name\nrc = re.compile('\\[gene=([a-z]{3}[A-Z]?)\\]')\n# function using above object to extract gene name from the header\ndef extractgenename(header):\n m = rc.search(header)\n if m is None: return \n else: gname = m.group(1) \n return gname\nseqrecord = {}\nfor rec in seqrecs:\n gname = extractgenename(rec.description)\n if gname is not None: \n seqrecord[gname] = rec.seq\n\n## Synonymous and Non-synonymous site count for all codons according to CodonTable(Bacteria)\ndef codonsitecount(ct = codon_table[11],nucs = 'ACGT',ixs=range(3)):\n table = ct.forward_table\n stops = ct.stop_codons\n codons = table.keys()\n out = { k:() for k in codons}\n for codon in codons:\n # corresponding amino acid\n aa = table[codon]\n\n # Get Synonymous Site Count\n # initialize list to hold fraction of synonymous changes at each site of a codon with zeros\n f = [ 0 for _ in ixs]\n # for every site in the codon\n for i in ixs:\n # produce all possible alternate codons 1 mutation away\n altc = [ ''.join( [ n if x == i else codon[x] \\\n for x in ixs]) for n in nucs if n != codon[i]]\n # corresponding amino acids for above codons\n alta = [ table[c] for c in altc if c not in stops]\n # fraction of above codons coding for same amino acid\n f[i] = float(alta.count(aa))/len(alta)\n S = round(sum(f),2)\n\n # Get non-synonymous site count\n N = 3 - S\n \n # Get count of 4-fold degenerate sites\n # alternate codons with 1 mutation at the last site\n altc = [ codon[:2]+n for n in nucs \\\n if n != codon[2]]\n # corresponding amino acids for above codons\n alta = [ table[c] for c in altc if c not in stops]\n # check if all 3 codons code for the same amino acid as original\n if alta.count(aa) == 3: \n F = 1\n else:\n F = 0\n out[codon] = (S,N,F)\n return out\n\n## sites count for all reference genes\ndef genesitecount(rec=seqrecord,sites=codonsitecount()):\n out = {}\n for k,v in rec.items():\n # nucleotide sequence\n nseq = str(v)[:-3]\n # length of the sequence OR total no. 
of sites\n L = len(nseq)\n # corresponding codon sequence\n cseq = [ nseq[x:x+3] for x in range(0,L,3)]\n # list of tuples of site counts for each codon\n counts = [ sites[codon] for codon in cseq]\n # transpose and sum to get total site count of each type\n (S,N,F) = ( round(sum(i),2) for i in zip(*counts))\n # recalculate N by subtracting S from total sites\n # to fix decimal errors\n N = round(L-S,2)\n out[k] = [L,S,N,F]\n return out\n\n### Code to make lists of TFs and target genes ###\ndef reg_genes():\n # paths\n fnet = os.path.join( path, 'network_tf_tu.txt')\n ftu = os.path.join( path, 'TUSet.txt')\n ftf = os.path.join( path, 'tf_exp_regulon.txt')\n\n # load data\n net = basic.readcsv(fnet,sep='\\t')\n net = [ [ i[0], i[1].split('[')[0]] for i in net]\n\n tus = basic.readcsv(ftu,sep='\\t')\n # convert above to dict of TU name and constituent genes\n tus = { i[1]:i[3].split(',') for i in tus}\n\n exptf = basic.readcsv(ftf,sep='\\t')\n # convert above to dict of TF name with gene names\n tf_gene = { i[1]:i[2] for i in exptf}\n\n ## generate TRN of genes\n trn = []\n for row in net:\n tf = tf_gene[ row[0]]\n tg = tus[ row[1]]\n trn.append( [ tf, tg])\n # remove duplicates\n trn = basic.dedup(trn)\n # remove those with no tf name\n trn = [ i for i in trn if i[0] != '']\n # make a nested list of tf and leader genes\n tfld = [ [ i[0], i[1][0]] for i in trn]\n # remove duplicates\n tfld = basic.dedup(tfld)\n # add new rows per gene for tfs with > 1 gene\n edata = []\n for row in tfld:\n if ',' in row[0]:\n comp = row[0].split(', ')\n for c in comp:\n entry = [ c, row[1]]\n edata.append( entry)\n else:\n entry = row\n edata.append(entry)\n # remove duplicate rows\n dedata = basic.dedup(edata)\n # TFs\n t1 = set( i[0] for i in dedata)\n # leader genes\n t2 = set( i[1] for i in dedata if i[1] not in t1)\n # other targets\n t3 = set( j for i in trn for j in i[1]\\\n if j not in (t1|t2))\n\n # Only keep genes known to have no additional sites\n fun = lambda x: x in genes_wo_additionalsites\n out = {'tf':t1,'leader':t2,'rest':t3}\n out = { k:list(filter(fun,v)) for k,v in out.items()}\n return out\n\n## Function to get E coli's TRN\n# in which TFs have been removed from TUs\ndef get_trn():\n ## regulonDB files\n fnet = os.path.join( path, 'network_tf_tu.txt')\n ftu = os.path.join( path, 'TUSet.txt')\n ftf = os.path.join( path, 'tf_exp_regulon.txt')\n\n # load network\n net = basic.readcsv( fnet, sep='\\t')\n # load info on TUs \n tus = basic.readcsv( ftu, sep='\\t')\n # load info on TFs\n tf_data = basic.readcsv( ftf, sep='\\t') \n\n # no. 
of rows in the network\n R = len(net)\n\n # extract tu names from the network\n tu_names = [ re.sub('\\[.+\\]','',i[1]) for i in net]\n\n # get targets' gene names corresponding to above tu names\n target_genes = [ 'NA' for _ in range(R)]\n for x,name in enumerate(tu_names):\n for row in tus:\n if name == row[1]:\n target_genes[x] = row[3]\n break\n\t\n # get gene names corresponding to the TF's present in the network\n tf_genes = ['NA' for _ in range(R)]\n for x in range(R):\n for row in tf_data:\n if net[x][0] == row[1]:\n tf_genes[x] = row[2]\n break\n\t\n # bring tf and targets together\n regmap = [ [ tf_genes[r], target_genes[r]] for r in range(R)]\n\t\n # remove entries with missing tf's gene names or segmented genes \n regmap = [ i for i in regmap if i[0] != '' and \\\n\t not any('_' in j for j in i) ]\n\t\n # remove duplicates & split by tfs\n regmap = { k:[ i[1].split(',') for i in v] for k,v in \\\n basic.split_data( basic.dedup(regmap), 0).items()}\n\n # remove tus which are contained in another tu\n regmap = { k:basic.rm_contained(v) for k,v in regmap.items()}\n \n # remove genes absent from the list of genes without additional sites\n regmap = { k:list( filter( None, [ [ j for j in i if j in \\\n genes_wo_additionalsites] for i in v ])) \\\n for k,v in regmap.items() if k in\\\n genes_wo_additionalsites}\n \n # remove tfs with no genes left\n regmap = { k:v for k,v in regmap.items() if len(v) > 0}\n \n\n ## remove TFs from TUs\n # TFs\n tfs = regmap.keys()\n # no. of TFs\n nf = len(regmap)\n # initialize final output dictionary\n out = {}\n for f in tfs:\n tus = regmap[f]\n # no. of TUs\n ntu = len(tus)\n # initialize list to hold modified TUs\n modus = [ [] for i in range(ntu)] \n for i in range(ntu):\n # list of genes in the TU\n tgs = tus[i]\n # remove tfs from the above list of target genes\n modus[i] = [ g for g in tgs if g not in tfs]\n # remove empty TUs\n modus = [ u for u in modus if len(u) >= 1]\n # if there is at least 1 TU left, make an entry in the modified TRN\n if len(modus) >= 1:\n out[f] = modus\n return out\n\n## dict of physico-chemical classes of amino acids \n# and associated function to test whether a change is conservative\nfrom Bio.SeqUtils import IUPACData\naa3to1 = IUPACData.protein_letters_3to1\nf_in = os.path.join( path, 'aa_classes.csv')\naa_class = { i[0]:list(filter(None,i[1:])) for i in \\\n zip(*basic.readcsv(f_in))}\n# convert 3 letter codes to 1 letter\naa_class = { k:[aa3to1[i] for i in v] for k,v in aa_class.items()}\n# function\ndef is_conservative(original,mutant,classdict = aa_class):\n o = original\n m = mutant\n # find original's class\n oc = [ k for k,v in classdict.items() if o in v][0]\n # check if mutant is in the original's class\n if m in classdict[oc]: \n return True\n else:\n return False\n\n## Make a dict of reference genes with a list of their four-fold degenerate sites\nfrom utilities3.specific import n2c,codon_table\ncsites = codonsitecount()\nct = codon_table[11]\nstops = ct.stop_codons\ngene_ffd = {}\nfor k,v in seqrecord.items():\n # generate codon sequence\n cseq = n2c(v)[:-1]\n # skip the gene if there are any interemediate stop codons \n if any( i in stops for i in cseq): continue\n # 0-based index of codons with ffd sites\n entry = [ x for x,i in enumerate(cseq) if csites[i][2] == 1]\n gene_ffd[k] = entry\n\n","sub_path":"utilities3/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":10243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} 
+{"seq_id":"544520025","text":"import os\nimport random\nimport sqlite3\nimport hashlib\nimport json\n\nfrom flask import Flask, request, render_template, redirect, url_for, jsonify\nfrom flask_cors import CORS\nimport flask_login\n\napp = Flask(__name__)\napp.secret_key = \"placeholder key\" # TODO: Load secret key from a config file that isn\"t stored in the repo\nCORS(app)\nsqlConnection = sqlite3.connect(\"app.db\", check_same_thread=False, isolation_level=None)\ncursor = sqlConnection.cursor()\nloginManager = flask_login.LoginManager()\nloginManager.init_app(app)\n\n## Static web pages\n@app.route(\"/\")\ndef staticMainPage():\n\treturn redirect(url_for(\"staticIndexPage\"))\n\n@app.route(\"/index\")\ndef staticIndexPage():\n\treturn render_template(\"index.html\")\n\n@app.route(\"/signup\")\ndef staticSignupPage():\n\tif flask_login.current_user.is_authenticated:\n\t\treturn redirect(url_for(\"staticMainPage\"))\n\n\treturn render_template(\"signup.html\")\n\n@app.route(\"/login\")\ndef staticLoginPage():\n\tif flask_login.current_user.is_authenticated:\n\t\treturn redirect(url_for(\"staticMainPage\"))\n\n\treturn render_template(\"login.html\")\n\n@app.route(\"/map\")\ndef staticMapPage():\n\treturn render_template(\"map.html\")\n\n@app.route(\"/profile\")\n@flask_login.login_required\ndef staticProfilePage():\n\treturn render_template(\"profile.html\")\n\n@app.route(\"/all_listings\")\ndef staticAllListingsPage():\n\treturn render_template(\"listings.html\")\n\t\n@app.route(\"/createlisting\")\ndef staticListingsPage():\n\treturn render_template(\"create_listing.html\")\n\n## User stuff\nclass User(flask_login.UserMixin):\n\tdef __init__(self, userId = None):\n\t\tself.userId = userId\n\n\t\n\tdef get_id(self):\n\t\tcursor.execute(\"SELECT * FROM user WHERE id = ?\", (self.userId,))\n\t\tuserRow = cursor.fetchone()\n\t\tif userRow is None:\n\t\t\treturn None\n\n\t\treturn str(userRow[0])\n\t\t\n\n@loginManager.user_loader\ndef userLoader(userId):\n\tuserRow = cursor.execute(\"SELECT * FROM user WHERE id = ?\", (userId,))\n\tif userRow is None:\n\t\treturn None\n\n\treturn User(userId)\n\n@loginManager.request_loader\ndef requestLoader(req):\n\temail = req.form.get(\"email\")\n\tif email is None:\n\t\treturn None\n\n\tcursor.execute(\"SELECT id, password_hash, salt FROM user WHERE email = ?\", (email,))\n\tuserRow = cursor.fetchone()\n\tif userRow is None:\n\t\treturn None\n\n\tuser = User(userRow[0])\n\n\tpasswordHash = hashlib.pbkdf2_hmac(\"sha256\", req.form[\"password\"].encode(), userRow[2], 10000)\n\n\tif passwordHash != userRow[1]:\n\t\treturn None\n\n\treturn user\n\n\n## API endpoints\n@app.route(\"/api/users\")\ndef getUsers():\n\tcursor.execute(\"SELECT id, firstName, lastName, email, address, state, city, zipcode FROM user\")\n\treturn jsonify(cursor.fetchall())\n\n@app.route(\"/api/signup\", methods=[\"POST\"])\ndef newUser():\n\tplainPassword = request.form[\"password\"]\n\tfirstName = request.form[\"firstName\"]\n\tlastName = request.form[\"lastName\"]\n\temail = request.form.get(\"email\")\n\t# TODO: Implement checks for structure and integrity (I.E. 
first/last name is present, email is valid)\n\n\tuserId = random.getrandbits(63)\n\tsalt = os.urandom(32);\n\n\tpasswordHash = hashlib.pbkdf2_hmac(\"sha256\", plainPassword.encode(), salt, 10000)\n\n\tcursor.execute(\"SELECT * FROM user WHERE email = ?\", (email,))\n\tif cursor.fetchone() is not None:\n\t\treturn json.dumps({\"success\": False}), 409\n\n\tcursor.execute(\"INSERT INTO user VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n\t\t\t(userId , passwordHash, salt, firstName, lastName,\n\t\t\temail, request.form[\"address\"], request.form[\"city\"],\n\t\t\trequest.form[\"state\"], request.form[\"zipcode\"])\n\t\t\t)\n\n\tflask_login.login_user(User(userId), remember = True)\n\n\treturn json.dumps({\"success\": True}), 200\n\n@app.route(\"/api/login\", methods=[\"POST\"])\ndef login():\n\t#if not flask_login.current_user.is_anonymous:\n\t#\treturn json.dumps({\"success\": True}), 200\n\n\tuser = requestLoader(request)\n\tif user is None:\n\t\treturn json.dumps({\"success\": False}), 400\n\n\tflask_login.login_user(user, remember = True)\n\treturn json.dumps({\"success\": True}), 200\n\n@app.route(\"/api/logout\", methods=[\"POST\"])\n@flask_login.login_required\ndef logout():\n\tflask_login.logout_user()\n\treturn json.dumps({\"success\": True}), 200\n\n@app.route(\"/api/createlisting\", methods=[\"POST\"])\n@flask_login.login_required\ndef createListing():\n\n\tcursor.execute(\"INSERT INTO listing VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n\t\t\t(random.getrandbits(63) , flask_login.current_user.get_id(), request.form[\"name\"], request.form[\"description\"], request.form[\"price\"],\n\t\t\trequest.form[\"bedcount\"], request.form[\"bathcount\"], request.form[\"address\"],\n\t\t\trequest.form[\"city\"], request.form[\"state\"], request.form[\"zipcode\"],\n\t\t\trequest.form[\"smoking\"], request.form[\"internet\"])\n\t\t\t)\n\treturn json.dumps({\"success\": True}), 200\n\nif __name__ == \"__main__\":\n\tapp.run()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"592936162","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nAUTHOR = 'Evgeny Pogorelov'\nSITENAME = AUTHOR\nSITEURL = 'localhost:8000'\nSITETITLE = AUTHOR\nSITESUBTITLE = 'Side projects and writings'\nSITEDESCRIPTION = \"Side projects and writings\"\nSITELOGO = SITEURL + '/images/profile.jpeg'\nFAVICON = SITEURL + '/images/favicon.ico'\n\nBROWSER_COLOR = '#333'\nROBOTS = 'index, follow'\n\nCC_LICENSE = {\n 'name': 'Creative Commons Attribution-ShareAlike',\n 'version': '4.0',\n 'slug': 'by-sa'\n}\n\nCOPYRIGHT_YEAR = 2022\n\nTHEME = \"./themes/Flex/\"\nPATH = 'content'\nTIMEZONE = 'America/New_York'\n\nDEFAULT_LANG = 'en'\nOG_LOCALE = 'en_US'\nLOCALE = 'en_US'\n\nDATE_FORMATS = {\n 'en': '%B %d, %Y',\n}\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nUSE_FOLDER_AS_CATEGORY = False\nMAIN_MENU = True\nHOME_HIDE_TAGS = True\n\n# Social widget\nSOCIAL = (('linkedin', 'https://www.linkedin.com/in/evgenypogorelov/en'),\n ('github', 'https://github.com/pogoetic'),\n ('twitter', 'https://twitter.com/pogoetic'),\n ('rss', '//pogoetic.github.io/feeds/all.atom.xml'))\n\nMENUITEMS = (('Archives', '/archives.html'),\n ('Categories', '/categories.html'),\n ('Tags', '/tags.html'),)\n\n#LINKS = (('about', 
'http://evgenypogorelov.com'),)\n\nDEFAULT_PAGINATION = 10\n\nMARKUP = ('md', 'ipynb')\n\n#PLUGIN_PATHS = ['./plugins']\n\nfrom pelican_jupyter import markup as nb_markup\nPLUGINS = [nb_markup, 'sitemap']\n\n\nSITEMAP = {\n 'format': 'xml',\n 'priorities': {\n 'articles': 0.6,\n 'indexes': 0.6,\n 'pages': 0.5,\n },\n 'changefreqs': {\n 'articles': 'monthly',\n 'indexes': 'daily',\n 'pages': 'monthly',\n }\n}\n\nEXTRA_PATH_METADATA = {\n #'extra/custom.css': {'path': 'static/custom.css'},\n 'extra/CNAME': {'path': 'CNAME'}\n}\n#CUSTOM_CSS = 'static/custom.css'\n\nMAIN_MENU = True\n\nSTATIC_PATHS = ['images', 'extra']\n\n# if you create jupyter files in the content dir, snapshots are saved with the same\n# metadata. These need to be ignored. \nIGNORE_FILES = [\".ipynb_checkpoints\"] \n\nDISQUS_SITENAME = 'evgenysblog-1'\nGOOGLE_ANALYTICS = \"G-DT82FP28W3\"\n#GOOGLE_TAG_MANAGER = \"GTM-WBFQQB6\"\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"276528746","text":"#data loading and bunching; defines the dataloader and items\n#contains the basic item Sequence, the basic itemlist SeqList, \n\nfrom fastai import *\nfrom fastai.text import *\nfrom .iterators import *\nfrom .transform import *\n\nsupported_seqfiletypes = ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n\nseqfiletype_to_iterator = {\n '.fastq': FastqIterator,\n '.fna': FastaIterator,\n '.fasta': FastaIterator,\n '.ffn': FastaIterator,\n '.faa': FastaIterator\n}\n\ndef _get_bio_files(parent, p, f, extensions):\n p = Path(p)#.relative_to(parent)\n if isinstance(extensions,str): extensions = [extensions]\n low_extensions = [e.lower() for e in extensions] if extensions is not None else None\n res = [p/o for o in f if not o.startswith('.')\n and (extensions is None or f'.{o.split(\".\")[-1].lower()}' in low_extensions)]\n return res\n\ndef get_bio_files(path:PathOrStr, extensions:Collection[str]=None, recurse:bool=True, exclude:Optional[Collection[str]]=None,\n include:Optional[Collection[str]]=None, presort:bool=False, followlinks:bool=False)->FilePathList:\n \"Return list of files in `path` that have a suffix in `extensions`; optionally `recurse`.\"\n low_extensions = [e.lower() for e in extensions] if extensions is not None else None\n if recurse:\n res = []\n for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)):\n # skip hidden dirs\n if include is not None and i==0: d[:] = [o for o in d if o in include]\n elif exclude is not None and i==0: d[:] = [o for o in d if o not in exclude]\n else: d[:] = [o for o in d if not o.startswith('.')]\n res += _get_bio_files(path, p, f, extensions)\n if presort: res = sorted(res, key=lambda p: _path_to_same_str(p), reverse=False)\n return res\n else:\n f = [o.name for o in os.scandir(path) if o.is_file()]\n res = _get_bio_files(path, path, f, extensions)\n if presort: res = sorted(res, key=lambda p: _path_to_same_str(p), reverse=False)\n return res\n\ndef check_seqfiletype(filename:PathOrStr, extensions:Collection[str]=supported_seqfiletypes):\n if isinstance(filename, Path): \n seqfiletype = filename.suffix\n else:\n seqfiletype = f'.{filename.split(\".\")[-1].lower()}' \n assert seqfiletype in extensions, \"Input sequence file type %r is not supported.\" % seqfiletype\n return seqfiletype\n\ndef 
get_items_from_seqfile(filename:PathOrStr, extensions:Collection[str]=supported_seqfiletypes, max_seqs:int=None, skiprows:int=0, ksize:int=None):\n seqfiletype = check_seqfiletype(filename, extensions)\n iterator = seqfiletype_to_iterator[seqfiletype]\n with open(filename, \"r\") as handle:\n items = []\n row = 0 \n for title, seq, qual, offset in iterator(handle):\n #get (filename, offset, length of sequence) for each read and add to items\n row += 1\n if len(seq)>=ksize and row>skiprows:\n items.append(seq)\n if max_seqs and (row-skiprows)>=max_seqs: \n break\n handle.close()\n return items\n\ndef get_count_from_seqfile(filename:PathOrStr, extensions:Collection[str]=supported_seqfiletypes, max_seqs:int=None):\n seqfiletype = check_seqfiletype(filename, extensions)\n iterator = seqfiletype_to_iterator[seqfiletype]\n with open(filename, \"r\") as handle:\n count = 0\n for title, seq, qual, offset in iterator(handle):\n count += 1\n if max_seqs and count >= max_seqs: \n break\n handle.close()\n return count\n\ndef extract_from_header(filename:PathOrStr, func:Callable, extensions:Collection[str]=supported_seqfiletypes, max_seqs:int=None):\n '''\n Extract the value returned by 'func' from each sequence in 'filename'.\n\n Parameters\n ---------\n filename\n A string or pathlib Path of sequence file from which to extract values.\n \n func\n A function that accepts a sequence file header string and returns the value desired to be extracted.\n\n extensions\n A collection of accepted sequence file types. Currently supported seqfiletypes are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n\n max_seqs\n An int indicating the maximum number of sequences from which to apply *func*.\n '''\n seqfiletype = check_seqfiletype(filename, extensions)\n iterator = seqfiletype_to_iterator[seqfiletype]\n with open(filename, \"r\") as handle:\n extracts = []\n row = 0\n for title, seq, qual, offset in iterator(handle):\n extract = func(title)\n extracts.append(extract)\n row += 1\n if max_seqs and row >= max_seqs: \n break\n handle.close()\n return extracts\n\ndef get_df_from_files(files, max_seqs_per_file, skiprows, header, delimiter):\n df = pd.DataFrame()\n for csv_name in files:\n chunk = pd.read_csv(csv_name, nrows=max_seqs_per_file, skiprows=range(1,skiprows+1), header=header, delimiter=delimiter)\n df = df.append(chunk)\n return df\n\nclass OpenSeqFileProcessor(PreProcessor):\n \"`PreProcessor` that opens the filenames and read the sequences. 
This is used if creating BioTextList from_folder, because need to know the path of each input for splitting.\"\n def __init__(self, ds:ItemList=None, extensions:Collection[str]=supported_seqfiletypes, max_seqs:int=None, skiprows:int=0, ksize:int=None):\n self.extensions = extensions\n self.max_seqs = max_seqs\n self.ksize = ksize\n self.skiprows=skiprows\n\n def process(self, ds:Collection): \n readitems = []\n for item in ds.items:\n readitems.extend(self.process_one(item))\n ds.items = readitems\n\n def process_one(self,item): \n return get_items_from_seqfile(item, extensions=self.extensions, max_seqs=self.max_seqs, skiprows=self.skiprows, ksize=self.ksize) if isinstance(item, Path) else [item]\n\nclass BioTextList(TextList):\n \"A TextList for biological sequence data.\"\n def __init__(self, items:Iterator, vocab:BioVocab=None, pad_idx:int=1, sep=' ', **kwargs):\n super().__init__(items, **kwargs)\n\n def label_for_lm(self, **kwargs):\n self.__class__ = BioLMTextList\n kwargs['label_cls'] = LMLabelList\n return self.label_const(0, **kwargs)\n\n def label_from_df_for_regression(self, cols:IntsOrStrs=1, label_cls:Callable=FloatList, **kwargs):\n '''\n Label `self.items` from the values in `cols` in `self.inner_df`.\n\n Parameters\n ---------\n cols\n An int or string indicating from which column (by index or name) to derive label. Default 1.\n \n label_cls\n A callable indicating class of labels. For regression, these are most likely floats. Default FloatList.\n '''\n labels = self.inner_df.iloc[:,df_names_to_idx(cols, self.inner_df)]\n assert labels.isna().sum().sum() == 0, f\"You have NaN values in column(s) {cols} of your dataframe, please fix it.\"\n return self._label_from_list(labels, label_cls=label_cls, **kwargs)\n \n \n @classmethod\n def from_seqfile(cls, filename:PathOrStr, path:PathOrStr='.', extensions:Collection[str]=supported_seqfiletypes, max_seqs_per_file:int=None, skiprows:int=0, ksize:int=None, **kwargs)->'TextList':\n '''\n Creates a BioTextList from a single sequence file (e.g. .fastq, .fasta, etc.)\n\n Parameters\n ---------\n filename\n A string or pathlib Path of sequence file from which to create the BioTextList.\n \n path\n A string or pathlib Path indicating root path for BioTextList. Default '.'\n\n extensions\n A collection of accepted sequence file types. Currently supported seqfiletypes are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n\n max_seqs_per_file\n An int indicating the maximum number of sequences to include in BioTextList. Default None (use all sequences).\n\n skiprows\n An int indicating number of sequences to skip in file before extracting to BioTextList. Default 0.\n\n ksize\n An int indicating kmer size of token (each sequence in BioTextList must be >= ksize). Default None.\n '''\n #get (filename, offset) tuple for each read and add to items\n items = get_items_from_seqfile(filename=filename, extensions=extensions, max_seqs=max_seqs_per_file, skiprows=skiprows, ksize=ksize)\n \n return cls(items=items, path=path, **kwargs)\n\n @classmethod\n def from_folder(cls, path:PathOrStr='.', vocab:Vocab=None, extensions:Collection[str]=supported_seqfiletypes, \n max_seqs_per_file:int=None, skiprows:int=0, recurse:bool=True, processor:PreProcessor=None, **kwargs) -> 'TextList':\n '''\n Creates a BioTextList from all sequence files in a folder.\n\n Parameters\n ---------\n path\n A string or pathlib Path indicating folder from which to extract files. Default '.' \n \n vocab\n A BioVocab or Vocab indicating vocabulary to use in tokenization. 
Default None.\n\n extensions\n A collection of supported sequence file types. Currently, these are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n\n max_seqs_per_file\n An int indicating maximum number of sequences per file to include in BioTextList. Default None (all sequences included).\n\n skiprows\n An int indicating number of sequences in each file to skip before reading into BioTextList. Default 0.\n\n recurse\n A boolean indicating whether to recurse through subdirectories in 'path'.\n\n processor\n A PreProcessor or collection of PreProcessors to pass to BioTextList. Default of None results in [OpenSeqFileProcessor, BioTokenizeProcessor, BioNumericalizeProcessor].\n '''\n #get list of files in `path` with seqfile suffixes. `recurse` determines if we search subfolders.\n files = get_bio_files(path=path, extensions=extensions, recurse=recurse)\n #define processor with OpenSeqFileProcessor since items are now a list of filepaths rather than Seq objects\n processor = ifnone(processor, [OpenSeqFileProcessor(max_seqs=max_seqs_per_file, skiprows=skiprows), BioTokenizeProcessor(), BioNumericalizeProcessor(vocab=vocab)])\n\n return cls(items=files, path=path, processor=processor, **kwargs)\n\n def label_from_fname(self, label_cls:Callable=None, max_seqs_per_file:int=None, extensions:Collection[str]=supported_seqfiletypes, **kwargs) -> 'LabelList':\n '''\n Label `self.items` with filename from which it was sourced. \n\n Parameters\n ---------\n label_cls\n A callable indicating class of labels. Default None (will be inferred). \n\n max_seqs_per_file\n An int indicating maximum number of sequences to include in databunch. Default None (all sequences used).\n\n extensions\n A collection of supported sequence file types. Currently, these are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n '''\n #give label to each filename depending on the filename\n #items need to be a list of filenames, as imported from from_folder (not from_seqfile) \n labels = []\n for o in self.items:\n #extract label from filename\n label = \".\".join((o.parts if isinstance(o, Path) else o.split(os.path.sep))[-1].split(\".\")[0:-1])\n #number of times should repeat that label\n count = get_count_from_seqfile(filename=o, extensions=extensions, max_seqs=max_seqs_per_file)\n labels.extend([label]*count)\n classes = list(set(labels))\n #kwargs = {dict(classes=classes),**kwargs}\n\n return self._label_from_list([labels],label_cls=label_cls, classes=classes, **kwargs)\n\n def label_from_header(self, func:Callable, label_cls:Callable=None, max_seqs_per_file:int=None, extensions:Collection[str]=supported_seqfiletypes, **kwargs) -> 'LabelList':\n '''\n Label sequences from their sequence file header using a custom function. \n\n Parameters\n ---------\n func\n A function that accepts a sequence file header string and returns the value desired to be extracted.\n\n label_cls\n A callable indicating class of labels. Default None (will be inferred). \n\n max_seqs_per_file\n An int indicating maximum number of sequences to include in databunch. Default None (all sequences used).\n\n extensions\n A collection of supported sequence file types. 
Currently, these are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n '''\n #items need to be a list of filenames, as imported from from_folder (not from_seqfile) \n labels = []\n for o in self.items:\n #extract label from filename\n extracts = extract_from_header(filename=o, func=func, extensions=extensions, max_seqs=max_seqs_per_file)\n labels.extend(extracts)\n\n return self._label_from_list(labels,label_cls=label_cls, **kwargs)\n\nclass BioItemLists(ItemLists):\n def __getattr__(self, k):\n ft = getattr(self.train, k)\n if not isinstance(ft, Callable): return ft\n fv = getattr(self.valid, k)\n assert isinstance(fv, Callable)\n def _inner(*args, **kwargs):\n self.train = ft(*args, from_item_lists=True, **kwargs)\n assert isinstance(self.train, LabelList)\n kwargs['label_cls'] = self.train.y.__class__\n self.valid = fv(*args, from_item_lists=True, **kwargs)\n self.__class__ = BioLabelLists\n self.process()\n return self\n return _inner\n\nclass BioLabelLists(LabelLists):\n \"A `LabelList` for each of `train` and `valid` (optional `test`).\"\n def get_processors(self):\n \"Read the default class processors if none have been set.\"\n default_xp = get_lol_processor()\n default_yp = []\n #enable separate processors for train and valid set (intended for reading different numbers of sequences from files in train and valid sets)\n xp = ifnone(self.train.x.processor, default_xp)\n yp = ifnone(self.train.y.processor, default_yp)\n v_xp = ifnone(self.valid.x.processor, default_xp)\n v_yp = ifnone(self.valid.y.processor, default_yp)\n return xp,yp,v_xp,v_yp\n\n def process(self):\n \"Process the inner datasets.\"\n xp,yp,v_xp,v_yp = self.get_processors()\n #process train\n self.lists[0].process(xp,yp,name='train')\n #process valid\n self.lists[1].process(v_xp,v_yp,name='valid')\n #process test if it exists with the same processor as valid\n if len(self.lists)>2:\n self.lists[2].process(v_xp,v_yp,name='test')\n #progress_bar clear the outputs so in some case warnings issued during processing disappear.\n for ds in self.lists:\n if getattr(ds, 'warn', False): warn(ds.warn)\n return self\n\nclass BioDataBunch(TextDataBunch):\n \"Create a databunch from biological sequencing data.\"\n @classmethod\n def from_folder(cls, path:PathOrStr, train:str='train', valid:str='valid', test:Optional[str]=None, valid_pct:float=0.2,\n extensions:Collection[str]=supported_seqfiletypes, recurse:bool=True, ksize:int=None,\n max_seqs_per_file:int=None, val_maxseqs:int=None, skiprows:int=0, val_skiprows:int=0,\n classes:Collection[Any]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000, max_vocab:int=60000,\n min_freq:int=2, include_bos:bool=True, include_eos:bool=False, seed:int=None, **kwargs):\n '''\n Create a `BioDataBunch` from fasta/sequence files in folders. \n\n Parameters\n ---------\n path\n A string or pathlib Path indicating root directory to use in databunch. \n\n train\n A string indicating folder containing training data. Default 'train'.\n\n valid\n A string indicating folder containing validation data. Default 'valid'. \n\n test\n An optional string indicating folder containing test data. Default None.\n\n valid_pct\n A float indicating percentage of sequences to be split into validation set (if no 'train' and 'valid' paths provided).\n\n extensions\n A collection of supported sequence file types. Currently, these are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n\n recurse\n A boolean indicating whether to recurse through subdirectories in 'path'. 
Default True.\n\n ksize\n An int indicating kmer size to use in tokenization. Default None. \n\n max_seqs_per_file\n An int indicating maximum number of sequences from each sequence file in training set to include in databunch. Default None (all sequences used).\n\n val_maxseqs\n An int indicating maximum number of sequences from each sequence file in validation set to include in databunch. Default None (all sequences used).\n\n skiprows\n An int indicating number of sequences in each file in training set to skip before reading into databunch. Default 0.\n\n val_skiprows\n An int indicating number of sequences in each file in validation set to skip before reading into databunch. Default 0.\n\n classes\n A collection indicating class labels to use. Default None (will be inferred).\n\n tokenizer\n A BioTokenizer or Tokenizer to use for tokenization. Default None.\n\n vocab\n A BioVocab or Vocab to use for numericalization. Default None.\n\n chunksize\n An int indicating chunksize to use. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 10000.\n\n max_vocab\n An int indicating maximum vocab items to include. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 60000.\n\n min_freq\n An int indicating minimum occurrence of each token to be included in vocab. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 2.\n\n include_bos\n A boolean indicating whether to include 'beginning of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default True.\n\n include_eos\n A boolean indicating whether to include 'end of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default False.\n\n seed\n Seed to use when splitting data by valid_pct. 
Default None.\n '''\n path = Path(path).absolute()\n\n if train and valid:\n processor = [OpenSeqFileProcessor(extensions=extensions, max_seqs=max_seqs_per_file, ksize=ksize, skiprows=skiprows)] + get_lol_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,\n min_freq=min_freq, include_bos=include_bos, include_eos=include_eos)\n v_processor = [OpenSeqFileProcessor(extensions=extensions, max_seqs=val_maxseqs, ksize=ksize, skiprows=val_skiprows)] + get_lol_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,\n min_freq=min_freq, include_bos=include_bos, include_eos=include_eos)\n \n src = BioItemLists(path, BioTextList.from_folder(path=Path(path)/Path(train), vocab=vocab, extensions=extensions, max_seqs_per_file=max_seqs_per_file, skiprows=skiprows, recurse=recurse, processor=processor),\n BioTextList.from_folder(path=Path(path)/Path(valid), vocab=vocab, extensions=extensions, max_seqs_per_file=val_maxseqs, skiprows=val_skiprows, recurse=recurse, processor=v_processor))\n\n else:\n processor = [OpenSeqFileProcessor(extensions=extensions, max_seqs=max_seqs_per_file, ksize=ksize)] + get_lol_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,\n min_freq=min_freq, include_bos=include_bos, include_eos=include_eos)\n src = BioTextList.from_folder(path=path, vocab=vocab, extensions=extensions, max_seqs_per_file=max_seqs_per_file, recurse=recurse, processor=processor) \n src = src.split_by_rand_pct(valid_pct=valid_pct, seed=seed)\n\n src = src.label_for_lm() if cls==BioLMDataBunch else src.label_from_folder(classes=classes)\n if test is not None: src.add_test_folder(path/test)\n return src.databunch(**kwargs)\n\n @classmethod\n def from_seqfile(cls, path:PathOrStr, filename:PathOrStr, test_filename:Optional[str]=None, extensions:Collection[str]=supported_seqfiletypes,\n max_seqs_per_file:int=None,skiprows:int=0,valid_pct:float=0.2, seed:int=None, \n vocab:BioVocab=None, tokenizer:BioTokenizer=None,\n chunksize:int=10000, max_vocab:int=60000, min_freq:int=2, include_bos:bool=None, include_eos:bool=None,\n label_func:Callable=None, **kwargs:Any):\n '''\n Create a `BioDataBunch` from a single sequence file. Not recommended for classifiers - you need to provide a fairly complicated labeling function.\n\n Parameters\n ---------\n path\n A string or pathlib Path indicating root directory to use in databunch. \n\n filename\n A string or pathlib Path indicating sequence file from which to derive items. \n\n test_filename\n An optional string indicating sequence file from which to derive test data. Default None.\n\n extensions\n A collection of supported sequence file types. Currently, these are: ['.fastq', '.fna', '.fasta', '.ffn', '.faa']\n\n max_seqs_per_file\n An int indicating maximum number of sequences to include in databunch. Default None (all sequences used).\n\n skiprows\n An int indicating number of sequences to skip before reading into databunch. Default 0.\n\n valid_pct\n A float indicating percentage of sequences to be split into validation set.\n\n seed\n Seed to use when splitting data by valid_pct. Default None.\n\n vocab\n A BioVocab or Vocab to use for numericalization. Default None.\n \n tokenizer\n A BioTokenizer or Tokenizer to use for tokenization. Default None.\n\n chunksize\n An int indicating chunksize to use. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 10000.\n\n max_vocab\n An int indicating maximum vocab items to include. 
Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 60000.\n\n min_freq\n An int indicating minimum occurrence of each token to be included in vocab. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 2.\n\n include_bos\n A boolean indicating whether to include 'beginning of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default True.\n\n include_eos\n A boolean indicating whether to include 'end of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default False.\n\n label_func\n A function that accepts a sequence item and returns the value desired to be extracted.\n\n '''\n\n processor = get_lol_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,\n min_freq=min_freq, include_bos=include_bos, include_eos=include_eos)\n \n src = BioTextList.from_seqfile(filename=filename, path=path, extensions=extensions, max_seqs_per_file=max_seqs_per_file, skiprows=skiprows, processor=processor)\n src = src.split_by_rand_pct(valid_pct=valid_pct, seed=seed)\n src = src.label_for_lm() if cls==BioLMDataBunch else src.label_from_func(label_func, **kwargs)\n\n if test_filename is not None: src.add_test(BioTextList.from_seqfile(test_filename, path))\n\n return src.databunch(**kwargs)\n\n @classmethod\n def from_df(cls, path:PathOrStr, train_df:DataFrame, valid_df:DataFrame, test_df:Optional[DataFrame]=None,\n tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, \n text_cols:IntsOrStrs=1,label_cols:IntsOrStrs=0, \n label_delim:str=None, chunksize:int=10000, max_vocab:int=60000,\n min_freq:int=2, include_bos:bool=True, include_eos:bool=False,\n **kwargs) -> DataBunch:\n\n '''\n Create a `BioDataBunch` from dataframes. Recommended for classifiers.\n\n Parameters\n ---------\n path\n A string or pathlib Path indicating root directory to use in databunch. \n\n train_df\n A dataframe containing the training data.\n\n valid_df\n A dataframe containing the validation data.\n\n test_df\n An optional dataframe containing the test data.\n\n tokenizer\n A BioTokenizer or Tokenizer to use for tokenization. Default None.\n\n vocab\n A BioVocab or Vocab to use for numericalization. Default None.\n\n classes\n A collection indicating class labels to use. Default None (will be inferred).\n\n text_cols\n An int or string indicating from which column (by index or name) to derive sequence data. Default 1.\n\n label_cols\n An int or string indicating from which column (by index or name) to derive label. Default 0.\n\n label_delim\n A string indicating label delimiter, if any. Default None.\n\n chunksize\n An int indicating chunksize to use. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 10000.\n\n max_vocab\n An int indicating maximum vocab items to include. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 60000.\n\n min_freq\n An int indicating minimum occurrence of each token to be included in vocab. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 2.\n\n include_bos\n A boolean indicating whether to include 'beginning of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default True.\n\n include_eos\n A boolean indicating whether to include 'end of sequence' special token. 
Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default False.\n '''\n\n path = Path(path).absolute()\n processor = get_lol_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,\n min_freq=min_freq, include_bos=include_bos, include_eos=include_eos)\n \n if classes is None and is_listy(label_cols) and len(label_cols) > 1: classes = label_cols\n src = ItemLists(path, TextList.from_df(train_df, path, cols=text_cols, processor=processor),\n TextList.from_df(valid_df, path, cols=text_cols, processor=processor))\n if cls==TextLMDataBunch: src = src.label_for_lm()\n else: \n if label_delim is not None: src = src.label_from_df(cols=label_cols, classes=classes, label_delim=label_delim)\n else: src = src.label_from_df(cols=label_cols, classes=classes)\n if test_df is not None: src.add_test(TextList.from_df(test_df, path, cols=text_cols))\n\n return src.databunch(**kwargs)\n\n @classmethod\n def from_multiple_csv(cls, path:PathOrStr, train:str=None, valid:str=None, test:Optional[str]=None, valid_pct:float=0.2, \n text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0, \n max_seqs_per_file:int=None, valid_max_seqs:int=None, skiprows:int=0, val_skiprows:int=0,\n delimiter:str=None, header='infer', label_delim:str=None,\n extensions:Collection[str]=['.csv'], recurse:bool=False, \n classes:Collection[str]=None, \n tokenizer:BioTokenizer=None, vocab:BioVocab=None, \n chunksize:int=10000, max_vocab:int=60000, min_freq:int=2, include_bos:bool=None, include_eos:bool=None, \n seed:int=None, **kwargs) -> DataBunch:\n\n '''\n Create a `BioDataBunch` from multiple csv files. Recommended for classifiers. \n\n Parameters\n ---------\n path\n A string or pathlib Path indicating root directory to use in databunch. \n\n train\n A string indicating folder containing training data. Default 'train'.\n\n valid\n A string indicating folder containing validation data. Default 'valid'. \n\n test\n An optional string indicating folder containing test data. Default None.\n\n valid_pct\n A float indicating percentage of sequences to be split into validation set (if no 'train' and 'valid' paths provided).\n\n text_cols\n An int or string indicating from which column (by index or name) to derive sequence data. Default 1.\n\n label_cols\n An int or string indicating from which column (by index or name) to derive label. Default 0.\n\n max_seqs_per_file\n An int indicating maximum number of sequences from each sequence file in training set to include in databunch. Default None (all sequences used).\n\n valid_max_seqs\n An int indicating maximum number of sequences from each sequence file in validation set to include in databunch. Default None (all sequences used).\n\n skiprows\n An int indicating number of sequences in each file in training set to skip before reading into databunch. Default 0.\n\n val_skiprows\n An int indicating number of sequences in each file in validation set to skip before reading into databunch. Default 0.\n\n delimiter\n A string indicating csv file delimiter to pass to pandas read_csv. Default None (will infer).\n\n header\n Index of header of csv to pass to pandas read_csv. Default 'infer'.\n\n label_delim\n A string indicating label delimiter, if any. Default None.\n\n extensions\n A collection of supported file types. Default ['.csv']. \n\n recurse\n A boolean indicating whether to recurse through subdirectories in 'path'. Default True.\n\n classes\n A collection indicating class labels to use. 
Default None (will be inferred).\n\n tokenizer\n A BioTokenizer or Tokenizer to use for tokenization. Default None.\n\n vocab\n A BioVocab or Vocab to use for numericalization. Default None.\n\n chunksize\n An int indicating chunksize to use. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 10000.\n\n max_vocab\n An int indicating maximum vocab items to include. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 60000.\n\n min_freq\n An int indicating minimum occurrence of each token to be included in vocab. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default 2.\n\n include_bos\n A boolean indicating whether to include 'beginning of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default True.\n\n include_eos\n A boolean indicating whether to include 'end of sequence' special token. Will only be used to auto-generate transforms if no tokenizer or vocab provided. Default False.\n\n seed\n Seed to use when splitting data by valid_pct. Default None.\n '''\n\n path = Path(path).absolute()\n\n processor = get_lol_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,\n min_freq=min_freq, include_bos=include_bos, include_eos=include_eos)\n\n if train and valid:\n train_files, valid_files = get_bio_files(path=Path(path)/Path(train).resolve(), extensions=extensions, recurse=recurse), get_bio_files(path=Path(path)/Path(valid).resolve(), extensions=extensions, recurse=recurse)\n test_files = (None if test is None else get_bio_files(path=Path(path)/Path(test).resolve(), extensions=extensions, recurse=recurse))\n train_df = get_df_from_files(train_files, max_seqs_per_file=max_seqs_per_file, skiprows=skiprows, header=header, delimiter=delimiter)\n valid_df = get_df_from_files(valid_files, max_seqs_per_file=valid_max_seqs, skiprows=val_skiprows, header=header, delimiter=delimiter)\n test_df = (None if test_files is None else get_df_from_files(test_files, max_seqs_per_file=max_seqs_per_file, skiprows=skiprows, header=header, delimiter=delimiter))\n else:\n #get a list of csv files in path\n files = get_bio_files(path=path, extensions=extensions, recurse=recurse)\n #for each file, read the csv (optionally with max_seqs and skiprows) and append to the total df\n train_df, valid_df, test_df = (pd.DataFrame(), pd.DataFrame(), (None if test is None else pd.DataFrame()))\n for csv_name in files:\n df = pd.read_csv(csv_name, nrows=max_seqs_per_file, skiprows=range(1,skiprows+1), header=header, delimiter=delimiter)\n df = df.iloc[np.random.RandomState(seed=seed).permutation(len(df))] #if seed is None, this will make the validation set diff between passes if do multiple pass-throughs of the data for big chunks; should set seed, or make sure you don't do multiple passthroughs\n cut = int(valid_pct * len(df)) + 1\n train_chunk, valid_chunk = df[cut:], df[:cut]\n if test is not None:\n test_chunk = pd.read_csv(Path(path)/test, header=header, delimiter=delimiter)\n test_df = test_df.append(test_chunk)\n train_df = train_df.append(train_chunk)\n valid_df = valid_df.append(valid_chunk)\n train_df.reset_index(drop=True, inplace=True)\n valid_df.reset_index(drop=True, inplace=True)\n \n if test_df is not None:\n test_df.reset_index(drop=True, inplace=True)\n\n if classes is None and is_listy(label_cols) and len(label_cols) > 1: classes = label_cols\n src = ItemLists(path, BioTextList.from_df(train_df, path, 
cols=text_cols, processor=processor),\n BioTextList.from_df(valid_df, path, cols=text_cols, processor=processor))\n if cls==TextLMDataBunch: src = src.label_for_lm()\n else: \n if label_delim is not None: src = src.label_from_df(cols=label_cols, classes=classes, label_delim=label_delim)\n else: src = src.label_from_df(cols=label_cols, classes=classes)\n if test_df is not None: src.add_test(BioTextList.from_df(test_df, path, cols=text_cols))\n\n return src.databunch(**kwargs)\n \nclass BioLMDataBunch(BioDataBunch):\n \"Create a `BioDataBunch` suitable for training a language model. BioLMDataBunch inherits from BioDataBunch, so a BioLMDataBunch can be called using any of the BioDataBunch methods: from_folder, from_seqfile, etc.\"\n @classmethod\n def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs:int=64, val_bs:int=None,\n num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate,\n dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> DataBunch:\n datasets = cls._init_ds(train_ds, valid_ds, test_ds)\n val_bs = ifnone(val_bs, bs)\n datasets = [LanguageModelPreLoader(ds, shuffle=(i==0), bs=(bs if i==0 else val_bs), bptt=bptt, backwards=backwards)\n for i,ds in enumerate(datasets)]\n val_bs = bs\n dls = [DataLoader(d, b, shuffle=False, **dl_kwargs) for d,b in zip(datasets, (bs,val_bs,val_bs,val_bs)) if d is not None]\n return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)\n\nclass BioClasDataBunch(BioDataBunch):\n \"Create a `BioDataBunch` suitable for training an RNN classifier. BioClasDataBunch inherits from BioDataBunch, so a BioClasDataBunch can be called using any of the BioDataBunch methods: from_df, from_multiple_csv, etc.\"\n @classmethod\n def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=32, val_bs:int=None, pad_idx=1,\n pad_first=True, device:torch.device=None, no_check:bool=False, backwards:bool=False, \n dl_tfms:Optional[Collection[Callable]]=None, **dl_kwargs) -> DataBunch:\n datasets = cls._init_ds(train_ds, valid_ds, test_ds)\n val_bs = ifnone(val_bs, bs)\n collate_fn = partial(pad_collate, pad_idx=pad_idx, pad_first=pad_first, backwards=backwards)\n train_sampler = SortishSampler(datasets[0].x, key=lambda t: len(datasets[0][t][0].data), bs=bs)\n train_dl = DataLoader(datasets[0], batch_size=bs, sampler=train_sampler, drop_last=True, **dl_kwargs)\n dataloaders = [train_dl]\n for ds in datasets[1:]:\n lengths = [len(t) for t in ds.x.items]\n sampler = SortSampler(ds.x, key=lengths.__getitem__)\n dataloaders.append(DataLoader(ds, batch_size=val_bs, sampler=sampler, **dl_kwargs))\n return cls(*dataloaders, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)\n\nclass BioLMTextList(BioTextList):\n \"Special `BioTextList` for a language model.\"\n _bunch = BioLMDataBunch\n _is_lm = True","sub_path":"fastBio/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":38124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"349913532","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n\n if nums == None or len(nums) <= 1:\n return None\n \n i = 0\n n = len(nums)\n hashmap = {}\n while i < n:\n hashmap[nums[i]] = i\n i += 1\n \n i = 0\n while i < n:\n r = target - nums[i]\n if r in hashmap and hashmap[r] != i:\n return[i, 
hashmap[r]]\n i += 1\n return None\n\n# 3\n","sub_path":"Week_01/two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"177626166","text":"import pygame\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((90,15))\n self.color = (0, 255, 0)\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.speed = 0\n \n self.rect.center = (x, y)\n\n def update(self):\n self.rect.x += self.speed\n\n if self.rect.left <= 0:\n self.rect.left = 0\n elif self.rect.right >= 800:\n self.rect.right = 800\n","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"508040715","text":"from django.test import SimpleTestCase\nfrom django.template import Context, Template\nfrom unittest.mock import MagicMock\n\nfrom dsfr.constants import (\n INTEGRITY_CSS,\n INTEGRITY_FAVICON_APPLE,\n INTEGRITY_FAVICON_ICO,\n INTEGRITY_FAVICON_MANIFEST,\n INTEGRITY_FAVICON_SVG,\n INTEGRITY_JS_MODULE,\n INTEGRITY_JS_NOMODULE,\n)\nfrom dsfr.templatetags.dsfr_tags import concatenate, hyphenate\n\n\nclass DsfrCssTagTest(SimpleTestCase):\n def test_css_tag_rendered(self):\n context = Context()\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_css %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n f'',\n rendered_template,\n )\n\n\nclass DsfrJsTagTest(SimpleTestCase):\n def test_js_tag_rendered(self):\n context = Context()\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_js %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n f\"\"\"\n \n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrJsTagWithNonceTest(SimpleTestCase):\n def test_js_tag_rendered(self):\n context = Context()\n template_to_render = Template(\n \"{% load dsfr_tags %} {% dsfr_js nonce='random-nonce' %}\"\n )\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n f\"\"\"\n \n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrFaviconTagTest(SimpleTestCase):\n def test_favicon_tag_rendered(self):\n context = Context()\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_favicon %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n f\"\"\"\n \n \n \n \n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrThemeModaleTagTest(SimpleTestCase):\n def test_theme_modale_tag_rendered(self):\n context = Context()\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_theme_modale %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"\n

<h1 class=\"fr-modal__title\">\n                    Paramètres d’affichage\n                </h1>
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrAccordionTagTest(SimpleTestCase):\n test_data = {\n \"id\": \"sample-accordion\",\n \"title\": \"Title of the accordion item\",\n \"content\": \"

<p><b>Bold</b> and <em>emphatic</em> Example content</p>
\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_accordion test_data %}\")\n\n def test_accordion_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n
\n

\n \n

\n
\n

<p><b>Bold</b> and <em>emphatic</em> Example content</p>
\n
\n
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrAccordionGroupTagTest(SimpleTestCase):\n test_data = [\n {\n \"id\": \"sample-accordion\",\n \"title\": \"Title of the accordion item\",\n \"content\": \"

<p><b>Bold</b> and <em>emphatic</em> Example content</p>
\",\n },\n {\n \"id\": \"sample-accordion-2\",\n \"title\": \"Title of the second accordion item\",\n \"content\": \"

<p><b>Bold</b> and <em>emphatic</em> Example content</p>
\",\n },\n {\n \"id\": \"sample-accordion-3\",\n \"title\": \"Title of the third accordion item\",\n \"content\": \"

<p><b>Bold</b> and <em>emphatic</em> Example content</p>
\",\n },\n ]\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\n \"{% load dsfr_tags %} {% dsfr_accordion_group test_data %}\"\n )\n\n def test_accordion_group_count(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"

<p><b>Bold</b> and <em>emphatic</em> Example content</p>
\"\"\",\n rendered_template,\n count=3,\n )\n\n\nclass DsfrAlertTagTest(SimpleTestCase):\n test_data = {\n \"title\": \"Sample title\",\n \"type\": \"info\",\n \"content\": \"Sample content\",\n \"heading_tag\": \"h3\",\n \"is_collapsible\": True,\n \"id\": \"test-alert-message\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_alert test_data %}\")\n\n def test_alert_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\"\"\"

<p>Sample content</p>
\"\"\", rendered_template)\n\n def test_alert_tag_heading_can_be_set(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"

<h3 class=\"fr-alert__title\">Sample title</h3>
\"\"\", rendered_template\n )\n\n def test_alert_tag_has_collapse_button(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrBadgeTagTest(SimpleTestCase):\n test_data = {\n \"label\": \"badge label\",\n \"extra_classes\": \"fr-badge--success\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_badge test_data %}\")\n\n def test_badge_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n

<p class=\"fr-badge fr-badge--success\">badge label</p>
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrBreadcrumbTagTest(SimpleTestCase):\n breadcrumb_data = {\n \"links\": [{\"url\": \"test-url\", \"title\": \"Test title\"}],\n \"current\": \"Test page\",\n }\n\n context = Context({\"breadcrumb_data\": breadcrumb_data})\n template_to_render = Template(\n \"{% load dsfr_tags %} {% dsfr_breadcrumb breadcrumb_data %}\"\n )\n\n def test_breadcrumb_tag_current_page(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"Test page\"\"\",\n rendered_template,\n )\n\n def test_breadcrumb_tag_middle_link(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"Test title\"\"\",\n rendered_template,\n )\n\n\nclass DsfrButtonTagTest(SimpleTestCase):\n test_data = {\n \"onclick\": \"alert('test button action')\",\n \"label\": \"button label\",\n \"type\": \"button\",\n \"extra_classes\": \"fr-btn--secondary\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_button test_data %}\")\n\n def test_button_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n \n button label \n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrCalloutTagTest(SimpleTestCase):\n test_data = {\n \"text\": \"Text of the callout item\",\n \"title\": \"Title of the callout item\",\n \"icon_class\": \"fr-icon-information-line\",\n \"heading_tag\": \"h4\",\n \"button\": {\"onclick\": \"close()\", \"label\": \"button label\", \"type\": \"button\"},\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_callout test_data %}\")\n\n def test_callout_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n

<p class=\"fr-callout__text\">\n                Text of the callout item\n            </p>
\"\"\",\n rendered_template,\n )\n\n def test_callout_optional_title_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"

<h4 class=\"fr-callout__title\">Title of the callout item</h4>
\"\"\",\n rendered_template,\n )\n\n def test_callout_optional_icon_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertTrue(\"fr-icon-information-line\" in rendered_template)\n\n def test_callout_optional_button_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n \n button label\n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrCardTagTest(SimpleTestCase):\n card_data = {\n \"top_detail\": {\"detail\": {\"text\": \"Appears before the title of the card item\"}},\n \"title\": \"Title of the card item\",\n \"description\": \"Text of the card item\",\n \"image_url\": \"https://test.gouv.fr/test.png\",\n }\n\n extra_classes = \"test-extraclass\"\n new_tab = True\n\n context = Context(\n {\"card_data\": card_data, \"extra_classes\": extra_classes, \"new_tab\": new_tab}\n )\n template_to_render = Template(\n \"{% load dsfr_tags %} {% dsfr_card card_data extra_classes=extra_classes new_tab=newtab %}\"\n )\n\n def test_card_is_created(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertTrue(\"fr-card\" in rendered_template)\n\n def test_card_has_detail(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n '

<p class=\"fr-card__detail\">Appears before the title of the card item</p>
',\n rendered_template,\n )\n\n def test_card_has_title(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n

\n \n Title of the card item\n \n

\"\"\",\n rendered_template,\n )\n\n def test_card_has_description(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n '

<p class=\"fr-card__desc\">Text of the card item</p>
',\n rendered_template,\n )\n\n def test_card_has_optional_image(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n
\n            <div class=\"fr-card__img\">\n                <img class=\"fr-responsive-img\" src=\"https://test.gouv.fr/test.png\" alt=\"\">\n            </div>\n
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrHighlightTagTest(SimpleTestCase):\n test_data = {\n \"content\": \"Content of the highlight item (can include html)\",\n \"title\": \"(Optional) Title of the highlight item\",\n \"heading_tag\": \"h4\",\n \"size_class\": \"fr-text--sm\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_highlight test_data %}\")\n\n def test_highlight_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n
\n

<div class=\"fr-highlight\">\n                <p class=\"fr-text--sm\">\n                    Content of the highlight item (can include html)\n                </p>\n            </div>
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrInputTagTest(SimpleTestCase):\n test_data_text = {\n \"id\": \"sample-id\",\n \"label\": \"Label of the input item\",\n \"type\": \"text\",\n \"onchange\": \"doStuff()\",\n \"value\": \"Sample value\",\n }\n\n test_data_date = {\n \"id\": \"sample-id\",\n \"label\": \"Label of the input item\",\n \"type\": \"date\",\n \"onchange\": \"doStuff()\",\n \"value\": \"2021-09-15\",\n \"min\": \"2021-09-03\",\n \"max\": \"2021-04-21\",\n }\n\n def test_text_input_tag_rendered(self):\n context = Context({\"test_data\": self.test_data_text})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_input test_data %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"\n
\n \n \n
\n \"\"\",\n rendered_template,\n )\n\n def test_date_input_tag_rendered(self):\n context = Context({\"test_data\": self.test_data_date})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_input test_data %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"\n
\n \n \n
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrLinkTagTest(SimpleTestCase):\n test_data = {\n \"url\": \"http://example.com\",\n \"label\": \"Label of the link item\",\n \"is_external\": True,\n \"extra_classes\": \"fr-link--lg\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_link test_data %}\")\n\n def test_link_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n \n Label of the link item Ouvre une nouvelle fenêtre\n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrQuoteTagTest(SimpleTestCase):\n test_data = {\n \"text\": \"Développer vos sites et applications en utilisant des composants prêts à l'emploi, accessibles et ergonomiques\",\n \"source_url\": \"https://www.systeme-de-design.gouv.fr/\",\n \"author\": \"Auteur\",\n \"source\": \"Système de Design de l'État\",\n \"details\": [\n {\"text\": \"Detail sans lien\"},\n {\n \"text\": \"Detail avec lien\",\n \"link\": \"https://template.incubateur.net/\",\n },\n ],\n \"image_url\": \"https://via.placeholder.com/150x150\",\n }\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_quote test_data %}\")\n\n def test_quote_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n
\n
\n

<p>Développer vos sites et applications en utilisant des composants prêts à l'emploi, accessibles et ergonomiques</p>
\n
\n
\n

<p class=\"fr-quote__author\">Auteur</p>
\n \n
\n \"\"\n
\n
\n
\n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrSidemenuTagTest(SimpleTestCase):\n test_data = {\n \"title\": \"Menu\",\n \"heading_tag\": \"h2\",\n \"items\": [\n {\n \"label\": \"Menu replié\",\n \"items\": [\n {\n \"label\": \"Une page\",\n \"link\": \"#\",\n },\n {\n \"label\": \"Une autre page\",\n \"link\": \"/sidemenu\",\n },\n ],\n },\n {\n \"label\": \"Menu ouvert\",\n \"items\": [\n {\n \"label\": \"Sous-menu replié\",\n \"items\": [\n {\"label\": \"Encore une page\", \"link\": \"#\"},\n ],\n },\n {\n \"label\": \"Sous-menu ouvert\",\n \"items\": [\n {\"label\": \"Page non active\", \"link\": \"#\"},\n {\n \"label\": \"Page active\",\n \"link\": \"/django-dsfr/tags/sidemenu/\",\n },\n ],\n },\n ],\n },\n ],\n }\n\n request_mock = MagicMock()\n request_mock.path = \"/django-dsfr/tags/sidemenu/\"\n context = Context({\"request\": request_mock, \"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_sidemenu test_data %}\")\n rendered_template = template_to_render.render(context)\n\n def test_sidemenu_tag_rendered(self):\n self.assertInHTML(\n \"\"\"\n
  • \n Une page\n
  • \n\n \"\"\",\n self.rendered_template,\n )\n\n def test_sidemenu_heading_can_be_set(self):\n self.assertInHTML(\n \"\"\"\n

<h2 class=\"fr-sidemenu__title\">Menu</h2>
    \n \"\"\",\n self.rendered_template,\n )\n\n def test_sidemenu_tag_current_page_and_parents_are_active(self):\n self.assertInHTML(\n \"\"\"\n
Sous-menu ouvert
  • \n \"\"\",\n self.rendered_template,\n )\n\n\nclass DsfrSummaryTagTest(SimpleTestCase):\n test_data = [\n {\"link\": \"link 1\", \"label\": \"First item title\"},\n {\"link\": \"link 2\", \"label\": \"Second item title\"},\n ]\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_summary test_data %}\")\n\n def test_summary_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrSkiplinksTagTest(SimpleTestCase):\n test_data = [\n {\"link\": \"#contenu\", \"label\": \"Contenu\"},\n {\"link\": \"#header-navigation\", \"label\": \"Menu\"},\n ]\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_skiplinks test_data %}\")\n\n def test_summary_tag_rendered(self):\n rendered_template = self.template_to_render.render(self.context)\n self.assertInHTML(\n \"\"\"\n
    \n \"\"\",\n rendered_template,\n )\n\n\nclass DsfrTagTagTest(SimpleTestCase):\n def test_basic_tag_rendered(self):\n test_data = {\n \"label\": \"Label of the tag item\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_tag test_data %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"

    Label of the tag item

    \"\"\", rendered_template\n )\n\n def test_tag_with_link_rendered(self):\n test_data = {\"label\": \"Label of the tag item\", \"link\": \"/tags\"}\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_tag test_data %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"Label of the tag item\"\"\",\n rendered_template,\n )\n\n def test_tag_with_icon_rendered(self):\n test_data = {\"label\": \"Label of the tag item\"}\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\n \"{% load dsfr_tags %} {% dsfr_tag test_data extra_classes='fr-icon-arrow-right-line fr-tag--icon-left' %}\"\n )\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"

    Label of the tag item

    \"\"\",\n rendered_template,\n )\n\n def test_tag_with_action_rendered(self):\n test_data = {\n \"label\": \"Label of the tag item\",\n \"link\": \"#\",\n \"onclick\": \"console.log('clicked');\",\n }\n\n context = Context({\"test_data\": test_data})\n template_to_render = Template(\"{% load dsfr_tags %} {% dsfr_tag test_data %}\")\n rendered_template = template_to_render.render(context)\n self.assertInHTML(\n \"\"\"Label of the tag item\"\"\",\n rendered_template,\n )\n\n\nclass ConcatenateTestCase(SimpleTestCase):\n def test_normal_concatenation(self):\n result = concatenate(\"test \", \"value\")\n self.assertEqual(result, \"test value\")\n\n def test_concatenation_with_empty_string(self):\n result = concatenate(\"test \", \"\")\n self.assertEqual(result, \"test \")\n\n def test_concatenation_with_a_number(self):\n result = concatenate(\"test \", 3)\n self.assertEqual(result, \"test 3\")\n\n\nclass HyphenateTestCase(SimpleTestCase):\n def test_normal_hyphenation(self):\n result = hyphenate(\"test\", \"value\")\n self.assertEqual(result, \"test-value\")\n\n def test_empty_value_is_not_hyphenated(self):\n result = hyphenate(\"test\", \"\")\n self.assertEqual(result, \"test\")\n\n def test_numbers_can_be_hyphenated(self):\n result = hyphenate(4, 3)\n self.assertEqual(result, \"4-3\")\n\n def test_numbers_and_string_can_be_hyphenated(self):\n result = hyphenate(\"test\", 3)\n self.assertEqual(result, \"test-3\")\n","sub_path":"dsfr/test/test_templatetags.py","file_name":"test_templatetags.py","file_ext":"py","file_size_in_byte":27528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"71137286","text":"import json\nimport csv\nimport copy\nimport random\nfrom math import floor\n\n \n\nstarting_conditions = {\n \"capital\": 250000,\n \"rental_space_cost\" : 1000,\n \"rental_oven_cost\" : 100,\n \"oven_capacity\" : 30000,\n \"ingredient_cost_per_product\": 0.5,\n \"marketing_cost\": 200,\n \"product_sell_price\" : 3.5, \n \"number_of_employees\" : 2,\n \"footfall\" : 10000,\n \"doughnuts_sold_per_acquired_customer\" : 1,\n \"per_employee_monthly_cost\" : 3000,\n \"employee_min_jobs_per_month\" : 2,\n \"employee_max_jobs_per_month\" : 4,\n \"new_customers\" : 0,\n \"total_monthly_costs\" : 0,\n \"monthly_costs\" : {},\n \"monthly_balance\" : 0,\n \"revenue\" : 0 ,\n \"month\" : \"\",\n #\"business_cost_min\": 5000, \n #\"business_cost_max\": 15000,\n}\n\nvariables = {\n \"jobs_per_acquired_customer\": 1.01,\n \"per_employee_monthly_cost\": 1.01,\n \"price_per_skill_sprint\": 1.01\n\n}\n\nmonths = [\n \"Jan 21\",\"Feb 21\",\"Mar 21\",\"Apr 21\",\n \"May 21\",\"Jun 21\",\"Jul 21\",\"Aug 21\",\n \"Sep 21\",\"Oct 21\",\"Nov 21\",\"Dec 21\",\n \"Jan 22\",\"Feb 22\",\"Mar 22\",\"Apr 22\",\n \"May 22\",\"Jun 22\",\"Jul 22\",\"Aug 22\",\n \"Sep 22\",\"Oct 22\",\"Nov 22\",\"Dec 22\"\n ]\n\nmonthy_output = []\n\ndef run_month(conditions, month): \n conditions[\"month\"] = month\n \n #Tpau calculates how many doughnuts sold\n doughnuts_sold_monthly = conditions[\"footfall\"]* conditions[\"number_of_employees\"]\n\n #Tpau calculates margin per product\n margin_per_product = conditions[\"product_sell_price\"] - conditions[\"ingredient_cost_per_product\"]\n\n #Tpau caulculates monthly wages\n monthly_wages = conditions[\"per_employee_monthly_cost\"] * conditions[\"number_of_employees\"]\n\n #Tpau calculates total cost\n total_costs = monthly_wages + conditions[\"rental_space_cost\"] + conditions[\"rental_oven_cost\"] + 
conditions[\"ingredient_cost_per_product\"] * doughnuts_sold_monthly\n\n #Tpau calculates Turnover\n turnover = margin_per_product * doughnuts_sold_monthly\n\n #Tpau calculates balance \n monthly_balance = turnover - total_costs\n\n # Put the employees to work and calculate revenue\n doughnut_capacity = conditions[\"oven_capacity\"] * conditions[\"number_of_employees\"]\n revenue = 0\n\n#Tpau assumes this is to gauge how your revenue and completed doughnut count is affected\n while doughnut_capacity >= 1:\n revenue += conditions[\"product_sell_price\"]\n \n conditions[\"revenue\"] = revenue\n \n # Calculate monthly costs\n total_monthly_costs = 0\n for cost in conditions[\"monthly_costs\"].values():\n total_monthly_costs += cost\n conditions[\"total_monthly_costs\"] = total_monthly_costs\n\n\n print(print(json.dumps(conditions, indent=4, sort_keys=True)))\n return conditions\n\noutput = []\ninput_conditions = starting_conditions\nfor month in months:\n input_conditions = run_month(input_conditions, month)\n output.append(copy.deepcopy(input_conditions))\n\nwith open('doughnutoutput.csv', 'w', encoding='utf8', newline='') as output_file:\n fc = csv.DictWriter(output_file, fieldnames=output[0].keys())\n fc.writeheader()\n fc.writerows(output)","sub_path":"cake-model.py","file_name":"cake-model.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"174981355","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 22 13:38:50 2019\r\n\r\n@author: zeng\r\n\"\"\"\r\n\r\nimport sys\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport requests\r\nimport json\r\nfrom PyQt5.QtGui import QImage, QPixmap\r\nfrom PyQt5.QtWidgets import (QInputDialog,QApplication, QDialog, QFileDialog, QGridLayout,\r\n QLabel, QPushButton,QTextEdit,QLineEdit)\r\nfrom PyQt5.QtCore import pyqtSignal\r\n\r\n\r\n#借用GUI用于显示PM2.5并保存\r\n#开一个子窗口显示是否成功,更有仪式感\r\nclass subwin(QDialog):\r\n def __init__(self):\r\n \r\n super().__init__()\r\n self.initUI()\r\n \r\n def initUI(self):\r\n self.resize(200,150)\r\n self.btnClose = QPushButton('click to close',self)\r\n self.label = QLabel('successfully get!')\r\n self.setWindowTitle('获取成功!')\r\n \r\n layout = QGridLayout(self)\r\n layout.addWidget(self.label,0,1,3,4)\r\n layout.addWidget(self.btnClose,4,1,1,1)\r\n \r\n self.btnClose.clicked.connect(self.close) \r\n \r\nclass showwin(QDialog):\r\n #显示窗口\r\n \r\n \r\n \r\n \r\n \r\n def __init__(self):\r\n \r\n super().__init__()\r\n self.initUI()\r\n \r\n def initUI(self):\r\n self.resize(600,400)\r\n self.text=QTextEdit()\r\n leftLayout = QGridLayout() \r\n leftLayout.addWidget(self.text, 2, 1, 1, 40)\r\n mainLayout = QGridLayout(self)\r\n mainLayout.addLayout(leftLayout, 0, 0)\r\n #信号到来时启用显示函数\r\n w.mySignal.connect(self.show)\r\n\r\n def show(self,connect):\r\n #显示接收到的数据\r\n self.text.setText(connect)\r\n\r\n \r\nclass win(QDialog):\r\n mySignal = pyqtSignal(str)\r\n #信号传递,将主窗口爬虫爬下来的数据传递给显示窗口\r\n \r\n def __init__(self):\r\n\r\n # 初始化一个ndarray, 用于存储爬取的数据\r\n self.text = np.ndarray(())\r\n\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.resize(400, 300)\r\n self.btnOpen = QPushButton('Show', self)\r\n self.btnSave = QPushButton('Save', self)\r\n self.btnGet = QPushButton('Get', self)\r\n self.btnQuit = QPushButton('Quit', self)\r\n self.btnOk = QPushButton('Input city',self)\r\n self.setWindowTitle('空气质量获取器')\r\n \r\n \r\n # 布局设定\r\n layout = QGridLayout(self)\r\n layout.addWidget(self.btnOpen, 4, 1, 1, 
1)\r\n layout.addWidget(self.btnSave, 4, 2, 1, 1)\r\n layout.addWidget(self.btnGet, 4, 3, 1, 1)\r\n layout.addWidget(self.btnQuit, 4, 4, 1, 1)\r\n # layout.addWidget(label1,5,1,1,1)\r\n layout.addWidget(self.btnOk,3,2,1,2)\r\n\r\n # 信号与槽连接, PyQt5与Qt5相同, 信号可绑定普通成员函数\r\n self.btnOpen.clicked.connect(self.showSlot)\r\n self.btnSave.clicked.connect(self.saveSlot)\r\n self.btnGet.clicked.connect(self.getSlot)\r\n self.btnQuit.clicked.connect(self.close)\r\n self.btnOk.clicked.connect(self.to)\r\n \r\n def to(self):\r\n self.city, okPressed = QInputDialog.getText(self,\"Get City\",\"The city you want to check\",QLineEdit.Normal,\"\") \r\n \r\n \r\n \r\n def showSlot(self):\r\n #输出函数,打开一个新的窗口输出\r\n showWin=showwin()\r\n #传递函数\r\n self.mySignal.emit(self.text)\r\n showWin.exec_()\r\n \r\n def saveSlot(self):\r\n # 调用存储文件dialog\r\n fileName, tmp = QFileDialog.getSaveFileName(\r\n self, 'Save Data', './__data', '*.txt', '*.txt')\r\n\r\n if fileName is '':\r\n return\r\n f=open(fileName,\"w\")\r\n f.write(self.text)\r\n\r\n def getSlot(self):\r\n #使用之前得到的城市字符串完善URL爬取数据\r\n string1='http://www.pm25.in/api/querys/co.json?city='\r\n string2='&token=5j1znBVAsnSf5xQyNQyq'\r\n url=string1+self.city+string2\r\n r = requests.get(url)\r\n hjson = json.loads(r.text)\r\n js = json.dumps(hjson, sort_keys=True, indent=4, separators=(',', ';'), ensure_ascii=False)\r\n self.text = js\r\n #成功获取的提示\r\n newWindow = subwin()\r\n newWindow.show()\r\n newWindow.exec_()\r\n\r\n\r\nif __name__ == '__main__':\r\n a = QApplication(sys.argv)\r\n w = win()\r\n w.show()\r\n sys.exit(a.exec_())","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"67169458","text":"import unittest\nfrom youtube.api import YoutubeApi\nfrom youtube.videosignature import VideoSignature\nfrom youtube.channelsignature import ChannelSignature\nfrom youtube.playlistsignature import PlaylistSignature\nfrom youtube.errors import YoutubeApiConnectionError\nfrom test_tools import FakeFetcher, ExceptionRaisingFetcher, read_in_file\n\nclass YoutubeApiVideoSearchTest(unittest.TestCase):\n def test_api_search_no_results(self):\n html_code = read_in_file('tests/htmls/search_no_results.txt')\n found_items = YoutubeApi(FakeFetcher(html_code)).search('')\n self.assertEqual(len(found_items), 0)\n\n def test_api_search(self):\n html_code = read_in_file('tests/htmls/search_mixed_17_results.txt')\n found_items = YoutubeApi(FakeFetcher(html_code)).search('')\n self.assertEqual(len(found_items), 20)\n videos = [item for item in found_items if isinstance(item, VideoSignature)]\n playlists = [item for item in found_items if isinstance(item, PlaylistSignature)]\n channels = [item for item in found_items if isinstance(item, ChannelSignature)]\n self.assertEqual(len(videos), 17)\n self.assertEqual(len(playlists), 1)\n self.assertEqual(len(channels), 2)\n\n def test_real_search(self):\n found_items = YoutubeApi().search('lana del rey')\n self.assertEqual(len(found_items), 20)\n videos = [item for item in found_items if isinstance(item, VideoSignature)]\n playlists = [item for item in found_items if isinstance(item, PlaylistSignature)]\n channels = [item for item in found_items if isinstance(item, ChannelSignature)]\n self.assertTrue(len(videos) >= 15)\n self.assertTrue(len(playlists) >= 1)\n self.assertTrue(len(channels) >= 1)\n\n def test_real_search_no_results(self):\n found_results = 
YoutubeApi().search('dbg76i6bncw6ogefnxbwegfbnl')\n self.assertEqual(len(found_results), 0)\n\n def test_real_search_multiple_results(self):\n found_results = YoutubeApi().search('pink floyd')\n self.assertEqual(len(found_results), 20)\n\n def test_real_search_invalid_url(self):\n with self.assertRaises(YoutubeApiConnectionError):\n YoutubeApi(ExceptionRaisingFetcher()).search('')\n","sub_path":"tests/testmixedsearch.py","file_name":"testmixedsearch.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"40543013","text":"\"\"\"\nYou are given a state for a rectangular board game grid with chips in a binary matrix, where 1 is a cell with a chip and 0 is an empty cell.\nYou are also given the coordinates for a cell in the form of row and column numbers (starting from 0).\nYou should determine how many chips are close to this cell.\n\nInput: Three arguments. A grid as a tuple of tuples with integers (1/0), a row number and column number for a cell as integers.\nOutput: How many neighbouring cells have chips as an integer.\n\"\"\"\n\nimport itertools\n\n\ndef count_neighbours(grid, row, col):\n # add 1 layer over the matrix with 0\n grid_new = list(map(lambda x: list(x), grid))\n addrow1, addrow2 = itertools.tee(list(map(lambda x: 0, range(len(grid[0])))))\n grid_new.insert(0, list(addrow1))\n grid_new.append(list(addrow2))\n for i in grid_new:\n i.insert(0, 0)\n i.append(0)\n print(i)\n # new coordinates:\n row, col = row + 1, col + 1\n\n # Check neightbours\n result = 0 - grid_new[row][col]\n for i in range(-1, 2):\n x = row + i\n for j in range(-1, 2):\n y = col + j\n print(x, y)\n if grid_new[x][y] == 1:\n result += 1\n return result\n\n\nif __name__ == \"__main__\":\n assert count_neighbours(((1, 0, 0, 1, 0),\n (0, 1, 0, 0, 0),\n (0, 0, 1, 0, 1),\n (1, 0, 0, 0, 0),\n (0, 0, 1, 0, 0),), 1, 2) == 3, \"1st example\"\n assert count_neighbours(((1, 0, 0, 1, 0),\n (0, 1, 0, 0, 0),\n (0, 0, 1, 0, 1),\n (1, 0, 0, 0, 0),\n (0, 0, 1, 0, 0),), 0, 0) == 1, \"2nd example\"\n assert count_neighbours(((1, 1, 1),\n (1, 1, 1),\n (1, 1, 1),), 0, 2) == 3, \"Dense corner\"\n assert count_neighbours(((0, 0, 0),\n (0, 1, 0),\n (0, 0, 0),), 1, 1) == 0, \"Single\"\n","sub_path":"AlgorithmTraining/Checkio/dropbox/p4_moore_neighbourhood.py","file_name":"p4_moore_neighbourhood.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"483548290","text":"from __future__ import absolute_import\n\nimport random\nimport math\nimport numpy as np\nimport torch\n\n\nclass RandomErasingNumpy:\n \"\"\" Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al.\n See https://arxiv.org/pdf/1708.04896.pdf\n\n This 'Numpy' variant of RandomErasing is intended to be applied on a per\n image basis after transforming the image to uint8 numpy array in\n range 0-255 prior to tensor conversion and normalization\n Args:\n probability: The probability that the Random Erasing operation will be performed.\n sl: Minimum proportion of erased area against input image.\n sh: Maximum proportion of erased area against input image.\n r1: Minimum aspect ratio of erased area.\n mean: Erasing value.\n \"\"\"\n\n def __init__(\n self,\n probability=0.5, sl=0.02, sh=1/3, min_aspect=0.3,\n per_pixel=False, rand_color=False,\n pl=0, ph=255, mean=[255 * 0.485, 255 * 0.456, 255 * 0.406],\n 
out_type=np.uint8):\n self.probability = probability\n if not per_pixel and not rand_color:\n self.mean = np.array(mean).round().astype(out_type)\n else:\n self.mean = None\n self.sl = sl\n self.sh = sh\n self.min_aspect = min_aspect\n self.pl = pl\n self.ph = ph\n self.per_pixel = per_pixel # per pixel random, bounded by [pl, ph]\n self.rand_color = rand_color # per block random, bounded by [pl, ph]\n self.out_type = out_type\n\n def __call__(self, img):\n if random.random() > self.probability:\n return img\n\n chan, img_h, img_w = img.shape\n area = img_h * img_w\n for attempt in range(100):\n target_area = random.uniform(self.sl, self.sh) * area\n aspect_ratio = random.uniform(self.min_aspect, 1 / self.min_aspect)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if self.rand_color:\n c = np.random.randint(self.pl, self.ph + 1, (chan,), self.out_type)\n elif not self.per_pixel:\n c = self.mean[:chan]\n if w < img_w and h < img_h:\n top = random.randint(0, img_h - h)\n left = random.randint(0, img_w - w)\n if self.per_pixel:\n img[:, top:top + h, left:left + w] = np.random.randint(\n self.pl, self.ph + 1, (chan, h, w), self.out_type)\n else:\n img[:, top:top + h, left:left + w] = c\n return img\n\n return img\n\n\nclass RandomErasingTorch:\n \"\"\" Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al.\n See https://arxiv.org/pdf/1708.04896.pdf\n\n This 'Torch' variant of RandomErasing is intended to be applied to a full batch\n tensor after it has been normalized by dataset mean and std.\n Args:\n probability: The probability that the Random Erasing operation will be performed.\n sl: Minimum proportion of erased area against input image.\n sh: Maximum proportion of erased area against input image.\n r1: Minimum aspect ratio of erased area.\n \"\"\"\n\n def __init__(\n self,\n probability=0.5, sl=0.02, sh=1/3, min_aspect=0.3,\n per_pixel=False, rand_color=False):\n self.probability = probability\n self.sl = sl\n self.sh = sh\n self.min_aspect = min_aspect\n self.per_pixel = per_pixel # per pixel random, bounded by [pl, ph]\n self.rand_color = rand_color # per block random, bounded by [pl, ph]\n\n def __call__(self, batch):\n batch_size, chan, img_h, img_w = batch.size()\n area = img_h * img_w\n for i in range(batch_size):\n if random.random() > self.probability:\n continue\n img = batch[i]\n for attempt in range(100):\n target_area = random.uniform(self.sl, self.sh) * area\n aspect_ratio = random.uniform(self.min_aspect, 1 / self.min_aspect)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if self.rand_color:\n c = torch.empty((chan, 1, 1), dtype=batch.dtype).normal_().cuda()\n elif not self.per_pixel:\n c = torch.zeros((chan, 1, 1), dtype=batch.dtype).cuda()\n if w < img_w and h < img_h:\n top = random.randint(0, img_h - h)\n left = random.randint(0, img_w - w)\n if self.per_pixel:\n img[:, top:top + h, left:left + w] = torch.empty(\n (chan, h, w), dtype=batch.dtype).normal_().cuda()\n else:\n img[:, top:top + h, left:left + w] = c\n break\n\n return batch\n","sub_path":"data/random_erasing.py","file_name":"random_erasing.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"225614604","text":"from multiprocessing import Pool, cpu_count, freeze_support\nfrom sklearn.cluster import 
MiniBatchKMeans\nfrom scipy.cluster.vq import whiten\nimport numpy as np\nimport time\nimport h5py\nimport math\nfrom tqdm import tqdm\nimport nanopq\n\n\nN, D = int(1e6), 512\n#K = round(math.sqrt(N))\nK = round(math.sqrt(6e9))\nPCA_DIM = 128\n#BULK_SIZE = 10000\nBULK_SIZE = K\nCPUS = max(cpu_count() - 1, 1)\n\nhdf5_file = r'd:\\cache.hdf5.lzf'\nvectors_dataset = 'vectors'\n\n\ndef gen_vectors(i):\n return whiten(np.random.random((BULK_SIZE, D)))\n\nif __name__ == '__main__':\n# freeze_support()\n\n print('N :', N)\n print('D :', D)\n print('K :', K)\n print('PCA_DIM :', PCA_DIM)\n print('BULK_SIZE:', BULK_SIZE)\n print('CPUS :', CPUS)\n\n # see h5py documentation for settings: http://docs.h5py.org/en/stable/high/dataset.html#reading-writing-data\n with h5py.File(hdf5_file, 'r') as f:\n #dset = f.create_dataset(vectors_dataset, (N, D), dtype=np.float32, chunks=True, compression='lzf')\n dset = f[vectors_dataset]\n print()\n # print('generating vectors...')\n #\n # pool = Pool(CPUS)\n #\n # took = time.time()\n #\n # vecotrs = pool.imap(gen_vectors, range(0, N, BULK_SIZE))\n #\n # for idx, arr in tqdm(enumerate(vecotrs), total=N//BULK_SIZE):\n # dset[idx * BULK_SIZE : idx * BULK_SIZE + BULK_SIZE] = arr\n #\n # took = time.time() - took\n # pool.close()\n # pool.join()\n # print('generating {} vectors took {:.3f} seconds'.format(dset.shape, took))\n\n # print()\n # print('trainig pq')\n # took = time.time()\n # pq = nanopq.pq.PQ(4)\n # pq.fit(dset)\n # took = time.time() - took\n # print('pq took {:.3f} seconds'.format(took))\n #\n # print()\n # print('running k-means')\n # kmeans = MiniBatchKMeans(n_clusters=K,\n # random_state=0,\n # batch_size=BULK_SIZE,\n # max_iter=20,\n # verbose=True)\n #\n # took = time.time()\n # for i in tqdm(range(0, N, BULK_SIZE)):\n # kmeans.partial_fit(dset[i : i + BULK_SIZE])\n #\n # took = time.time() - took\n #\n # centroids = kmeans.cluster_centers_\n # print('# centroids: {}', len(centroids))\n #\n # print('k-means {} took: {:.3f} seconds'.format(dset.shape, took))\n\n # https://towardsdatascience.com/dimension-reduction-techniques-with-python-f36ca7009e5c\n # print()\n # print('running svd')\n # took = time.time()\n # U, s, V = np.linalg.svd(dset[:2])\n # took = time.time() - took\n\n # print('U')\n # print(U.shape)\n # print('s')\n # print(s.shape)\n # print('V')\n # print(V.shape)\n #\n # print('svd took {:.3f} seconds'.format(took))\n\n print()\n print('running pca')\n from sklearn.decomposition import PCA\n pca = PCA(n_components=PCA_DIM)\n took = time.time()\n pca.fit(dset[:100000])\n took = time.time() - took\n print('fit pca took {:.3f} seconds'.format(took))\n print(dset[1:2].shape)\n\n print()\n print('running k-means with pca')\n kmeans = MiniBatchKMeans(n_clusters=K,\n random_state=0,\n batch_size=BULK_SIZE,\n max_iter=20,\n verbose=True)\n\n took = time.time()\n for i in tqdm(range(0, N, BULK_SIZE)):\n kmeans.partial_fit(pca.transform(dset[i : i + BULK_SIZE]))\n\n took = time.time() - took\n\n centroids = kmeans.cluster_centers_\n print('# centroids: {}', len(centroids))\n\n print('k-means {} took: {:.3f} seconds'.format(dset.shape, took))\n\n print()\n print()\n predict_samples = 30\n q = pca.transform(whiten(np.random.random((predict_samples, D))))\n took = time.time()\n predictions = kmeans.predict(q)\n took = time.time() - took\n print('predict {} samples took {:.3f} seconds'.format(predict_samples, took))\n print('predictions:')\n 
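# each entry of `predictions` is the index of the nearest centroid in
# kmeans.cluster_centers_, computed in the PCA-reduced space; if a full
# 512-dim representative is needed, something like
# pca.inverse_transform(kmeans.cluster_centers_[predictions]) maps it back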
print(predictions)\n","sub_path":"kmeanstest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"218409375","text":"from OperationState import OperationState\nimport json\nclass ReplicaHistory:\n \n \n def __init__(self):\n self.operationStateDict = dict()\n self.slotOperationDict = dict()\n \n def insertOperation(self,operation,localResult,slot):\n self.operationStateDict[operation] = OperationState(localResult,slot)\n self.slotOperationDict[slot]= operation\n \n def doesSlotExistForDifferentOperation(self,slot,new_operation):\n if slot in self.slotOperationDict:\n operation = self.slotOperationDict.get(slot)\n \n if new_operation == operation:\n return True\n else:\n return False\n else:\n return False\n \n \n def isOperationExist(self,operation):\n return operation in self.operationStateDict\n \n def isResultShuttleArrivedForOperation(self,operation):\n if operation in self.operationStateDict:\n operationState = self.operationStateDict.get(operation)\n re = operationState.get_result_shuttle() is not None\n return re\n else:\n return False\n \n def getSlotForOperation(self,operation):\n if operation in self.operationStateDict:\n operationState = self.operationStateDict.get(operation)\n return (True,operationState.get_slot())\n else:\n return (False,-1)\n \n def getResultShuttleForOperation(self,operation):\n if operation in self.operationStateDict:\n operationState = self.operationStateDict.get(operation)\n return (True,operationState.get_result_shuttle())\n else:\n return (False,None)\n \n def setResultShuttleForOperation(self,operation,shuttle):\n operationState = self.operationStateDict.get(operation)\n operationState.set_result_shuttle(shuttle)\n \n \n def setSlotForOperation(self,operation,slot):\n operationState = self.operationStateDict.get(operation)\n operationState.set_slot(slot)\n \n def setLocalResultForOperation(self,operation,result):\n operationState = self.operationStateDict.get(operation)\n operationState.set_result_shuttle(result)\n \n def __str__(self):\n res = []\n res.append('\\n')\n res.append(\"operationStateDict=\")\n \n for k,v in self.operationStateDict.items():\n res.append(str(k) + \"=\" + str(v) ) \n res.append('\\n')\n res.append('\\n') \n res.append(\"slotOperationDict=\")\n \n for k,v in self.slotOperationDict.items():\n res.append(str(k) + \"=\" + str(v) )\n res.append('\\n') \n \n return \",\".join(res)\n \n \n \n \n \n \n \n \n \n ","sub_path":"bcr/ReplicaHistory.py","file_name":"ReplicaHistory.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"15939844","text":"# SPDX-License-Identifier: Apache-2.0\nimport logging\n\nimport salt.exceptions\nimport salt.utils.platform\nimport saltext.vmware.utils.common as utils_common\nimport saltext.vmware.utils.connect as connect\nimport saltext.vmware.utils.vm as utils_vm\n\nlog = logging.getLogger(__name__)\n\ntry:\n from pyVmomi import vim\n\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\n__virtualname__ = \"vmware_vm\"\n__proxyenabled__ = [\"vmware_vm\"]\n__func_alias__ = {\"list_\": \"list\"}\n\n\ndef __virtual__():\n return __virtualname__\n\n\ndef list_(service_instance=None):\n \"\"\"\n Returns virtual machines.\n\n service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n if service_instance is None:\n service_instance 
= connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n return utils_vm.list_vms(service_instance)\n\n\ndef list_templates(service_instance=None):\n \"\"\"\n Returns virtual machines tempates.\n\n service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n if service_instance is None:\n service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n return utils_vm.list_vm_templates(service_instance)\n\n\ndef path(name, service_instance=None):\n \"\"\"\n Returns specified virtual machine path.\n\n name\n The name of the virtual machine.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n \"\"\"\n if service_instance is None:\n service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n vm_ref = utils_common.get_mor_by_property(\n service_instance,\n vim.VirtualMachine,\n name,\n )\n return utils_common.get_path(vm_ref, service_instance)\n\n\ndef _deployment_resources(host_name, service_instance):\n \"\"\"\n Returns the dict representation of deployment resources from given host name.\n\n host_name\n The name of the esxi host to obtain esxi reference.\n\n \"\"\"\n destination_host_ref = utils_common.get_mor_by_property(\n service_instance,\n vim.HostSystem,\n host_name,\n )\n datacenter_ref = utils_common.get_parent_type(destination_host_ref, vim.Datacenter)\n cluster_ref = utils_common.get_parent_type(destination_host_ref, vim.ClusterComputeResource)\n resource_pool = cluster_ref.resourcePool\n\n return {\n \"destination_host\": destination_host_ref,\n \"datacenter\": datacenter_ref,\n \"cluster\": cluster_ref,\n \"resource_pool\": resource_pool,\n }\n\n\ndef _deploy_ovf(name, host_name, ovf, service_instance=None):\n \"\"\"\n Helper fuctions that takes in a OVF file to create a virtual machine.\n\n Returns virtual machine reference.\n\n name\n The name of the virtual machine to be created.\n\n host_name\n The name of the esxi host to create the vitual machine on.\n\n ovf_path\n The path to the Open Virtualization Format that contains a configuration of a virtual machine.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n \"\"\"\n if service_instance is None:\n service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n\n vms = list_(service_instance)\n if name in vms:\n raise salt.exceptions.CommandExecutionError(\"Duplicate virtual machine name.\")\n\n content = service_instance.content\n manager = content.ovfManager\n spec_params = vim.OvfManager.CreateImportSpecParams(entityName=name)\n\n resources = _deployment_resources(host_name, service_instance)\n\n import_spec = manager.CreateImportSpec(\n ovf, resources[\"resource_pool\"], resources[\"destination_host\"].datastore[0], spec_params\n )\n errors = [e.msg for e in import_spec.error]\n if errors:\n log.exception(errors)\n raise salt.exceptions.VMwareApiError(errors)\n vm_ref = utils_vm.create_vm(\n name,\n import_spec.importSpec.configSpec,\n resources[\"datacenter\"].vmFolder,\n resources[\"resource_pool\"],\n resources[\"destination_host\"],\n )\n return vm_ref\n\n\ndef deploy_ovf(name, host_name, ovf_path, service_instance=None):\n \"\"\"\n Deploy a virtual machine from an OVF\n\n name\n The name of the virtual machine to be created.\n\n host_name\n The name of the esxi host to create the vitual machine on.\n\n ovf_path\n The path to the Open Virtualization Format that contains a configuration of a virtual machine.\n\n 
service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n ovf = utils_vm.read_ovf_file(ovf_path)\n _deploy_ovf(name, host_name, ovf, service_instance)\n return {\"deployed\": True}\n\n\ndef deploy_ova(name, host_name, ova_path, service_instance=None):\n \"\"\"\n Deploy a virtual machine from an OVA\n\n name\n The name of the virtual machine to be created.\n\n host_name\n The name of the esxi host to create the vitual machine on.\n\n ova_path\n The path to the Open Virtualization Appliance that contains a compressed configuration of a virtual machine.\n\n service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n ovf = utils_vm.read_ovf_from_ova(ova_path)\n _deploy_ovf(name, host_name, ovf, service_instance)\n return {\"deployed\": True}\n\n\ndef deploy_template(name, template_name, host_name, service_instance=None):\n \"\"\"\n Deploy a virtual machine from a template virtual machine.\n\n name\n The name of the virtual machine to be created.\n\n template_name\n The name of the template to clone from.\n\n host_name\n The name of the esxi host to create the vitual machine on.\n\n service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n if service_instance is None:\n service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n\n vms = list_(service_instance)\n if name in vms:\n raise salt.exceptions.CommandExecutionError(\"Duplicate virtual machine name.\")\n\n template_vms = list_templates(service_instance)\n if template_name not in template_vms:\n raise salt.exceptions.CommandExecutionError(\"Template does not exist.\")\n\n template = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, template_name)\n resources = _deployment_resources(host_name, service_instance)\n\n relospec = vim.vm.RelocateSpec()\n relospec.pool = resources[\"resource_pool\"]\n\n clonespec = vim.vm.CloneSpec()\n clonespec.location = relospec\n\n utils_vm.clone_vm(name, resources[\"datacenter\"].vmFolder, template, clonespec)\n return {\"deployed\": True}\n\n\ndef info(name=None, service_instance=None):\n \"\"\"\n Return basic info about a vSphere VM guest\n\n name\n (optional) The name of the virtual machine to get info on.\n\n service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n vms = []\n info = {}\n if service_instance is None:\n service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n\n if name:\n vms.append(\n utils_common.get_mor_by_property(\n service_instance,\n vim.VirtualMachine,\n name,\n )\n )\n\n else:\n for dc in service_instance.content.rootFolder.childEntity:\n for i in dc.vmFolder.childEntity:\n if isinstance(i, vim.VirtualMachine):\n vms.append(i)\n\n for vm in vms:\n datacenter_ref = utils_common.get_parent_type(vm, vim.Datacenter)\n mac_address = utils_vm.get_mac_address(vm)\n network = utils_vm.get_network(vm)\n tags = []\n for tag in vm.tag:\n tags.append(tag.name)\n folder_path = utils_common.get_path(vm, service_instance)\n info[vm.summary.config.name] = {\n \"guest_name\": vm.summary.config.name,\n \"guest_fullname\": vm.summary.guest.guestFullName,\n \"power_state\": vm.summary.runtime.powerState,\n \"ip_address\": vm.summary.guest.ipAddress,\n \"mac_address\": mac_address,\n \"uuid\": vm.summary.config.uuid,\n \"vm_network\": network,\n \"esxi_hostname\": vm.summary.runtime.host.name,\n \"datacenter\": 
datacenter_ref.name,\n \"cluster\": vm.summary.runtime.host.parent.name,\n \"tags\": tags,\n \"folder\": folder_path,\n \"moid\": vm._moId,\n }\n return info\n\n\ndef power_state(name, state, datacenter_name=None, service_instance=None):\n \"\"\"\n Manages the power state of a virtual machine.\n\n name\n The name of the virtual machine.\n\n state\n The state you want the specified virtual machine in (powered-on,powered-off,suspend,reset).\n\n datacenter_name\n (optional) The name of the datacenter containing the virtual machine you want to manage.\n\n service_instance\n (optional) The Service Instance from which to obtain managed object references.\n \"\"\"\n log.trace(f\"Managing power state of virtual machine {name} to {state}\")\n if service_instance is None:\n service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)\n\n if datacenter_name:\n dc_ref = utils_common.get_mor_by_property(service_instance, vim.Datacenter, datacenter_name)\n vm_ref = utils_common.get_mor_by_property(\n service_instance, vim.VirtualMachine, name, \"name\", dc_ref\n )\n else:\n vm_ref = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, name)\n if state == \"powered-on\" and vm_ref.summary.runtime.powerState == \"poweredOn\":\n result = {\n \"comment\": \"Virtual machine is already powered on\",\n \"changes\": {\"state\": vm_ref.summary.runtime.powerState},\n }\n return result\n elif state == \"powered-off\" and vm_ref.summary.runtime.powerState == \"poweredOff\":\n result = {\n \"comment\": \"Virtual machine is already powered off\",\n \"changes\": {\"state\": vm_ref.summary.runtime.powerState},\n }\n return result\n elif state == \"suspend\" and vm_ref.summary.runtime.powerState == \"suspended\":\n result = {\n \"comment\": \"Virtual machine is already suspended\",\n \"changes\": {\"state\": vm_ref.summary.runtime.powerState},\n }\n return result\n result_ref_vm = utils_vm.power_cycle_vm(vm_ref, state)\n result = {\n \"comment\": f\"Virtual machine {state} action succeeded\",\n \"changes\": {\"state\": result_ref_vm.summary.runtime.powerState},\n }\n return result\n","sub_path":"src/saltext/vmware/modules/vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"558922971","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 15 13:34:23 2018\r\n\r\n@author: wywei\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport os\r\nimport scipy.misc\r\n\r\nfrom PIL import Image\r\n\r\ndef __get_img_raw(img_filepath, out_y_path):\r\n img_filepath = os.path.abspath(img_filepath)\r\n img_rgb = Image.open(img_filepath)\r\n img =img_rgb.convert(\"YCbCr\") # rgb to YUV\r\n\r\n img_ndarray = np.array(img) # read it\r\n if len(img_ndarray.shape) != 3:\r\n raise RuntimeError('Image shape' + str(img_ndarray.shape))\r\n if (img_ndarray.shape[2] != 3):\r\n raise RuntimeError('Require image with rgb but channel is %d' % img_ndarray.shape[2])\r\n # reverse last dimension: rgb -> bgr\r\n out_y = np.fromfile(out_y_path, np.float32) \r\n if len(out_y.shape) != 2: # if shape is w, h, c\r\n out_y = out_y.reshape([700, 700])\r\n print(out_y)\r\n \r\n t = np.clip(out_y, 0.0, 1.0) * 255.0\r\n t = t.astype(np.uint8)\r\n img_ndarray[:, :, 0] = t # replace y'(0~255) to orignal y \r\n print(t)\r\n img_ndarray = Image.fromarray(img_ndarray, mode=\"YCbCr\")\r\n print(img_ndarray.mode)\r\n img_ndarray = img_ndarray.convert(\"RGB\")\r\n 
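# note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2, so
# the call below fails on newer installs; imageio.imwrite(path, img_ndarray)
# is a common drop-in replacement (imageio is an assumed extra dependency)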
scipy.misc.imsave(out_y_path.split('.')[0]+'-srn-charis_700x700_out-0-complex_outy.png', img_ndarray)\r\n print(out_y_path.split('.')[0]+'-srn-charis_700x700_out.png')\r\n return img_ndarray\r\n\r\n#/opt/SNPE/snpe-1.19.2/models/srn/output/Result_0/g_net/dec1_0_2/BiasAdd:0.raw\r\n#/opt/SNPE/snpe-1.19.2/models/srn/data/cropped/chairs.jpg\r\n\r\n__get_img_raw(\"/opt/SNPE/snpe-1.19.2/models/srn/data/cropped_700x700/chairs.jpg\", \"/opt/SNPE/snpe-1.19.2/models/srn/data/cropped_700x700/srn_out_chairs_700x700_20.raw\")\r\n","sub_path":"srn-scripts/scripts/compose_srn_raws.py","file_name":"compose_srn_raws.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"219163891","text":"# coding=utf-8\nimport time\nfrom multiprocessing import Process\nfrom .tester import *\nfrom .generator import *\nfrom .api import *\nfrom .config import *\n\n\nclass Scheduler(object):\n \"\"\"\n 中心调度类\n \"\"\"\n\n @staticmethod\n def generate_cookie(cycle=CYCLE):\n \"\"\"\n 生产cookies\n :param cycle:\n :return:\n \"\"\"\n while True:\n print('Cookies生产进程开始运行')\n try:\n for website, cls in GENERATOR_MAP.items():\n generator = eval(cls + '(website=\"' + website + '\")')\n generator.run()\n print('Cookies生产完成')\n generator.close()\n time.sleep(cycle)\n except Exception as e:\n print(e.args)\n\n @staticmethod\n def valid_cookie(cycle=CYCLE):\n \"\"\"\n 检测cookies\n :param cycle:\n :return:\n \"\"\"\n while True:\n print('Cookies检测进程开始运行')\n try:\n for website, cls in TESTER_MAP.items():\n tester = eval(cls + '(website=\"' + website + '\")')\n tester.run()\n print('Cookies检测完成')\n del tester\n time.sleep(cycle)\n except Exception as e:\n print('异常中断', e.args)\n\n @staticmethod\n def api():\n \"\"\"\n API接口\n :return:\n \"\"\"\n print('API接口开始运行')\n app.run(host=API_HOST, port=API_PORT)\n\n @staticmethod\n def run():\n \"\"\"\n 中心调度启动\n :return:\n \"\"\"\n if VALID_PROCESS:\n valid_process = Process(target=Scheduler.valid_cookie)\n valid_process.start()\n\n if API_PROCESS:\n api_process = Process(target=Scheduler.api)\n api_process.start()\n\n if GENERATOR_PROCESS:\n generator_process = Process(target=Scheduler.generate_cookie)\n generator_process.start()\n # Scheduler.generate_cookie(120)\n","sub_path":"src/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"528137459","text":"import tkinter\r\nfrom tkinter import Toplevel\r\nfrom tkinter import StringVar\r\n\r\n\r\ndef fifs():\r\n top = Toplevel()\r\n top.title('最终结果')\r\n top.geometry('250x300')\r\n lab1 = tkinter.Label(top, text='进程的响应时间为')\r\n lab1.place(x=15, y=80)\r\n lab2 = tkinter.Label(top, background='White')\r\n lab2.place(x=120, y=80)\r\n lab3 = tkinter.Label(top)\r\n lab3.place(x=15, y=120)\r\n yunxing_time = list(eval(v1.get())) # 对应到达时间的结合时间\r\n start_time = list(eval(v2.get())) # 到达时间\r\n result = [] # 最后结果列表\r\n m=0\r\n begin = start_time[0]\r\n for j in range(len(start_time)):\r\n count = begin + yunxing_time[j]\r\n result.append(count)\r\n if j + 1 == len(start_time):\r\n m=(count / len(start_time))\r\n break\r\n if count > start_time[j + 1]:\r\n begin = count\r\n if count < start_time[j + 1]:\r\n begin = start_time[j + 1]\r\n lab2.config(text=result)\r\n lab3.config(text='平均响应时间为 ' + str(m))\r\n b1 = tkinter.Button(top, text='朕知道了', command=top.destroy)\r\n b1.place(x=100, y=150, width=60, height=30)\r\n\r\n\r\ndef sjf():\r\n 
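# non-preemptive shortest-job-first: among the jobs that have already arrived,
# dispatch the one with the smallest run time; a/v1 hold run times, b/v2 hold
# arrival times, and f collects completion times (response = f[n] - h[n])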
top=Toplevel()\r\n top.title('最终结果')\r\n top.geometry('250x300')\r\n lab1=tkinter.Label(top,text='进程的响应时间为')\r\n lab1.place(x=15,y=80)\r\n lab2=tkinter.Label(top,background='White')\r\n lab2.place(x=120,y=80)\r\n lab3=tkinter.Label(top)\r\n lab3.place(x=15,y=120)\r\n a=list(eval(v1.get())) # 进程运行时间\r\n b=list(eval(v2.get())) # 进程到达时间\r\n h = []\r\n for z in b:\r\n h.append(z)\r\n c = [] # 用来存储最小到达时间对应的进程运行时间,并找到最短运行时间\r\n d = [] # 用来存储最早到达进程在运行时间列表中的位置\r\n f = [] # 最终的响应时间\r\n g = len(a) # 储存a的长度\r\n j = 0 # 到达时间列表的指针\r\n i = 0 # 找最小值位置的指针\r\n while j < len(b):\r\n if b[j] == min(b):\r\n c.append(a[j])\r\n d.append(j)\r\n j = j + 1\r\n while i < len(c):\r\n if c[i] == min(c):\r\n break\r\n else:\r\n i += 1\r\n f.append(c[i] + min(b))\r\n del a[d[i]], b[d[i]]\r\n\r\n p = [] # 存储就绪队列进程的运行时间\r\n q = [] # 存储就绪队列进程的运行时间在a列表中的位置\r\n k = [] # 存储不在就绪队列的进程的运行时间位置\r\n l = []\r\n l1 = []\r\n l2 = []\r\n l3 = []\r\n k1 = []\r\n n = 0 # f中的指针\r\n m = 0 # b中指针\r\n i = 0\r\n while n < g:\r\n m = 0\r\n while m < len(b):\r\n if b[m] <= f[n]:\r\n p.append(a[m])\r\n q.append(m)\r\n m = m + 1\r\n if len(p) > 0:\r\n i = 0\r\n while i < len(p):\r\n if p[i] == min(p):\r\n break\r\n else:\r\n i = i + 1\r\n del a[q[i]], b[q[i]]\r\n f.append(min(p) + f[n])\r\n p.clear()\r\n q.clear()\r\n elif len(p) == 0:\r\n m = 0\r\n while m < len(b):\r\n if b[m] > f[n]:\r\n k.append(a[m])\r\n l.append(b[m])\r\n l1.append(m)\r\n m = m + 1\r\n if len(k) > 0:\r\n i = 0\r\n while i < len(k):\r\n if l[i] == min(l):\r\n l2.append(i)\r\n k1.append(k[i])\r\n i = i + 1\r\n j = 0\r\n while j < len(k1):\r\n if k1[j] == min(k1):\r\n l3.append(j)\r\n j = j + 1\r\n f.append(b[l1[l2[l3[j - 1]]]] + a[l1[l2[l3[j - 1]]]])\r\n del b[l1[l2[l3[j - 1]]]], a[l1[l2[l3[j - 1]]]]\r\n k.clear()\r\n l.clear()\r\n l1.clear()\r\n l2.clear()\r\n l3.clear()\r\n k1.clear()\r\n n = n + 1\r\n sum_aver = 0\r\n for n in range(len(f)):\r\n sum_aver = sum_aver + f[n] - h[n]\r\n sum_aver = sum_aver / g\r\n lab2.config(text=f)\r\n lab3.config(text='平均响应时间为 '+str(sum_aver))\r\n b1=tkinter.Button(top,text='朕知道了',command=top.destroy)\r\n b1.place(x=100,y=150,width=60,height=30)\r\n\r\n\r\n# 立即 非立即剥夺\r\ndef change(list):\r\n newlist = []\r\n for i in list:\r\n newlist += [int(i)]\r\n return newlist\r\n\r\n\r\ndef gettask(elem):\r\n return elem[0]\r\n\r\n\r\ndef takereachtime(elem):\r\n return elem[1]\r\n\r\n\r\ndef takecosttime(elem):\r\n return elem[2]\r\n\r\n\r\ndef takepriority(elem):\r\n return elem[3]\r\n\r\n\r\n# 排序\r\ndef comp_reachtime(list): # 到达时间排序\r\n list.sort(key=takereachtime)\r\n return list\r\n\r\n\r\ndef comp_priority(list): # 优先级排序\r\n list.sort(key=takepriority, reverse=True)\r\n return list\r\n\r\n\r\ndef comp_task(list):\r\n list.sort(key=gettask)\r\n return list\r\n\r\n\r\ndef comp_RP(list):\r\n if len(list) > 1:\r\n list1 = [list[0]]\r\n list2 = list[1:]\r\n list2.sort(key=takepriority, reverse=True)\r\n list = []\r\n list = list1 + list2\r\n return list\r\n\r\n\r\ndef get_task():\r\n p = []\r\n task = []\r\n rlist = getreachtime()\r\n clist = getcosttime()\r\n plist = getpriority()\r\n n = len(plist)\r\n for i in range(n):\r\n a = []\r\n reachtime = rlist[i]\r\n costtime = clist[i]\r\n priority = plist[i]\r\n p += [[i + 1, reachtime, costtime, priority, 0]]\r\n return p\r\n\r\n\r\ndef doing1(waiting, t):\r\n t += waiting[0][2]\r\n return [waiting[0], waiting[1:], t]\r\n\r\n\r\ndef task1(list):\r\n comp_reachtime(list)\r\n k = 1\r\n waiting = [list[0]]\r\n ready = []\r\n finish = []\r\n t = list[0][1]\r\n for i in list:\r\n if i in ready:\r\n 
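# jobs already dispatched in an earlier pass are skipped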
continue\r\n for j in list[k:]:\r\n if j[1] <= waiting[0][2] + t:\r\n if j not in finish:\r\n waiting += [j]\r\n if len(waiting) == 1 and t + waiting[0][2] < j[1]:\r\n t = j[1]\r\n waiting += [j]\r\n break\r\n finish += waiting\r\n waiting = comp_RP(waiting)\r\n k += 1\r\n done = doing1(waiting, t)\r\n ready += [done[0]]\r\n waiting = done[1]\r\n t = done[2]\r\n if len(done[1]):\r\n ready += done[1]\r\n return ready\r\n\r\n\r\ndef result1(list):\r\n result = []\r\n t = list[0][1]\r\n for i in list:\r\n if t >= i[1]:\r\n t += i[2]\r\n result += [[i[0], t - i[1]]]\r\n else:\r\n t = i[1] + i[2]\r\n result += [[i[0], t - i[1]]]\r\n return comp_task(result)\r\n\r\n\r\ndef average(list):\r\n sum = 0\r\n for i in list:\r\n sum += i[1]\r\n return sum / len(list)\r\n\r\n\r\ndef fljbd():\r\n return result1(task1(get_task()))\r\n\r\n\r\n#####\r\ndef doing(waiting, t):\r\n if not waiting[0][2] == 0:\r\n waiting[0][2] -= 1\r\n waiting[0][4] = t\r\n return waiting\r\n\r\n\r\ndef task(list):\r\n num = 0\r\n t = 0\r\n finish = []\r\n waiting = []\r\n k = []\r\n move = []\r\n comp_reachtime(list)\r\n while True:\r\n for i in list:\r\n if i not in finish:\r\n if i[1] == t:\r\n finish += [i]\r\n waiting += [i]\r\n comp_priority(waiting)\r\n if not len(waiting) == 0:\r\n if not k == waiting[0] and len(waiting) > 2:\r\n move = waiting[1]\r\n waiting.remove(move)\r\n waiting += [move]\r\n k = waiting[0]\r\n t += 1\r\n if not len(waiting) == 0:\r\n waiting = doing(waiting, t)\r\n if waiting[0][2] == 0 and not waiting[0][3] == 0:\r\n waiting[0][3] = 0\r\n num += 1\r\n if len(list) == num:\r\n break\r\n return comp_task(waiting)\r\n\r\n\r\ndef result(list):\r\n result = []\r\n for i in list:\r\n result += [[i[0],i[4] - i[1]]]\r\n return (result)\r\n\r\n\r\ndef ljbd():\r\n return result(task(get_task()))\r\n\r\n\r\n# -------------------------------------------------------------------------------\r\n\r\n\r\ndef getreachtime():\r\n a = v2.get()\r\n rlist = a.split(\",\")\r\n return change(rlist)\r\n\r\n\r\ndef getcosttime():\r\n a = v1.get()\r\n clist = a.split(\",\")\r\n return change(clist)\r\n\r\n\r\ndef getpriority():\r\n a = v3.get()\r\n plist = a.split(\",\")\r\n return change(plist)\r\n\r\n\r\ndef fljbdsf():\r\n top = Toplevel()\r\n top.title('最终结果')\r\n top.geometry('250x300')\r\n lab1 = tkinter.Label(top, text='进程的响应时间为')\r\n lab1.place(x=15, y=80)\r\n lab2 = tkinter.Label(top, background='White')\r\n lab2.place(x=120, y=80)\r\n lab3 = tkinter.Label(top)\r\n lab3.place(x=15, y=120)\r\n # fljbd() 代表非立即剥夺\r\n # libd() 代表立即剥夺\r\n # 两函数返回值均为[[进程号,运行时间]] eg:[[1, 32], [2, 34], [3, 55], [4, 54], [5, 36]]\r\n list = fljbd()\r\n list1=[]\r\n for i in list:\r\n list1.append(i[1])\r\n list1.sort(reverse=False)\r\n lab2.config(text=list1)\r\n lab3.config(text='平均响应时间为 ' + str(average(list)))\r\n b1 = tkinter.Button(top, text='朕知道了', command=top.destroy)\r\n b1.place(x=100, y=150, width=60, height=30)\r\n\r\n\r\ndef ljbdsf():\r\n top = Toplevel()\r\n top.title('最终结果')\r\n top.geometry('250x300')\r\n lab1 = tkinter.Label(top, text='进程的响应时间为')\r\n lab1.place(x=15, y=80)\r\n lab2 = tkinter.Label(top, background='White')\r\n lab2.place(x=120, y=80)\r\n lab3 = tkinter.Label(top)\r\n lab3.place(x=15, y=120)\r\n # fljbd() 代表非立即剥夺\r\n # libd() 代表立即剥夺\r\n # 两函数返回值均为[[进程号,运行时间]] eg:[[1, 32], [2, 34], [3, 55], [4, 54], [5, 36]]\r\n list = ljbd()\r\n list1 = []\r\n for i in list:\r\n list1.append(i[1])\r\n list1.sort(reverse=False)\r\n lab2.config(text=list1)\r\n lab3.config(text='平均响应时间为 ' + str(average(list)))\r\n b1 = 
tkinter.Button(top, text='朕知道了', command=top.destroy)\r\n b1.place(x=100, y=150, width=60, height=30)\r\n\r\n\r\nroot=tkinter.Tk()\r\nroot.title(\"进程调度\")\r\nroot.resizable(0,0)\r\nroot.geometry('400x500')\r\nv1=StringVar()\r\nv2=StringVar()\r\nv3=StringVar()\r\nlabel1=tkinter.Label(root, text='进程运行时间',font=('黑体','10','bold'))\r\nlabel1.place(x=20,y=20)\r\nlabel2=tkinter.Label(root,text='进程到达时间',font=('宋体','11','italic'))\r\nlabel2.place(x=20,y=70)\r\nlabel3=tkinter.Label(root,text='进程优先级',font=('隶书','13'))\r\nlabel3.place(x=20,y=120)\r\nentry3=tkinter.Entry(root,textvariable=v3)\r\nentry3.place(x=150,y=120,width=200,height=25)\r\nentry1=tkinter.Entry(root,textvariable=v1)\r\nentry1.place(x=150,y=20,width=200,height=25)\r\nentry2=tkinter.Entry(root,textvariable=v2)\r\nentry2.place(x=150,y=70,width=200,height=25)\r\nbut1=tkinter.Button(root,text='先来先服务\\neight eggs',command=fifs)\r\nbut1.place(x=50,y=170,width=100,height=50)\r\nbut2=tkinter.Button(root,text='最短作业优先\\nextremely rich',command=sjf)\r\nbut2.place(x=230,y=170,width=100,height=50)\r\nbut3=tkinter.Button(root,text='立即剥夺的优先级调度\\nlittle bro',command=ljbdsf)\r\nbut3.place(x=35,y=270,width=130,height=50)\r\nbut4=tkinter.Button(root,text='非立即剥夺的优先级调度\\nold pig',command=fljbdsf)\r\nbut4.place(x=220,y=270,width=140,height=50)\r\nroot.mainloop()","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":11310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"230171848","text":"# gui.py\n\nfrom tkinter import *\nfrom tkinter import messagebox\n\nclass Gui(Tk):\n\n \n\n\n def __init__(self):\n super().__init__()\n\n self.grey_image = PhotoImage(file = \"C:/Users/aaaa/Desktop/Apprenticeship docs/Python/GUI/grey.gif\")\n self.green_image = PhotoImage(file = \"C:/Users/aaaa/Desktop/Apprenticeship docs/Python/GUI/green.gif\")\n self.red_image = PhotoImage(file = \"C:/Users/aaaa/Desktop/Apprenticeship docs/Python/GUI/red.gif\")\n\n # set window properties\n self.title(\"Newsletter\")\n self.configure(bg=\"#ccc\", padx=10, pady=10)\n\n # add components\n self.__add_outer_frame()\n self.__add_heading_label()\n self.__add_instruction_label()\n self.__add_email_label()\n self.__add_email_entry()\n self.__add_subscribe_button()\n self.__add_grey_image_label()\n #self.__add_option_box()\n # self.__add_red_image_label()\n\n def __add_outer_frame(self):\n self.outer_frame = Frame()\n self.outer_frame.grid(row=0, column=0)\n self.outer_frame.configure( bg=\"#eee\", \n padx=10, \n pady=10)\n\n def __add_heading_label(self):\n self.heading_label = Label(self.outer_frame)\n self.heading_label.grid(row=0, column=0, columnspan=2)\n self.heading_label.configure( bg=\"#eee\",\n font=\"Arial 14\",\n text=\"RECEIVE OUR NEWSLETTER\")\n\n def __add_instruction_label(self):\n self.instruction_label = Label(self.outer_frame)\n self.instruction_label.grid(row=1, column=0, columnspan=2, sticky=W)\n self.instruction_label.configure( bg=\"#eee\",\n text=\"Please enter your email below to receiver our newsletter\")\n\n def __add_email_label(self):\n self.email_label = Label(self.outer_frame)\n self.email_label.grid(row=2, column=0, sticky=E)\n self.email_label.configure( pady=20,\n text=\"Email:\")\n\n def __add_email_entry(self):\n self.email_entry = Entry(self.outer_frame)\n self.email_entry.grid(row=2, column=1, sticky=W)\n self.email_entry.configure(width=40)\n self.email_entry.bind(\"\",self.__add_email_entry_switch)\n\n\n def __add_grey_image_label(self):\n self.grey_image_label = 
Label(self.outer_frame)\n self.grey_image_label.grid(row=2, column=2, sticky=E)\n self.grey_image_label.configure(image=self.grey_image, height=20,\n width=20)\n\n #def __add_green_image_label(self):\n # self.green_image_label = Label(self.outer_frame)\n # self.green_image_label.grid(row=2, column=2, sticky=E)\n # self.green_image_label.configure(image=self.green_image, height=20,\n # width=20)\n\n #def __add_red_image_label(self):\n # self.red_image_label = Label(self.outer_frame)\n # self.red_image_label.grid(row=2, column=2, sticky=E)\n # self.red_image_label.configure(image=self.red_image, height=20,\n # width=20) \n\n def __add_email_entry_switch(self,event):\n email_entry = self.email_entry.get()\n\n if email_entry == '':\n self.grey_image_label.configure(image=self.red_image, height=20,\n width=20)\n if email_entry != '':\n self.grey_image_label.configure(image=self.green_image, height=20,\n width=20)\n\n\n def __add_subscribe_button(self):\n self.subscribe_button = Button(self.outer_frame)\n self.subscribe_button.grid(row=5, column=0, columnspan=2, sticky=N+E+S+W)\n self.subscribe_button.configure(bg=\"#fcc\",\n text=\"Subscribe\")\n self.subscribe_button.bind(\"\", self.__subscribe_clicked)\n\n def __subscribe_clicked(self, event):\n messagebox.showinfo(\"Newsletter\",\"Subscribed!\")\n","sub_path":"2-guis/TCA/part_b_images.py","file_name":"part_b_images.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"198044616","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom blog.api import *\nimport json\nfrom blog.models import *\nfrom django.contrib.auth.models import User\nfrom django.db import connection\n\n@login_required\ndef page_blog_home(request):\n return render(request, 'index.html')\n\ndef page_blog_read(request):\n u = User.objects.filter(id=1, blog_sheet__is_delete=0)\n b = u[0].blog_sheet\n print (type(u[0].blog_sheet))\n def db_insert():\n l = []\n for i in range(1,100):\n t = Test(name='wang')\n l.append(t)\n Test.objects.bulk_create(l)\n def db_delete():\n for i in range(1,100):\n Test.objects.all().delete()\n def db_update():\n for i in range(1,100):\n Test.objects.all().update(name='aa')\n obj, created = Test.objects.filter(id=2).update_or_create(name='wl',defaults={'name':'3'})\n return render(request, 'blog_read.html', {})\n\ndef page_blog_write(request):\n username = request.GET.get('username', '')\n return render(request, 'blog_write.html', {'username': username})\n\n@csrf_exempt\ndef blog_operate(request):\n data = json.loads(request.body.decode())\n title = data.get('title', '')\n category = data.get('category', '')\n text = data.get('text', '')\n is_draft = data.get('is_draft', False)\n result = create_blog(request, title, text, category, is_draft)\n return JsonResponse(result)\n\n","sub_path":"src/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"106148268","text":"from typing import Dict, List\n\nfrom exceptions import InvalidOperationCode\nimport operations as ops\n\n\nclass IntComputer:\n\n def __init__(self, code: List[str]):\n self.pointer: int = 0\n self.code = code\n # copy code into memory\n self.memory: 
Dict[int, str]\n self.memory = {addr: val for addr, val in enumerate(code)}\n self._operations: Dict[int, ops.Operation] = {}\n\n def register_operation(self, operation: ops.Operation):\n operation.on = self.memory\n self._operations[operation.key] = operation\n\n def register_operations(self, operations: List[ops.Operation]):\n for operation in operations:\n self.register_operation(operation)\n\n def run(self):\n outputs = []\n while True:\n # take the instruction at the current position\n opcode, modes = self.instruction(self.pointer)\n # stop the program\n if opcode == 99:\n break\n\n operation = self._operations.get(opcode)\n if not operation:\n raise InvalidOperationCode(f'Invalid operation code: {opcode} at position {self.pointer}.')\n\n params = self.get_params(self.pointer+1, operation.takes)\n result = operation.compute(params, modes)\n if result:\n self.pointer = result\n else:\n self.pointer += operation.takes + 1\n\n if hasattr(operation, 'output'):\n outputs.append(operation.output)\n\n return outputs[-1]\n\n def instruction(self, at: int):\n \"\"\"\n\n :type at: The address from which the instruction should be read\n\n >>> c = IntComputer('1107,-1,8,3,4,3,99')\n >>> c.instruction(0)\n (7, 011)\n \"\"\"\n inst = self.memory[at].zfill(5)\n opcode = int(inst[-2:])\n if opcode == 99:\n return opcode, []\n # read from left to right\n modes = [int(m) for m in inst[:-2][::-1]]\n return opcode, modes\n\n def get_params(self, at: int, num: int):\n params = []\n start = at\n for addr in range(start, start+num):\n params.append(self.memory[addr])\n return params\n\n\nclass IntComputerV2(IntComputer):\n\n def __init__(self, code):\n super(IntComputerV2, self).__init__(code)\n self.register_operations([\n ops.Plus, ops.Multiply, ops.Input, ops.Output, ops.IsTrue, ops.IsFalse, ops.IsLessThan, ops.IsEqual\n ])\n\n\nclass IntComputerV3(IntComputer):\n\n def __init__(self, code: List[str], inputs):\n AutoInput = ops.AutoInputOperation(inputs=inputs)\n super(IntComputerV3, self).__init__(code)\n self.register_operations([\n ops.Plus, ops.Multiply, AutoInput, ops.Output, ops.IsTrue, ops.IsFalse, ops.IsLessThan, ops.IsEqual,\n ops.RelativeBaseOperation()\n ])\n\n\nclass IntComputerV4(IntComputerV3):\n\n def __init__(self, code: List[str], inputs, id_=None, stop_after_inputs=False):\n super(IntComputerV4, self).__init__(code, inputs)\n self.running = False\n self.finished = False\n self.id_ = id_\n self.stop_after_inputs = stop_after_inputs\n self.memory['relative'] = 0\n\n def run(self):\n outputs = []\n self.running = True\n while self.running:\n # take the instruction at the current position\n opcode, modes = self.instruction(self.pointer)\n # stop the program\n if opcode == 99:\n print(f\"{self} finished\")\n self.finished = True\n self.running = False\n break\n\n operation: ops.Operation\n operation = self._operations.get(opcode)\n if not operation:\n raise InvalidOperationCode(f'Invalid operation code: {opcode} at position {self.pointer}.')\n\n params = self.get_params(self.pointer + 1, operation.takes)\n result = operation.compute(params, modes)\n if result:\n self.pointer = result\n else:\n self.pointer += operation.takes + 1\n\n if hasattr(operation, 'output'):\n outputs.append(operation.output)\n\n if len(self._operations[3].inputs) == 0 and self.stop_after_inputs:\n print(f'{self}: Halting program as inputs are consumed.')\n self.halt()\n return outputs.pop()\n\n return outputs\n\n def halt(self):\n self.running = False\n\n def restart(self, value):\n print(f'{self}: Restarting with: 
{value}')\n auto_input: ops.AutoInputOperation = self._operations[3]\n auto_input.add_input(value)\n return self.run()\n\n def __repr__(self):\n return f\"{self.id_}\"\n","sub_path":"IntComputer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"176009229","text":"\"\"\"\nThis module controls the NDN components of the network, as the forwarder NFD or the routing, and allows to run the\nexperiment by starting the repos/client in the network.\n\"\"\"\n\nimport stat\nimport os\nimport logging\nimport threading\nimport time\nimport shutil\n\nfrom Crackle.ColoredOutput import make_colored\nimport Crackle.Globals as Globals\nfrom Crackle.AsyncManager import start_thread_pool\nfrom Crackle.Constants import layer_2_protocols, __tree_on_consumer__, __min_cost_multipath__, \\\n __tree_on_producer__, __maximum_flow__, nfd_conf_file\nfrom Crackle import TopologyStructs\nfrom Crackle.RoutingNdn import RoutingNdn\n\n# TODO Move constants to Globals\n\nREPO = \"ndn-virtual-repo\"\n\n_DEBUG = False\n\nmodule_logger = logging.getLogger(__name__)\n\n## Routing files\nrouting_reset_suffix = \"_resetndnrouting.sh\"\nrouting_suffix = \"_setndnrouting.sh\"\nroute_register_template = \"nfdc register ndn:/{0} {1}://{2}:6363\\n\"\nethernet_route_register_template = \"nfdc register ndn:/{0} {1}://[{2}]/{3}\\n\"\nface_create_template = \"nfdc create {} {}://{}:6363\\n\"\nethernet_face_create_template = \"nfdc create {} {}://[{}]/{}\\n\"\n\nroute_unregister_template = \"nfdc unregister ndn:/{0} {1}://{2}:6363\\n\"\nethernet_route_unregister_template = \"nfdc unregister ndn:/{0} {1}://[{2}]\\n\"\nface_destroy_template = \"nfdc destroy {0}://{1}:6363\\n\"\nethernet_face_destroy_template = \"nfdc destroy {0}://[{1}]\\n\"\n\n## NFD configuration file\n__nfd_conf_file__ = \"/etc/ndn/nfd.conf\"\n\n# Script template\n\nroute_script = \"\"\"\n#!/bin/bash\n\ncreate_faces() {{\n :\n {}\n}}\n\ndestroy_faces() {{\n :\n {}\n}}\n\nreset_routing() {{\n :\n {}\n}}\n\nset_routing() {{\n :\n {}\n}}\n\ncase $1 in\n create_faces)\n create_faces\n ;;\n destroy_faces)\n destroy_faces\n ;;\n reset_routing)\n reset_routing\n ;;\n set_routing)\n set_routing\n ;;\n set)\n create_faces\n set_routing\n ;;\n reset)\n destroy_faces\n reset_routing\n ;;\n *)\n exit 1\n ;;\nesac\n\nexit 0\n\n\"\"\"\n\n\nclass NDNManager:\n \"\"\"\n This class contains the methods for managing the NDN part of the experiment and starting the experiment itself.\n\n :ivar node_list: The list of all the node in the network (routers, base stations and mobile stations)\n \"\"\"\n\n def __init__(self, node_list, server_list):\n self.node_list = node_list\n self.server_list = server_list\n self.logger = logging.getLogger(__name__ + \".\" + type(self).__name__)\n\n def configure_router(self):\n \"\"\"\n Set the cache size using the value contained in the configuration file \"topo.brite\".\n\n :return:\n \"\"\"\n\n def set_strategy_cache(n, results):\n\n try:\n cache_size = n.get_cache_size()\n cache_policy = n.get_cache_policy()\n forward_strategy = n.get_forward_strategy()\n\n if cache_policy == \"l\":\n cache_policy = \"LRU\"\n\n params = [\"sed\",\n \"-i\",\n \"s/^.*cs_max_packets .*$/ cs_max_packets {0}/\".format(cache_size),\n __nfd_conf_file__]\n\n params2 = [\"sed\",\n \"-i\",\n \"0,/\\/ / s/best-route/{0}/\".format(forward_strategy),\n __nfd_conf_file__]\n\n ret = n.run_command(params) and n.run_command(params2)\n\n if not ret:\n 
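# Example expansions of the templates above (protocol and address values are
# illustrative only, not taken from any configuration):
#   face_create_template.format("", "udp4", "10.0.0.2")
#       -> "nfdc create  udp4://10.0.0.2:6363\n"
#   route_register_template.format("prefix/a", "udp4", "10.0.0.2")
#       -> "nfdc register ndn:/prefix/a udp4://10.0.0.2:6363\n"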
print(make_colored(\"red\", \"[{0}] Error while configuring router\".format(n)))\n self.logger.error(\"[{0}] Error while setting cache\".format(n))\n results[n] = False\n else:\n logging.info(\"[{0}] Router configured. Cache={1} and \"\n \"Forwarding Strategy={2}\".format(n, cache_size, forward_strategy))\n results[n] = True\n\n params = [\"service\",\n \"nfd\",\n \"restart\"]\n\n ret = n.run_command(params)\n\n if not ret:\n self.logger.error(\"[{0}] Error restarting NFD\".format(n))\n print(make_colored(\"red\", \"[{0}] Error restarting NFD\".format(n)))\n results[n] = False\n else:\n self.logger.info(\"[{0}] NFD restarted\".format(n))\n results[n] = True\n except Exception as error:\n self.logger.error(\"[{0}] Error setting up the router. \"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), set_strategy_cache, sleep_time=0.1)\n\n def start_nfd(self):\n \"\"\"\n Start the NDN forwarder on all the nodes in the network.\n\n :return:\n \"\"\"\n\n def start(n, results):\n\n ret = n.push_file(nfd_conf_file, __nfd_conf_file__)\n if not ret:\n self.logger.error(\"[{0}] Error sending NFD configuration file\".format(n))\n print(make_colored(\"red\", \"[{0}] Error sending NFD configuration file\".format(n)))\n results[n] = False\n else:\n self.logger.info(\"[{0}] NFD configuration file sent\".format(n))\n results[n] = True\n\n params = [\"service\", \"nfd\", \"start\"]\n\n try:\n\n ret = n.run_command(params)\n\n if not ret:\n self.logger.error(\"[{0}] Error starting NFD\".format(n))\n print(make_colored(\"red\", \"[{0}] Error starting NFD\".format(n)))\n results[n] = False\n else:\n self.logger.info(\"[{0}] NFD started\".format(n))\n results[n] = True\n except Exception as error:\n self.logger.error(\"[{0}] Error starting ICN forwarder. \"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), start)\n\n def stop_nfd(self):\n \"\"\"\n Stop the NDN forwarder on each node of the network.\n\n :return:\n \"\"\"\n print(\"* Stopping NFD on hosts\")\n\n def stop(n, results):\n\n params = [\"service\", \"nfd\", \"stop\"]\n\n try:\n ret = n.run_command(params, check_return=False)\n\n if not ret:\n self.logger.error(\"[{0}] Error stopping NFD\".format(n))\n print(make_colored(\"red\", \"[{0}] Error stopping NFD\".format(n)))\n results[n] = False\n else:\n self.logger.info(\"[{0}] NFD stopped\".format(n))\n results[n] = True\n except Exception as error:\n self.logger.error(\"[{0}] Error stopping ICN forwader. 
\"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), stop)\n\n def list_repositories(self, number):\n \"\"\"\n Show the list of repositories for the current experiment.\n :param number : a boolean returns number of repositories if True \n :return r: number of repositories on network\n \"\"\"\n r = 0\n for node in self.node_list.values():\n\n repositories = node.get_repositories()\n\n if repositories:\n print(make_colored(\"blue\", \"Repos on {0}:\".format(node)))\n for repo in repositories:\n r += 1\n print(make_colored(\"cyan\", \"\\t{0}\".format(repo)))\n\n if number:\n return r\n\n def add_repository(self, node_name, repoId, folder):\n\n \"\"\"\n Add repository to node_name\n\n :return:\n \"\"\"\n\n self.node_list[node_name].add_repo(TopologyStructs.Repo(repoId, folder))\n\n def delete_repository(self, node_name, repoId, folder):\n\n \"\"\"\n Remove repository to node_name\n\n :return:\n \"\"\"\n\n node = self.node_list[node_name]\n\n for repo in node.get_repositories():\n\n if repo.get_repo_id() == repoId:\n node.get_repositories().remove(repo)\n\n def list_clients(self):\n \"\"\"\n Show the list of clients (consumers) for the current experiment.\n\n :return:\n \"\"\"\n for node in self.node_list.values():\n\n clients = node.get_client_apps()\n\n if clients:\n print(make_colored(\"blue\", \"Clients on {0}:\".format(node)))\n for client in clients:\n print(make_colored(\"cyan\", \"\\t{0}\".format(client)))\n\n def add_consumer(self, node_name, clientId, name):\n\n \"\"\"\n Add client to node_name\n\n :return:\n \"\"\"\n\n self.node_list[node_name].add_client(TopologyStructs.Client(clientId, \"Poisson_2\", \"rzipf_1.3_100\", name))\n\n def delete_consumer(self, node_name, clientId, name):\n \"\"\"\n Remove repository to node_name\n\n :return:\n \"\"\"\n\n node = self.node_list[node_name]\n\n for client in node.get_client_apps():\n if client.get_client_id() == clientId:\n node.get_client_apps().remove(client)\n\n def reset_cache(self):\n \"\"\"\n Reset the cache with the default value (65536 Packets)\n\n :return:\n \"\"\"\n\n def rst_cache(n, results):\n\n try:\n params = [\"sed\",\n \"-i\",\n \"s/^.*cs_max_packets .*$/ cs_max_packets 65536/\",\n __nfd_conf_file__]\n\n ret = n.run_command(params)\n\n if ret:\n self.logger.error(\"[{0}] Error resetting cache\".format(n))\n print(make_colored(\"red\", \"[{0}] Error resetting cache\".format(n)))\n results[n] = False\n else:\n self.logger.info(\"[{0}] Cache reset successfully\".format(n))\n results[n] = True\n\n params = [\"service\",\n \"nfd\",\n \"restart\"]\n\n ret = n.run_command(params)\n\n if ret:\n self.logger.error(\"[{0}] Error restarting NFD\".format(n))\n print(make_colored(\"red\", \"[{0}] Error restarting NFD\".format(n)))\n else:\n self.logger.info(\"[{0}] NFD restarted\".format(n))\n except Exception as error:\n self.logger.error(\"[{0}] Error re-setting the cache. 
\"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), rst_cache)\n\n def show_route(self, node):\n \"\"\"\n Show the routing table of a node.\n\n :param node:\n :return:\n \"\"\"\n\n if node in self.node_list:\n routes = self.node_list[node].get_routes()\n if routes:\n print(make_colored(\"blue\", node).replace(Globals.experiment_id, \"\"))\n for route in routes.values():\n for route2 in route.values():\n print(make_colored(\"yellow\", \"\\ticn_name:\"),\n \"ndn:/{0}\".format(route2.get_icn_name()).replace(\"/\", \"\"),\n make_colored(\"yellow\", \"next hop:\"),\n \"{0}\".format(route2.get_next_hop()).replace(Globals.experiment_id, \"\"))\n\n def add_route(self, node, name, nexthop, container_created=False):\n \"\"\"\n Add a route for name \"name\" in the node \"node\" with nexthop \"nexthop\"\n :param node:\n :param name:\n :param nexthop:\n :return:\n \"\"\"\n\n if all([n in self.node_list for n in [node, nexthop]]):\n\n if self.node_list[nexthop] not in self.node_list[node].get_links():\n self.logger.error(\"The nodes {0} and {1} are not directly connected!\".format(node, nexthop).replace(\n Globals.experiment_id, \"\"))\n print(make_colored(\"red\",\n \"The nodes {0} and {1} are not directly connected!\".format(node, nexthop).replace(\n Globals.experiment_id, \"\")))\n return\n\n self.node_list[node].add_route(self.node_list[nexthop], name)\n\n if container_created:\n if not self.node_list[node].get_route(name, self.node_list[nexthop]).register():\n self.logger.error(\"Error creating the routes.\")\n print(make_colored(\"red\", \"Error creating the routes!\"))\n else:\n self.logger.error(\"Trying to add the route {0} toward {1}, but {0} does not exist.\".format(name,\n nexthop))\n print(make_colored(\"red\", \"The node {0} does not exist!\".format(node)))\n\n def recompute_global_routing(self, route, routing_algorithm, container_created=False, rerouting=False):\n \"\"\"\n Recompute the routing of the network using routing_algorithm\n\n :param routing_algorithm:\n :return:\n \"\"\"\n\n if routing_algorithm not in [__maximum_flow__,\n __min_cost_multipath__,\n __tree_on_consumer__,\n __tree_on_producer__]:\n self.logger.error(\"Routing algorithm not in the list of allowed algorithms!\")\n print(make_colored(\"red\", \"Routing algorithm not in the list of allowed algorithms!\"))\n return\n\n RoutingNdn(self.node_list).algo_ndn(routing_algorithm)\n\n self.create_routing_scripts()\n\n if container_created:\n self.reset_ndn_routing(rerouting=rerouting)\n self.push_routing_scripts()\n self.set_ndn_routing(rerouting=rerouting)\n\n def delete_route(self, node, name, nexthop, container_created=False):\n \"\"\"\n Delete a route for name \"name\" in the node \"node\" with nexthop \"nexthop\"\n :param node:\n :param name:\n :param nexthop:\n :return:\n \"\"\"\n\n if all([n in self.node_list for n in [node, nexthop]]):\n\n route = self.node_list[node].get_route(name, self.node_list[nexthop])\n\n if container_created:\n if not route.unregister():\n self.logger.error(\"Error deleting the route.\")\n print(make_colored(\"red\", \"Error deleting the route!\"))\n\n self.node_list[node].delete_route(name, self.node_list[nexthop])\n\n else:\n self.logger.error(\"Trying to add the route {0} toward {1}, but {0} does not exist.\".format(name,\n nexthop))\n print(make_colored(\"red\", \"The node {0} does not exist!\".format(node)))\n\n def start_repositories(self):\n \"\"\"\n Start all the repositories in the network.\n\n :return:\n \"\"\"\n\n def 
start_repo(n, results):\n\n ret_val = True\n\n try:\n for repo in n.get_repositories():\n name = repo.get_folder()\n\n params = [\"service\",\n \"repo-ng\",\n \"start\"]\n\n self.logger.debug(\"[{0}] Repo {1}. Params={2}\".format(n,\n params,\n params))\n\n ret = n.run_command(params, sync=True)\n\n if not ret:\n print(make_colored(\"red\", \"[{0}] Error starting repo for {1}\".format(n,\n name)))\n else:\n self.logger.info(\"[{0}]: repo-ng {1} started\".format(n,\n name))\n\n ret_val &= ret\n\n results[n] = ret_val\n except Exception as error:\n self.logger.error(\"[{0}] Error starting repositories. \"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), start_repo)\n\n def start_virtual_repositories(self):\n \"\"\"\n Start all the repositories in the network.\n\n :return:\n \"\"\"\n\n def start_virtual_repo(n, results):\n\n ret_val = True\n\n try:\n for repo in n.get_repositories():\n name = repo.get_folder()\n\n params = [\"ndn-virtual-repo\",\n name,\n \"-s\",\n \"1400\"]\n\n self.logger.debug(\"[{0}] Repo {1}. Params={2}\".format(n,\n params,\n params))\n\n ret = n.run_command(params, sync=False)\n\n if not ret:\n print(make_colored(\"red\", \"[{0}] Error starting repo for {1}\".format(n,\n name)))\n else:\n self.logger.info(\"[{0}]: repo-ng {1} started\".format(n,\n name))\n\n ret_val &= ret\n\n results[n] = ret_val\n except Exception as error:\n self.logger.error(\"[{0}] Error starting repositories. \"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), start_virtual_repo)\n\n def stop_repositories(self):\n \"\"\"\n Stop the NDN repositories in the network.\n\n :return:\n \"\"\"\n print(make_colored(\"blue\", \"* Stopping repositories on the nodes...\"))\n\n def kill_repo(n, results):\n\n try:\n if n.run_command(\"service\", \"repo-ng\", \"stop\"):\n self.logger.info(\"[{0}]: Repositories stopped.\".format(n))\n results[n] = True\n else:\n print(make_colored(\"red\", \"[{0}]: Repositories failed to stop.\".format(n)))\n self.logger.error(\"[{0}]: Repositories failed to stop.\".format(n))\n results[n] = False\n except Exception as error:\n self.logger.error(\"Error deleting container {0}. 
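# Note on kill_repo above: unlike every other call site in this class,
# run_command there receives three separate positional arguments instead of a
# single params list; if run_command takes one list (as the other call sites
# suggest), the stop call would need run_command(["service", "repo-ng", "stop"]).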
\"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), kill_repo)\n\n def create_routing_scripts(self):\n \"\"\"\n Create the routing scripts for setting the routing tables of the nodes.\n\n :return:\n \"\"\"\n\n def create_script(n_from, results):\n routing_script = open(Globals.scripts_dir + str(n_from) + routing_suffix, 'w')\n create_faces, destroy_faces, registers, unregisters = [], [], [], []\n\n for link in n_from.get_links().values():\n if Globals.layer2_prot != layer_2_protocols[4]:\n create_faces.append(face_create_template.format(\"-W\" if Globals.wldr_face else \"\",\n Globals.layer2_prot,\n link.get_node_to().get_ip_address(\n n_from)))\n destroy_faces.append(face_destroy_template.format(Globals.layer2_prot,\n link.get_node_to().get_ip_address(\n n_from)))\n else:\n create_faces.append(ethernet_face_create_template.format(\"-W\" if Globals.wldr_face else \"\",\n Globals.layer2_prot,\n link.get_node_to().get_mac_address(n_from),\n link.get_node_to() if (type(\n link.get_node_to()) is not TopologyStructs.Station) or\n (type(\n link.get_node_to()) is TopologyStructs.Station and\n type(\n n_from) is TopologyStructs.Router) else \"wlan0\"))\n destroy_faces.append(ethernet_face_destroy_template.format(Globals.layer2_prot,\n link.get_node_to().get_mac_address(\n n_from)))\n\n for node_to in n_from.get_routes():\n for prefix in n_from.get_routes()[node_to]:\n if Globals.layer2_prot not in layer_2_protocols:\n self.logger.error(\"[{0}] Layer 2 protocol not recognized!.\".format(n_from))\n results[n_from] = False\n return\n if Globals.layer2_prot != layer_2_protocols[4]:\n\n unregisters.append(route_unregister_template.format(prefix,\n Globals.layer2_prot,\n node_to.get_ip_address(n_from)))\n registers.append(route_register_template.format(prefix,\n Globals.layer2_prot,\n node_to.get_ip_address(n_from)))\n else:\n unregisters.append(ethernet_route_unregister_template.format(prefix,\n Globals.layer2_prot,\n node_to.get_mac_address(n_from)))\n registers.append(ethernet_route_register_template.format(prefix,\n Globals.layer2_prot,\n node_to.get_mac_address(n_from),\n node_to if type(\n node_to) is not TopologyStructs.Station else \"wlan0\"))\n\n routing_script.write(route_script.format(\"\\n\".join(create_faces),\n \"\\n\".join(destroy_faces),\n \"\\n\".join(unregisters),\n \"\\n\".join(registers)))\n\n routing_script.close()\n\n os.chmod(Globals.scripts_dir + str(n_from) + routing_suffix,\n stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRWXU)\n\n self.logger.debug(\"[{0}] NDN routing scripts created.\".format(n_from))\n results[n_from] = True\n\n self.logger.info(\"Creating the NDN routing scripts\")\n\n return start_thread_pool(self.node_list.values(), create_script)\n\n def reset_ndn_routing(self, rerouting=False):\n \"\"\"\n Execute the routing script that cleans the routing table of each node.\n\n :return:\n \"\"\"\n\n def reset_routing(n, results):\n\n if rerouting:\n params = [\"/root/{0}{1}\".format(n, routing_suffix), \"reset_routing\"]\n else:\n params = [\"/root/{0}{1}\".format(n, routing_suffix), \"reset\"]\n\n try:\n ret = n.run_command(params)\n\n if ret:\n self.logger.info(\"[{0}] Routing table successfully cleaned\".format(n))\n results[n] = True\n else:\n self.logger.error(\"[{0}] Error cleaning the routing table. 
Params: {1}\".format(n, params))\n print(make_colored(\"red\", \"[{0}] Error cleaning the routing table\".format(n)))\n results[n] = False\n except Exception as error:\n self.logger.error(\"[{0}] Error cleaning the routing table. \"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), reset_routing)\n\n def push_routing_scripts(self):\n \"\"\"\n Push the routing scripts inside the containers\n\n :return:\n \"\"\"\n\n def push_scripts(n, results):\n\n routing_set_script = \"/root/{0}{1}\".format(n,\n routing_suffix)\n\n try:\n ret = n.push_file(Globals.scripts_dir + str(n) + routing_suffix,\n routing_set_script)\n\n if ret:\n self.logger.info(\"[{0}] Routing script successfully pushed inside the container\".format(n))\n results[n] = True\n else:\n self.logger.error(\"[{0}] Error pushing NDN routing script\".format(n))\n print(make_colored(\"red\", \"[{0}] Error pushing NDN routing script\".format(n)))\n results[n] = False\n except Exception as error:\n self.logger.error(\"[{0}] Error pushing NDN routing script.\"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), push_scripts)\n\n def set_ndn_routing(self, rerouting=False):\n \"\"\"\n Execute the routing scripts in order to fill the routing tables of the nodes.\n\n :return:\n \"\"\"\n\n def set_routing(n, results):\n try:\n if rerouting:\n params = [\"/root/{0}{1}\".format(n, routing_suffix), \"set_routing\"]\n else:\n params = [\"/root/{0}{1}\".format(n, routing_suffix), \"set\"]\n\n ret = n.run_command(params)\n\n if ret:\n self.logger.info(\"[{0}] NDN routing set\".format(n))\n results[n] = True\n else:\n self.logger.error(\"[{0}] Error while executing the NDN routing script\".format(n))\n print(make_colored(\"red\", \"[{0}] Error while executing the NDN routing script\".format(n)))\n results[n] = False\n except Exception as error:\n self.logger.error(\"Error while executing the NDN routing script {0}. \"\n \"Error: {1}\".format(n,\n error))\n results[n] = False\n\n self.logger.info(\"Setting NDN routing\")\n\n return start_thread_pool(self.node_list.values(), set_routing)\n\n def list_nfd_status(self):\n \"\"\"\n Show the routing tables of the nodes in the network.\n\n :return:\n \"\"\"\n self.logger.info(\"Listing nfd status\")\n\n for node in self.node_list.values():\n params = [\"nfd-status\", \"-fb\"]\n print(make_colored(\"blue\", node))\n print(\"\\n\\n\")\n try:\n ret = node.run_command(params, output=True)\n except Exception as error:\n self.logger.error(\"[{0}] Error showing NFD status. \"\n \"Error: {1}\".format(node,\n error))\n ret = False\n\n if not ret:\n self.logger.error(\"[{0}] Error displaying NFD-STATUS\".format(node))\n print(make_colored(\"red\", \"[{0}] Error displaying NFD-STATUS\".format(node)))\n\n def execute_cmd(self, cmd):\n \"\"\"\n Execute the command on each node of the network.\n\n :param cmd: The array with the command and the parameters to execute.\n :return:\n \"\"\"\n self.logger.info(\"Executing cmd {0}\".format(cmd))\n\n def exec_command(n, results):\n try:\n ret = n.run_command(cmd)\n\n if ret:\n self.logger.error(\"[{0}] executeCmd {1} returned an error\".format(n,\n cmd))\n print(make_colored(\"red\", \"[{0}] executeCmd returned an error\".format(n)))\n results[n] = False\n else:\n self.logger.info(\"[{0}] Successfully Executed cmd {1}\".format(n,\n cmd))\n results[n] = True\n except Exception as error:\n self.logger.error(\"[{0}] Error executing command {2}. 
\"\n \"Error: {1}\".format(n,\n error,\n cmd))\n results[n] = False\n\n return start_thread_pool(self.node_list.values(), exec_command)\n\n def start_test(self):\n \"\"\"\n Run the test by starting the clients.\n\n :return:\n \"\"\"\n shutil.rmtree(Globals.log_dir)\n os.mkdir(Globals.log_dir)\n\n self.logger.info(\"BEGIN TEST\")\n\n Globals.test_start_time = time.time()\n\n self.logger.info(\"Test start time={0}\".format(Globals.test_start_time))\n\n client_manager_list = []\n\n # Start all the clients\n\n for node in self.node_list.values():\n for client in node.get_client_apps():\n self.logger.debug(\"[{0}] Starting client {1}\".format(node, client))\n\n cm = ClientManager(client, node)\n client_manager_list.append(cm)\n cm.start()\n\n # Wait for clients' end\n for cm in client_manager_list:\n cm.join()\n\n # Wait until the end of the test in case the total\n # duration = time.time() - int(Globals.test_start_time)\n # if duration <= int(Globals.test_duration):\n # time.sleep(float(int(Globals.test_duration) - duration))\n\n for node in self.node_list.values():\n params = [\"killall\",\n \"ndn-icp-download\"]\n\n ret = node.run_command(params)\n if ret:\n print(make_colored(\"red\", \"\\t# Error while stopping ndn-icp-download on {0}\".format(node)))\n\n self.logger.info(\"END TEST\")\n\n\nclass ClientManager(threading.Thread):\n \"\"\"\n Class that start the clients on the network. This class extends the :class:`threading.Thread` class, and it is\n used to start each producer with a different thread. In this way all the clients start more or less at the same time.\n\n :ivar client: The client to start\n :ivar container: The container on which the client has to run\n \"\"\"\n\n def __init__(self, client, node):\n threading.Thread.__init__(self)\n self._stopper = threading.Event()\n self.client = client\n self.file_sizes = {}\n self.node = node\n\n self.logger = logging.getLogger(__name__ + \".\" + type(self).__name__)\n\n def run(self):\n \"\"\"\n The function executed in the thread. 
It simply starts the client on the node container.\n\n :return:\n \"\"\"\n # client_duration = Globals.test_duration \\\n # if (self.client.get_duration() <= 0 or\n # self.client.get_duration > Globals.test_duration - self.client.get_start_time()) \\\n # else self.client.get_duration()\n #\n # params = [\"localclient\",\n # str(Globals.file_size),\n # str(Globals.file_size_distribution),\n # str(self.client.arrival),\n # str(Globals.flow_control_gamma),\n # str(Globals.flow_control_p_min),\n # str(Globals.flow_control_p_max),\n # str(Globals.flow_control_beta),\n # str(Globals.flow_control_est_len),\n # str(Globals.PIT_lifetime),\n # str(Globals.flow_control_timeout),\n # str(Globals.test_duration),\n # str(self.client.popularity),\n # str(self.client.catalog_name),\n # str(self.client.cid),\n # str(self.client.start_time),\n # str(client_duration)]\n #\n # self.logger.info(\"[{0}] Params={1}\".format(self.client.get_client_id(), params))\n\n params = [\"ndn-icp-download\",\n \"-u\",\n self.client.get_name()]\n print(make_colored('blue', 'downloading ...'))\n ret = self.node.run_command(params)\n\n if not ret:\n self.logger.error(\"[{0}] Error executing client application {1}\".format(self.node,\n self.client.get_client_id()))\n print(make_colored(\"red\", \"[{0}] Error executing \"\n \"client application {1}\".format(self.node,\n self.client.get_client_id())))\n\n def stop(self):\n self._stopper.set()\n","sub_path":"src/Crackle/NDNManager.py","file_name":"NDNManager.py","file_ext":"py","file_size_in_byte":34821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"160861471","text":"chislo = input(\"Яку операцію виконуэмо(+, -, *, /): \")\r\n\r\na = int( input(\"Введіть преше число: \") )\r\nb = int( input(\"Введіть друге число: \") )\r\nif chislo == \"+\":\r\n c = a + b\r\n print(\"Результат: \" + str(c))\r\nelif chislo == \"-\":\r\n c = a - b\r\n print(\"Результат: \" + str(c))\r\nelif chislo == \"*\":\r\n c = a * b\r\n print(\"Результат: \" + str(c))\r\nelif chislo == \"/\":\r\n c = a / b\r\n print(\"Результат: \" + str(c))\r\nelse:\r\n print(\"Таких команд не існує\")\r\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"305841245","text":"def Minimum(arr,low,high):\r\n if low==high:\r\n return arr[low]\r\n mid=(low+high)//2\r\n return min(Minimum(arr,low,mid),Minimum(arr,mid+1,high),cross_Min(arr,low,mid,high))\r\n\r\ndef cross_Min(arr,low,mid,high):\r\n left=[]\r\n right=[]\r\n\r\n for i in range(low,mid+1):\r\n left.append(arr[i])\r\n\r\n for j in range(mid+1,high+1):\r\n right.append(arr[j])\r\n\r\n min1,min2=left[0],right[0]\r\n for i in range(1,len(left)):\r\n if min1>left[i]:\r\n min1=left[i]\r\n\r\n for j in range(1,len(right)):\r\n if min2>right[j]:\r\n min2=right[j]\r\n\r\n return min(min1,min2)\r\n\r\narr=[4,2,6,1,8,5,9,10]\r\nprint(Minimum(arr,0,len(arr)-1))","sub_path":"minimum.py","file_name":"minimum.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"548615059","text":"import json\nimport media\nimport fresh_tomatoes\n\n\ndef extract_movie_trailers_from_json(movies_josn_file):\n '''\n Extract info about movie trailers from a json file\n (movie_trailers.json in this case)\n :param movies_josn_file: the path to open the json file (.json)\n :return:\n a list of movie trailers, 
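# Worked example for the divide-and-conquer Minimum above: with
# arr = [4, 2, 6, 1, 8, 5, 9, 10], Minimum(arr, 0, 7) splits at mid = 3,
# obtains 1 from the left half and 5 from the right, and cross_Min rescans
# both halves for min(1, 5) = 1, so the call returns 1.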
which are dictionaires\n '''\n\n movie_list = []\n\n with open(movies_josn_file) as f:\n\n movie_trailers = json.load(f)\n\n for movie_title, movie_trailer in movie_trailers.items():\n movie = {\n 'title': movie_title,\n 'storyline': movie_trailer['storyline'],\n 'poster_image_url': movie_trailer['poster_image_url'],\n 'trailer_youtube_url': movie_trailer['trailer_youtube_url']\n }\n movie_list.append(movie)\n\n return movie_list\n\n\ndef create_movies(movie_list):\n '''\n Create a list of movie objects from a list of movie dictionaries\n :param movie_list: a list of movie dictionaries\n including title, storyline, poster_image_url and youtube_trailer_url\n :return:\n a list of Movie objects\n '''\n movie_objects = []\n for movie in movie_list:\n movie_object = media.Movie(title=movie['title'],\n storyline=movie['storyline'],\n poster_image_url=movie['poster_image_url'],\n trailer_youtube_url=movie['trailer_'\n 'youtube_url'])\n movie_objects.append(movie_object)\n\n return movie_objects\n\nif __name__ == '__main__':\n\n # extract infos from movie_trailers.json\n json_path = \"movie_trailers.json\"\n movie_list = extract_movie_trailers_from_json(json_path)\n\n # create movie objects\n movie_objects = create_movies(movie_list)\n\n # generate and open the movie trailer HTML\n fresh_tomatoes.open_movies_page(movie_objects)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"413672256","text":"primes = [0, 0]\nfor i in range(2, 10001):\n for j in range(2, int(i**0.5) + 1):\n if i % j == 0: \n primes.append(0)\n break\n else:\n primes.append(1)\n\nT = int(input())\nfor t in range(T):\n num = int(input())\n check = num // 2\n i = 0\n while True:\n if primes[check+i] and primes[check-i]:\n if (check+i) + (check-i) == num:\n print(check-i, check+i)\n break\n i += 1","sub_path":"python/bojprobs/단계별로 풀어보기/이전/math13_9020.py","file_name":"math13_9020.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"144364274","text":"\"\"\"\r\n\r\n Streamlit webserver-based Recommender Engine.\r\n\r\n Author: Explore Data Science Academy.\r\n\r\n Note:\r\n ---------------------------------------------------------------------\r\n Please follow the instructions provided within the README.md file\r\n located within the root of this repository for guidance on how to use\r\n this script correctly.\r\n\r\n NB: !! Do not remove/modify the code delimited by dashes !!\r\n\r\n This application is intended to be partly marked in an automated manner.\r\n Altering delimited code may result in a mark of 0.\r\n ---------------------------------------------------------------------\r\n\r\n Description: This file is used to launch a minimal streamlit web\r\n\tapplication. 
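# Worked example for the Goldbach-partition snippet above: for num = 10 the
# scan starts at check = 5 with i = 0; primes[5] is 1 and 5 + 5 == 10, so the
# loop prints "5 5", the partition whose two primes lie closest together.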
You are expected to extend certain aspects of this script\r\n and its dependencies as part of your predict project.\r\n\r\n\tFor further help with the Streamlit framework, see:\r\n\r\n\thttps://docs.streamlit.io/en/latest/\r\n\r\n\"\"\"\r\n# Streamlit dependencies\r\nimport streamlit as st\r\n\r\n# Data handling dependencies\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# Custom Libraries\r\nfrom utils.data_loader import load_movie_titles\r\nfrom recommenders.collaborative_based import collab_model\r\nfrom recommenders.content_based import content_model\r\n\r\n# Data Loading\r\ntitle_list = load_movie_titles('resources/data/movies.csv')\r\n\r\n#Pulls head links\r\ndef bfind_head(text_full_string):\r\n\tbt_block = ''\r\n\treturn_var = 0\r\n\tfor line in text_full_string:\r\n\t\tif '#HEAD\\n' == line:\r\n\t\t\treturn_var = 1\r\n\t\t\tcontinue\r\n\t\tif return_var == 1:\r\n\t\t\tif '#HEAD_END\\n' == line:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tbt_block += line\r\n\treturn bt_block\r\n\r\n#Pulls home page\r\ndef bfind_home(text_full_string):\r\n\tbt_block = ''\r\n\treturn_var = 0\r\n\tfor line in text_full_string:\r\n\t\tif '#HOME_PAGE\\n' == line:\r\n\t\t\treturn_var = 1\r\n\t\t\tcontinue\r\n\t\tif return_var == 1:\r\n\t\t\tif '#HOME_PAGE_END\\n' == line:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tbt_block += line\r\n\treturn bt_block\r\n\r\n#Loads in bootstrap html block to use in st.markdown. For Raw Data Section\r\ndef bfind_raw_data(text_full_string):\r\n\tbt_block = ''\r\n\treturn_var = 0\r\n\tfor line in text_full_string:\r\n\t\tif '#RAW_DATA\\n' == line:\r\n\t\t\treturn_var = 1\r\n\t\t\tcontinue\r\n\t\tif return_var == 1:\r\n\t\t\tif '#RAW_DATA_END\\n' == line:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tbt_block += line\r\n\treturn bt_block\r\n\r\n#Pulls home page\r\ndef bfind_home2(text_full_string):\r\n\tbt_block = ''\r\n\treturn_var = 0\r\n\tfor line in text_full_string:\r\n\t\tif '#HOME_2\\n' == line:\r\n\t\t\treturn_var = 1\r\n\t\t\tcontinue\r\n\t\tif return_var == 1:\r\n\t\t\tif '#HOME_2_END\\n' == line:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tbt_block += line\r\n\treturn bt_block\r\n\r\n#Pulls home page\r\ndef bfind_about(text_full_string):\r\n\tbt_block = ''\r\n\treturn_var = 0\r\n\tfor line in text_full_string:\r\n\t\tif '#ABOUT\\n' == line:\r\n\t\t\treturn_var = 1\r\n\t\t\tcontinue\r\n\t\tif return_var == 1:\r\n\t\t\tif '#ABOUT_END\\n' == line:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tbt_block += line\r\n\treturn bt_block\r\n\r\n#load in local css styles\r\ndef local_css(file_name):\r\n with open(file_name) as f:\r\n st.markdown(f'', unsafe_allow_html=True)\r\n\r\nbootstrap_blocks = open('bootstrap.txt','r')\r\ntext_full_string = bootstrap_blocks.readlines()\r\n\r\nRAW_DATA = bfind_raw_data(text_full_string)\r\nHOME_PAGE = bfind_home(text_full_string)\r\nHEAD = bfind_head(text_full_string)\r\nABOUT = bfind_about(text_full_string)\r\n#HOME_2 = bfind_home2(text_full_string)\r\n#print(HOME_2)\r\nlocal_css('styles.css')\r\n\r\nst.markdown(HEAD,unsafe_allow_html=True)\r\n# App declaration\r\ndef main():\r\n\r\n # DO NOT REMOVE the 'Recommender System' option below, however,\r\n # you are welcome to add more options to enrich your app.\r\n page_options = [\"Home Page\",\"Movie Recommenders\",\"Meet the team\"]\r\n\r\n # -------------------------------------------------------------------\r\n # ----------- !! THIS CODE MUST NOT BE ALTERED !! 
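# Note: the f-string in local_css above seems to have lost its HTML tags
# during extraction; the usual Streamlit idiom it matches is
#   st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
# which is what lets the rules in styles.css reach the rendered page.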
-------------------\r\n # -------------------------------------------------------------------\r\n page_selection = st.sidebar.selectbox(\"Choose Option\", page_options)\r\n if page_selection == 'Home Page':\r\n st.markdown(HOME_PAGE, unsafe_allow_html=True)\r\n #st.markdown(HOME_2,unsafe_allow_html=True)\r\n if page_selection == \"Movie Recommenders\":\r\n # Header contents\r\n st.write('# Movie Recommender Engine')\r\n st.write('### EXPLORE Data Science Academy Unsupervised Predict')\r\n st.image('resources/imgs/Image_header.png',use_column_width=True)\r\n # Recommender System algorithm selection\r\n sys = st.radio(\"Select an algorithm\",\r\n ('Content Based Filtering',\r\n 'Collaborative Based Filtering',\r\n 'Popularity Based'))\r\n\r\n # User-based preferences\r\n st.write('### Enter Your Three Favorite Movies')\r\n movie_1 = st.selectbox('First Option',title_list[14930:15200])\r\n movie_2 = st.selectbox('Second Option',title_list[25055:25255])\r\n movie_3 = st.selectbox('Third Option',title_list[21100:21200])\r\n fav_movies = [movie_1,movie_2,movie_3]\r\n\r\n # Perform top-10 movie recommendation generation\r\n if sys == 'Content Based Filtering':\r\n if st.button(\"Recommend\"):\r\n try:\r\n with st.spinner('Crunching the numbers...'):\r\n top_recommendations = content_model(movie_list=fav_movies,\r\n top_n=10)\r\n st.title(\"We think you'll like:\")\r\n for i,j in enumerate(top_recommendations):\r\n st.subheader(str(i+1)+'. '+j)\r\n except:\r\n st.error(\"Oops! Looks like this algorithm does't work.\\\r\n We'll need to fix it!\")\r\n\r\n\r\n if sys == 'Collaborative Based Filtering':\r\n if st.button(\"Recommend\"):\r\n try:\r\n with st.spinner('Crunching the numbers...'):\r\n top_recommendations = collab_model(movie_list=fav_movies,\r\n top_n=10)\r\n st.title(\"We think you'll like:\")\r\n for i,j in enumerate(top_recommendations):\r\n st.subheader(str(i+1)+'. '+j)\r\n except:\r\n st.error(\"Oops! Looks like this algorithm does't work.\\\r\n We'll need to fix it!\")\r\n\r\n\r\n # -------------------------------------------------------------------\r\n\r\n # ------------- SAFE FOR ALTERING/EXTENSION -------------------\r\n if page_selection == \"Statistics and insights\":\r\n insight_selection = st.selectbox('Data Exploration',['Raw Data','Distribution plot for ratings','Top 15 Genres',\\\r\n 'Ratings over time (1995 - 2019)','Popular words in movie descriptive data'])\r\n if insight_selection == \"Raw Data\":\r\n bootstrap_block_1 = RAW_DATA\r\n bootstrap_block_1 = bootstrap_block_1.replace('$$', 'The data set use for training')\r\n bootstrap_block_1 = bootstrap_block_1.replace('&&',\r\n \"
      This is the training dataset it contains the following values\" \\\r\n \"
    • moviesId - the id values given to the movie
    • \" \\\r\n \"
    • userId - the id values given to the movie
      • \")\r\n st.markdown(bootstrap_block_1, unsafe_allow_html=True)\r\n if page_selection == \"Solution Overview\":\r\n st.title(\"Solution Overview\")\r\n st.write(\"Describe your winning approach on this page\")\r\n\r\n if page_selection == 'Meet the team':\r\n st.markdown(ABOUT,unsafe_allow_html=True)\r\n # You may want to add more sections here for aspects such as an EDA,\r\n # or to provide your business pitch.\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"edsa_recommender.py","file_name":"edsa_recommender.py","file_ext":"py","file_size_in_byte":7938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"393758122","text":"# -*- coding: utf-8 -*-\n\nEPOCH_YEAR = 1970\n\nDAYS_PER_N_YEAR = 365\nDAYS_PER_L_YEAR = 366\n\nUSECS_PER_SEC = 1000000\n\nSECS_PER_MIN = 60\nSECS_PER_HOUR = 60 * SECS_PER_MIN\nSECS_PER_DAY = SECS_PER_HOUR * 24\n\n# 400-year chunks always have 146097 days (20871 weeks).\nSECS_PER_400_YEARS = 146097 * SECS_PER_DAY\n\n# The number of seconds in an aligned 100-year chunk, for those that\n# do not begin with a leap year and those that do respectively.\nSECS_PER_100_YEARS = [\n (76 * DAYS_PER_N_YEAR + 24 * DAYS_PER_L_YEAR) * SECS_PER_DAY,\n (75 * DAYS_PER_N_YEAR + 25 * DAYS_PER_L_YEAR) * SECS_PER_DAY,\n]\n\n# The number of seconds in an aligned 4-year chunk, for those that\n# do not begin with a leap year and those that do respectively.\nSECS_PER_4_YEARS = [\n (4 * DAYS_PER_N_YEAR + 0 * DAYS_PER_L_YEAR) * SECS_PER_DAY,\n (3 * DAYS_PER_N_YEAR + 1 * DAYS_PER_L_YEAR) * SECS_PER_DAY,\n]\n\n# The number of seconds in non-leap and leap years respectively.\nSECS_PER_YEAR = [\n DAYS_PER_N_YEAR * SECS_PER_DAY,\n DAYS_PER_L_YEAR * SECS_PER_DAY,\n]\n\nMONTHS_PER_YEAR = 12\n\n# The month lengths in non-leap and leap years respectively.\nDAYS_PER_MONTHS = [\n [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n [-1, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n]\n\n# The day offsets of the beginning of each (1-based) month in non-leap\n# and leap years respectively.\n# For example, in a leap year there are 335 days before December.\nMONTHS_OFFSETS = [\n [-1, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],\n [-1, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]\n]\n\nTM_SUNDAY = 0\nTM_MONDAY = 1\nTM_TUESDAY = 2\nTM_WEDNESDAY = 3\nTM_THURSDAY = 4\nTM_FRIDAY = 5\nTM_SATURDAY = 6\n\nTM_JANUARY = 0\nTM_FEBRUARY = 1\nTM_MARCH = 2\nTM_APRIL = 3\nTM_MAY = 4\nTM_JUNE = 5\nTM_JULY = 6\nTM_AUGUST = 7\nTM_SEPTEMBER = 8\nTM_OCTOBER = 9\nTM_NOVEMBER = 10\nTM_DECEMBER = 11\n\n\ndef local_time(unix_time, utc_offset):\n \"\"\"\n Returns a UNIX time as a broken down time\n for a particular transition type.\n\n :type unix_time: int\n :type utc_offset: int\n\n :rtype: tuple\n \"\"\"\n year = EPOCH_YEAR\n microsecond = int(round(unix_time % 1, 6) * 1e6)\n seconds = int(unix_time)\n\n # Shift to a base year that is 400-year aligned.\n if seconds >= 0:\n seconds -= 10957 * SECS_PER_DAY\n year += 30 # == 2000\n else:\n seconds += (146097 - 10957) * SECS_PER_DAY\n year -= 370 # == 1600\n\n seconds += utc_offset\n\n # Handle years in chunks of 400/100/4/1\n year += 400 * (seconds // SECS_PER_400_YEARS)\n seconds %= SECS_PER_400_YEARS\n if seconds < 0:\n seconds += SECS_PER_400_YEARS\n year -= 400\n\n leap_year = 1 # 4-century aligned\n\n sec_per_100years = SECS_PER_100_YEARS[leap_year]\n while seconds >= sec_per_100years:\n seconds -= sec_per_100years\n year += 100\n leap_year = 0 # 1-century, non 4-century 
aligned\n sec_per_100years = SECS_PER_100_YEARS[leap_year]\n\n sec_per_4years = SECS_PER_4_YEARS[leap_year]\n while seconds >= sec_per_4years:\n seconds -= sec_per_4years\n year += 4\n leap_year = 1 # 4-year, non century aligned\n sec_per_4years = SECS_PER_4_YEARS[leap_year]\n\n sec_per_year = SECS_PER_YEAR[leap_year]\n while seconds >= sec_per_year:\n seconds -= sec_per_year\n year += 1\n leap_year = 0 # non 4-year aligned\n sec_per_year = SECS_PER_YEAR[leap_year]\n\n # Handle months and days\n month = TM_DECEMBER + 1\n day = seconds // SECS_PER_DAY + 1\n seconds %= SECS_PER_DAY\n while month != TM_JANUARY + 1:\n month_offset = MONTHS_OFFSETS[leap_year][month]\n if day > month_offset:\n day -= month_offset\n break\n\n month -= 1\n\n # Handle hours, minutes, seconds and microseconds\n hour = seconds // SECS_PER_HOUR\n seconds %= SECS_PER_HOUR\n minute = seconds // SECS_PER_MIN\n second = seconds % SECS_PER_MIN\n\n return (\n year, month, day,\n hour, minute, second, microsecond\n )\n","sub_path":"pendulum/_extensions/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"633008733","text":"from helpers import log, locate_game_window, get_value_from_rect, click_on_box, escape, click_image, click, click_next, \\\n delay_next_check\nimport pyautogui\nimport time\nimport settings\nimport datetime\n\nlast_check = datetime.datetime.now()\nif last_check.hour == 0:\n last_check = last_check.replace(minute=0)\nelse:\n last_check = last_check.replace(hour=last_check.hour - 1) # Remove 1 hour to make sure it checks first run\n\n\ndef check_events():\n global last_check\n if not settings.events:\n return\n #if delay_next_check(5, last_check):\n # return\n event = pyautogui.locateOnScreen('imgs/event.PNG', confidence=0.92)\n if event is not None:\n log(\"Checking event.\")\n last_check = datetime.datetime.now()\n click_on_box(event)\n check_lab()\n check_wayback()\n check_magic()\n check_tree_of_life()\n\nlab_done = False\ndef check_lab():\n global lab_done\n if lab_done:\n log(\"Laboratory already done, escaping\")\n escape(2)\n return\n time.sleep(1)\n laboratory = pyautogui.locateOnScreen('imgs/laboratory.PNG', confidence=0.88)\n if laboratory is not None:\n log(\"Laboratory of the Alchemist event found.\")\n time.sleep(1)\n lab_three = pyautogui.locateOnScreen('imgs/lab_three.PNG', confidence=0.88)\n if lab_three is not None:\n click_on_box(lab_three)\n time.sleep(2)\n click(1040, 610)\n time.sleep(3)\n lab_done = True\n escape(3)\n\n\nwayback_done = 0\ndef check_wayback():\n global wayback_done\n if wayback_done >= 2:\n log(\"Way back home already done, escaping\")\n escape(2)\n return\n time.sleep(1)\n wayback = pyautogui.locateOnScreen('imgs/wayback.PNG', confidence=0.88)\n if wayback is not None:\n log(\"Way back home event found.\")\n time.sleep(1)\n lab_three = pyautogui.locateOnScreen('imgs/lab_three.PNG', confidence=0.88)\n if lab_three is not None:\n click_on_box(lab_three)\n time.sleep(2)\n click(1024, 700)\n wayback_done += 1\n time.sleep(3)\n escape(3)\n\n\ntree_of_life_done = 0\ndef check_tree_of_life():\n global tree_of_life_done\n if tree_of_life_done >= 2:\n log(\"Tree of Life already done, escaping\")\n escape(2)\n return\n time.sleep(1)\n wayback = pyautogui.locateOnScreen('imgs/tree_of_life.PNG', confidence=0.88)\n if wayback is not None:\n log(\"Tree of Life event found.\")\n time.sleep(1)\n lab_three = pyautogui.locateOnScreen('imgs/lab_three.PNG', 
confidence=0.88)\n if lab_three is not None:\n click_on_box(lab_three)\n time.sleep(2)\n click(1024, 700)\n tree_of_life_done += 1\n time.sleep(3)\n escape(3)\n\n \n \nmagic_done = False\ndef check_magic():\n global magic_done\n if magic_done:\n log(\"Magic against the power already done, escaping\")\n escape(2)\n return\n time.sleep(1)\n magic = pyautogui.locateOnScreen('imgs/magic.PNG', confidence=0.88)\n if magic is not None:\n log(\"Magic against the power event found.\")\n time.sleep(1)\n magic_three = pyautogui.locateOnScreen('imgs/lab_three.PNG', confidence=0.88)\n if magic_three is not None:\n click_on_box(magic_three)\n time.sleep(2)\n click(1024, 700)\n time.sleep(3)\n magic_done = True\n escape(3)\n","sub_path":"events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"451666468","text":"# -*- coding: utf-8 -*-\n#coding=utf-8\nimport numpy as np #导入模块,numpy是扩展链接库\nimport pandas as pd #类似一个本地的excel,偏向现在的非结构化的数据库\nimport tensorflow as tf\nimport keras\nfrom keras.utils import np_utils\nnp.random.seed(10) #设置seed可以产生的随机数据\nfrom keras.datasets import mnist #导入模块,下载读取mnist数据\n(x_train_image,y_train_label),\\\n(x_test_image,y_test_label)=mnist.load_data() #下载读取mnist数据\n\nimport matplotlib.pyplot as plt\n\nx_Train=x_train_image.reshape(60000,784).astype('float32') #以reshape转化成784个float\nx_Test=x_test_image.reshape(10000,784).astype('float32')\nx_Train_normalize=x_Train/255 #将features标准化\nx_Test_normalize=x_Test/255\ny_Train_OneHot=np_utils.to_categorical(y_train_label)#将训练数据和测试数据的label进行one-hot encoding转化\ny_Test_OneHot=np_utils.to_categorical(y_test_label)\n\n#2.建立模型\nfrom keras.models import Sequential #可以通过Sequential模型传递一个layer的list来构造该模型,序惯模型是多个网络层的线性堆叠\nfrom keras.layers import Dense #全连接层\nmodel=Sequential()\n#建立输入层、隐藏层\nmodel.add(Dense(units=1000,\n input_dim=784,\n kernel_initializer='normal',\n activation='relu'))\n#建立输出层\nmodel.add(Dense(units=10,\n kernel_initializer='normal',\n activation='softmax'))\nprint(model.summary())\n#3、进行训练\n#对训练模型进行设置,损失函数、优化器、权值\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',metrics=['accuracy'])\n# 设置训练与验证数据比例,80%训练,20%测试,执行10个训练周期,每一个周期200个数据,显示训练过程2次\ntrain_history=model.fit(x=x_Train_normalize,\n y=y_Train_OneHot,validation_split=0.2,\n epochs=100,batch_size=200,verbose=2)\n#显示训练过程\n\ndef show_train_history(train_history,train,validation,hiddenUnits):\n plt.plot(train_history.history[train])\n plt.plot(train_history.history[validation])\n plt.title(hiddenUnits)\n plt.ylabel(train)\n plt.xlabel('Epoch')\n plt.legend(['train','test'],loc='upper left') #显示左上角标签\n plt.show()\nshow_train_history(train_history,'accuracy','val_accuracy', '10 hidden units') #画出准确率评估结果\nshow_train_history(train_history,'loss','val_loss', '10 hidden units') #画出误差执行结果\n\n","sub_path":"lab4/lab4/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"592408529","text":"import os\n\nfrom django.shortcuts import render, reverse\nfrom django.conf import settings\nfrom .forms import ArticleSearchForm\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import SingleObjectMixin\nfrom .models import Article, Search\nfrom math import log2\nfrom django.http import HttpResponseRedirect\nimport string\nimport re\n\nneg_words = ['aggravated', 'agitated', 'alarmed', 'anger', 'angered', 'angry', 
'anguish', 'antipathy', 'anxiety',\n 'anxious', 'apprehension', \"aren't\", 'aversion', 'bad', 'befuddled', 'bewildered', 'bitterness', 'break',\n 'broken', 'cannot', \"can't\", 'conceited', 'confound', 'confused', 'contempt', 'coward', 'crammed',\n 'decrepit', 'deflated', 'dejected', 'deny', 'denied', 'depressed', 'desperate', 'despondent', \"didn't\",\n 'disappointed', 'discombobulated', 'discomposed', 'disconcert', 'discontented', 'disgruntled', 'disgust',\n 'disillusioned', 'dislike', 'dismayed', 'dispirited', 'displeasure', 'dissatisfied', 'distraught',\n 'distressed', \"doesn't\", 'dread', 'embarrassed', 'enraged', 'envy', 'exasperated', 'excited', 'exploit',\n 'fail', 'failure', 'fear', 'ferocious', 'flustered', 'fractious', 'frantic', 'frenzied', 'frenzy',\n 'frightful', 'frightened', 'frustrated', 'fuddle', 'furious', 'gloomy', 'glumness', 'grievous', 'grouchy',\n 'grumpy', 'guilty', \"hadn't\", \"hasn't\", \"haven't\", 'heartbroken', 'homesickness', 'hopeless', 'horrified',\n 'horror', 'hostility', 'humiliated', 'hysterical', 'inconsolable', 'indignant', 'irritated', \"isn't\",\n 'jealousy', 'jolted', 'lazy', 'livid', 'loathing', 'loneliness', 'mad', 'maddened', 'manic', 'melancholic',\n 'miserable', 'misery', 'mortified', \"mustn't\", \"needn't\", 'nervous', 'no', 'nosy', 'outrage',\n 'outraged', 'outrageous', 'overwrought', 'panicked', 'peculiar', 'perplexed', 'phrenetic', 'picky', 'rage',\n 'regret', 'regretful', 'remorse', 'remorseful', 'resent', 'resentment', 'revulsion', 'sad', 'sadly',\n 'sadness', 'scared', 'scary', 'scorn', 'shocked', \"shouldn't\", 'sorrowful', 'spite', 'stingy', 'stubborn',\n 'stunned', 'tense', 'tenseness', 'terrified', 'terror', 'torment', 'uneasiness', 'upset', 'vengefulness',\n \"wasn't\", \"weren't\", 'wild', 'woeful', \"won't\", 'worried', \"wouldn't\", 'wrath', 'wretched'\n ]\n\n\ndef index(request):\n form = ArticleSearchForm()\n if 'query' and 'neg' in request.GET:\n return HttpResponseRedirect(reverse('retrievabl:search', kwargs={'query': request.GET['query'],\n 'neg': request.GET['neg']}))\n return render(request, 'retrievabl/index.html', {'form': form})\n\n\ndef search(request):\n form = ArticleSearchForm()\n if 'query' in request.GET:\n query = request.GET['query']\n return HttpResponseRedirect('/search/' + query)\n return render(request, 'retrievabl/search.html', {'form': form})\n\n\ndef mission(request):\n form = ArticleSearchForm()\n if 'query' in request.GET:\n query = request.GET['query']\n return HttpResponseRedirect('/search/' + query)\n return render(request, 'retrievabl/our_mission.html', {'form': form})\n\n\ndef contact(request):\n form = ArticleSearchForm()\n if 'query' in request.GET:\n query = request.GET['query']\n return HttpResponseRedirect('/search/' + query)\n return render(request, 'retrievabl/contact_us.html', {'form': form})\n\n\ndef avg_doc_len(corpus):\n totlen = 0\n translator = str.maketrans('', '', string.punctuation)\n for doc in corpus:\n dbody = str(doc.body.lower().split()).translate(translator)\n length = len(re.findall(r'\\w+', dbody))\n totlen += length\n avg = totlen / len(corpus)\n return avg\n\n\ndef neg_score(query, corpus):\n translator = str.maketrans('', '', string.punctuation)\n nw = neg_words\n query = query.lower()\n negbm25score = 0\n k = 1.25\n nk = 10 # weight for negative filtering\n avgdl = avg_doc_len(corpus)\n M = len(corpus)\n b = 0.75\n docfreq = {}\n freq = 0\n for doc in corpus:\n find_neg(doc)\n for word in query.lower().split():\n if word in 
str(doc.body.lower().split()).translate(translator):\n freq += 1\n docfreq[str(word).lower()] = freq\n freq = 0\n\n result = 0\n regbm25score = 0.0\n for doc in corpus:\n dbody = str(doc.body.lower().split()).translate(translator)\n length = len(re.findall(r'\\w+', dbody))\n for word in query.split():\n if word in dbody:\n one = query.count(word.lower())\n two = (k + 1) * dbody.count(str(word).lower())\n three = dbody.count(str(word).lower()) + (k * (1 - b + (b * (length / avgdl))))\n four = log2((M + 1) / (doc.body.count(str(word)) + 1))\n result = one * (two / three) * four\n regbm25score += result\n\n result = (1 - ((doc.percentage / 100) * nk)) * regbm25score\n negbm25score = result\n doc.score = float(negbm25score)\n doc.save()\n negbm25score = 0.0\n regbm25score = 0.0\n\n\ndef reg_score(query, corpus):\n translator = str.maketrans('', '', string.punctuation)\n query = query.lower()\n regbm25score = 0\n k = 1.25\n avgdl = avg_doc_len(corpus)\n M = len(corpus)\n b = 0.75\n docfreq = {}\n freq = 0\n for doc in corpus:\n find_neg(doc)\n for word in query.lower().split():\n if word in str(doc.body.split()).translate(translator):\n freq += 1\n docfreq[str(word).lower()] = freq\n freq = 0\n for doc in corpus:\n dbody = str(doc.body.lower().split()).translate(translator)\n length = len(re.findall(r'\\w+', dbody))\n for word in query.split():\n if word in dbody:\n one = query.count(word.lower())\n two = (k + 1) * dbody.count(str(word).lower())\n three = dbody.count(str(word).lower()) + (k * (1 - b + (b * (length / avgdl))))\n four = log2((M + 1) / (doc.body.count(str(word)) + 1))\n result = one * (two / three) * four\n regbm25score += result\n doc.score = float(regbm25score)\n doc.save()\n regbm25score = 0.0\n\n\ndef find_neg(doc):\n translator = str.maketrans('', '', string.punctuation)\n nw = neg_words\n neg_count = 0\n dbody = str(doc.body.lower().split()).translate(translator)\n for neg_word in nw:\n find = len(re.findall(str(neg_word), dbody))\n if find > 0:\n neg_count += find\n length = len(re.findall(r'\\w+', dbody))\n doc.percentage = float(100 * (neg_count / length))\n doc.save()\n\n\nclass ArticleListView(ListView):\n template_name = 'retrievabl/search.html'\n context_object_name = 'articles'\n\n def get_ndcg(self, query):\n neg_score(query, Article.objects.all())\n articles = Article.objects.order_by('score').reverse().filter(score__gt=0)\n i = 1\n dcg = 0\n idcg = 0\n for article in articles:\n dcg += float(2 ** article.score - 1) / log2(i + 1)\n i += 1\n\n reg_score(query, Article.objects.all())\n articles = Article.objects.order_by('score').reverse().filter(score__gt=0)\n i = 1\n for article in articles:\n idcg += float(2 ** article.score - 1) / log2(i + 1)\n\n if idcg == 0:\n idcg = 1\n\n return float(dcg / idcg)\n\n def get_queryset(self):\n query = self.kwargs.pop('query', None)\n neg_rank = self.kwargs.pop('neg', None)\n\n if neg_rank == 1:\n neg_score(query, Article.objects.all())\n else:\n reg_score(query, Article.objects.all())\n\n result = Article.objects.order_by('score').reverse().filter(score__gt=0)\n return result\n","sub_path":"retrievabl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"173276542","text":"import pytest\n\nfrom apps.authentication import roles\nfrom factories.users import UserFactory\n\n\n@pytest.fixture\ndef user_administration():\n \"\"\"\n Create user as administration with data:\n email = 'administration@a.ru'\n 
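# The quantity accumulated above is the standard BM25 term weight,
#   qtf * ((k + 1) * tf) / (tf + k * (1 - b + b * dl / avgdl)) * log2((M + 1) / (n + 1))
# with k = 1.25 and b = 0.75. Note that n is taken as the term's count inside
# the same document body rather than a corpus-wide document frequency, and the
# docfreq dict built just before is never read afterwards.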
password = 'test'\n is_active = True\n role = ADMINISTRATION\n \"\"\"\n administration = UserFactory(\n last_name='Иванова',\n first_name='Анна',\n email='administration@a.ru',\n role=roles.ADMINISTRATION,\n is_active=True,\n )\n return administration\n\n\n@pytest.fixture\ndef user_educator():\n \"\"\"\n Create user as educator with data:\n email = 'educator@a.ru'\n password = 'test'\n is_active = True\n role = EDUCATOR\n \"\"\"\n educator = UserFactory(\n last_name='Смирнова',\n first_name='Анна',\n email='educator@a.ru',\n role=roles.EDUCATOR,\n is_active=True\n )\n return educator\n\n\n@pytest.fixture\ndef unactivated_user():\n \"\"\"\n Create unactivated user\n \"\"\"\n _user = UserFactory(\n last_name='Иванов',\n first_name='Иван',\n middle_name='Иван',\n email='unactivated@a.ru',\n role=roles.EDUCATOR,\n is_active=False\n )\n return _user\n","sub_path":"tests/fixtures/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"101057887","text":"from Dot import Dot\r\nfrom random import random as r\r\n\r\n\r\nclass DotFactory:\r\n\tdef __init__(self, x, y, gx, gy, screen, pop, obs):\r\n\t\tself.dots = []\r\n\t\tself.pop = pop\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.obs = obs\r\n\t\tfor i in range(pop):\r\n\t\t\tself.dots.append(Dot(x, y, r()*20-10, r()*20-10, gx, gy, screen, obs))\r\n\t\tself.minStep = 10000\r\n\t\tself.fitnessSum = 0\r\n\t\tself.bestDot = 0\r\n\t\tself.gen = 0\r\n\r\n\tdef show(self):\r\n\t\tfor d in self.dots:\r\n\t\t\td.show()\r\n\r\n\tdef update(self):\r\n\t\tfor d in self.dots:\r\n\t\t\tif d.steps > self.minStep + 300:\r\n\t\t\t\td.dead = True\r\n\t\t\telse:\r\n\t\t\t\td.move()\r\n\r\n\tdef calculateFitness(self):\r\n\t\tself.fitnessSum = 0\r\n\t\tfor d in self.dots:\r\n\t\t\tself.fitnessSum += d.calculateFitness()\r\n\r\n\tdef allDotsDead(self):\r\n\t\tfor d in self.dots:\r\n\t\t\tif not d.dead and not d.reachedGoal:\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef naturalSelection(self):\r\n\t\tnewDots = []\r\n\t\tself.setBestDot()\r\n\t\tself.calculateFitness()\r\n\r\n\t\tnewDots.append(self.dots[self.bestDot].clone(self.x, self.y))\r\n\t\tnewDots[0].isBest = True\r\n\r\n\t\tfor i in range(1, self.pop):\r\n\t\t\tnewDots.append(self.selectParent().clone(self.x, self.y))\r\n\r\n\t\tself.dots = newDots[:]\r\n\t\tself.gen += 1\r\n\r\n\r\n\tdef setBestDot(self):\r\n\t\tmax = 0\r\n\t\tmi = 0\r\n\t\tfor i in range(len(self.dots)):\r\n\t\t\tif self.dots[i].calculateFitness() > max:\r\n\t\t\t\tmax = self.dots[i].calculateFitness()\r\n\t\t\t\tmi = i\r\n\r\n\t\tself.bestDot = mi\r\n\r\n\t\tif self.dots[mi].reachedGoal:\r\n\t\t\tself.minStep = self.dots[mi].steps\r\n\r\n\tdef selectParent(self):\r\n\t\tthreshold = r()*self.fitnessSum\r\n\t\ts = 0\r\n\r\n\t\tfor d in self.dots:\r\n\t\t\ts += d.calculateFitness()\r\n\t\t\tif s > threshold:\r\n\t\t\t\treturn d\r\n\r\n\t\t# Shouldn't hit this point\r\n\t\tprint(self.fitnessSum, threshold, s)\r\n\r\n\t\treturn None\r\n\r\n\tdef mutate(self, rate):\r\n\t\tfor i in range(1, len(self.dots)):\r\n\t\t\tself.dots[i].mutate(rate)\r\n","sub_path":"DotFactory.py","file_name":"DotFactory.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"297923386","text":"from django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, 
login\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, HttpResponseRedirect, HttpResponseForbidden\nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.decorators.http import require_POST, require_GET\nfrom .forms import UserForm, UserEditForm, CommentForm, PostForm\nfrom .models import Post, Comment, Vote\nfrom .settings import *\nimport datetime\n\n\n\n@require_GET\ndef show_index(request):\n    index_posts = Post.index_posts_by_vote_count()\n    paginator = Paginator(index_posts, MWPTESTBLOGAPP_POSTS_PER_PAGE)\n\n    page = request.GET.get('page')\n\n    try:\n        index_posts_page = paginator.page(page)\n    except PageNotAnInteger:\n        index_posts_page = paginator.page(1)\n    except EmptyPage:\n        index_posts_page = paginator.page(paginator.num_pages)\n\n\n    return render(request, 'full/index.html', {\n        'index_posts_page': index_posts_page,\n    })\n\n\n@require_GET\ndef show_user(request, username):\n    user = get_object_or_404(User, username=username)\n\n    user_posts = user.posts.all().order_by('-created_at')\n\n    paginator = Paginator(user_posts, MWPTESTBLOGAPP_POSTS_PER_PAGE)\n\n    page = request.GET.get('page')\n\n    try:\n        user_posts = paginator.page(page)\n    except PageNotAnInteger:\n        user_posts = paginator.page(1)\n    except EmptyPage:\n        user_posts = paginator.page(paginator.num_pages)\n\n    return render(request, 'full/user.html', {\n        'user': user,\n        'user_posts_page': user_posts,\n    })\n\n\n# Posts\n@require_GET\ndef show_post(request, post_id, slug=None):\n    post = get_object_or_404(Post, id=post_id)\n    post_comments = post.comments.all().order_by('-created_at')\n    comment_form = CommentForm()\n\n    if request.user.is_authenticated(): # Find votes\n        vote = Vote.find_vote(request.user, post)\n        vote_value = vote.value if vote else 0\n    else:\n        vote_value = 0\n\n\n    return render(request, 'full/post.html', {\n        'post': post,\n        'post_comments': post_comments,\n        'comment_form': comment_form,\n        'vote_value': vote_value\n    })\n\n\n@login_required(login_url='/login/')\ndef add_post(request):\n    if request.method == 'GET':\n        post_form = PostForm()\n        return render(request, 'full/add_post.html', {\n            'form': post_form\n        })\n\n    elif request.method == 'POST':\n        post_form = PostForm(request.POST)\n        if post_form.is_valid():\n            post = Post(user=request.user, **post_form.cleaned_data)\n            post.save()\n            return HttpResponseRedirect(reverse('show_post', kwargs={\"post_id\": post.id, \"slug\": post.slug}))\n        else:\n            return render(request, 'full/add_post.html', {'form': post_form})\n\n\n\n\n@login_required(login_url='/login/')\ndef edit_post(request, post_id, slug=None):\n\n    post = get_object_or_404(Post, id=post_id)\n\n    if not request.user.is_superuser and request.user != post.user:\n        return HttpResponseForbidden()\n\n\n    if request.method == 'GET':\n        post_form = PostForm(instance=post)\n        return render(request, 'full/edit_post.html', {'post': post,\n                                                       'form': post_form})\n    elif request.method == 'POST':\n        post_form = PostForm(request.POST)\n        if post_form.is_valid():\n\n            for field, value in post_form.cleaned_data.items():\n                setattr(post, field, value)\n\n            post.save()\n            return HttpResponseRedirect(reverse('show_post', kwargs={\"post_id\": post.id, \"slug\": post.slug}))\n        else:\n            return render(request, 'full/edit_post.html', {'post': post,\n                                                           'form': post_form})\n\n\n@require_POST\n@login_required(login_url='/login/')\ndef delete_post(request, post_id, slug=None):\n    post = 
get_object_or_404(Post, id=post_id)\n\n if not request.user.is_superuser and request.user != post.user:\n return HttpResponseForbidden()\n\n post.delete()\n return HttpResponseRedirect(reverse('home'))\n\n\n\n\n\n# Voting\n@require_GET\ndef vote_post(request):\n post = get_object_or_404(Post, id=request.GET.get('post_id', None))\n user = request.user\n value = -1 if request.GET.get('downvote', 0) == '1' else 1 # like by default\n response_context = {}\n if user.is_authenticated():\n # Check if voted already\n vote = Vote.find_vote(user, post)\n if not vote:\n # Liking here\n vote = Vote(user=user, post=post, value=value)\n vote.save()\n else:\n if vote.value != value:\n vote.value = value\n vote.save()\n else:\n vote.delete() # Delete vote if double-voted\n value = 0\n\n response_context.update({\"status\": 200})\n response_context.update({'vote_count': post.vote_count(), \"value\": value})\n\n else:\n response_context.update({\"status\": 401})\n\n return JsonResponse(response_context)\n\n\n# Comments\n@require_POST\n@login_required(login_url='/login/')\ndef post_comment(request):\n post_id = request.POST.get('post_id', None)\n user = request.user\n post = get_object_or_404(Post, id=post_id)\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid():\n comment = Comment(user=user, post=post, content=comment_form.cleaned_data[\"content\"])\n comment.save()\n\n return HttpResponseRedirect(reverse('show_post', kwargs={\"post_id\": post.id}))\n\n@require_GET\n@login_required\ndef delete_comment(request, comment_id):\n comment = get_object_or_404(Comment, id=comment_id)\n\n if not request.user.is_superuser and request.user != comment.user:\n return HttpResponseForbidden()\n\n comment_post = comment.post\n comment.delete()\n return HttpResponseRedirect(reverse('show_post', kwargs={\"post_id\":comment_post.id, \"slug\": comment_post.slug}))\n\n\n\n# User\ndef register(request):\n if request.method == \"POST\":\n _next = request.POST.get('next', None)\n user_form = UserForm(request.POST)\n if user_form.is_valid():\n user_form.cleaned_data.pop('password2')\n new_user = User.objects.create_user(**user_form.cleaned_data)\n new_user.save() # should really check if user is actually saved\n\n\n new_user = authenticate(username=user_form.cleaned_data['username'],\n password=user_form.cleaned_data['password'],\n )\n login(request, new_user)\n\n if _next:\n return HttpResponseRedirect(_next)\n else:\n return HttpResponseRedirect(reverse('home'))\n else:\n context = {'form': user_form}\n if _next:\n context.update({'next': _next})\n return render(request, 'registration/register.html', context)\n\n else:\n _next = request.GET.get('next', None)\n user_form = UserForm()\n context = {'form': user_form}\n if _next:\n context.update({'next': _next})\n return render(request, 'registration/register.html', context)\n\n\n\n@login_required\ndef edit_profile(request):\n if request.method == \"POST\":\n form = UserEditForm(request.POST)\n if form.is_valid():\n for field, value in form.cleaned_data.items():\n setattr(request.user, field, value)\n request.user.save()\n return render(request, 'full/edit_profile.html', {\"form\": form, 'notify': 'Изменения сохранены.'})\n else:\n return render(request, 'full/edit_profile.html', {\"form\": form})\n\n elif request.method == \"GET\":\n form = UserEditForm(instance=request.user)\n return render(request, 'full/edit_profile.html', {\"form\": form})\n\n@login_required\ndef delete_user(request, username):\n user = get_object_or_404(User, username=username)\n\n if not 
request.user.is_superuser and request.user != user:\n        return HttpResponseForbidden()\n\n    if request.method == \"GET\":\n        return render(request, 'full/delete_user.html', {'user': user})\n\n    if request.method == \"POST\":\n        user.delete()\n        return HttpResponseRedirect(reverse('home'))\n","sub_path":"mwptestblogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"76775455","text":"import pygame as pg\nimport random,noise\n# Constants:\nFPS = 60 # frames per second cap\nWIND = 750 # window dimensions\n\npg.init()\nf = pg.display.set_mode((WIND, WIND),pg.RESIZABLE)\npg.display.set_caption(\"The /) game\")\nfpsClock = pg.time.Clock()\nfont = pg.font.SysFont('consolas', 30) \n\nzoom=1\nb = True\ntuiles=[]\nanim=[pg.image.load(\"sprite//b_\"+format(i,\"02\")+\".png\") for i in range(11)]\nstop=pg.image.load(\"sprite//assis.png\")\n\ndep=1 # Roger's horizontal movement speed\ngrav=1 # Roger's falling speed (gravity)\nvitesse=5 # scrolling speed of the floor\nfond=pg.Surface((WIND,WIND))#pg.image.load(\"fond.png\")\nfond.fill(0xffffff)\nwidth=fond.get_rect().width\nsachet=pg.image.load(\"crispy pix.png\").convert()\nsachet=pg.transform.scale(sachet,(25,25))\nscore=0\npg.mixer.init()\npg.mixer.music.load(\"sounds//zik.mp3\")\npg.mixer.music.play(-1)\nclass Perso:\n    \"\"\"The main character.\"\"\"\n    def __init__(self):\n        self.img=anim\n        self.index_anim=0\n        self.rect=self.img[0].get_rect()\n        self.vy=0\n        self.jump=False\n        \n    def draw(self):\n        global score\n        self.vy+=grav\n        if self.rect.y+self.rect.height/2>WIND:\n            self.rect.y=0\n            self.vy=0\n        elif self.rect.y-self.rect.height/2+self.vy<0:\n            self.rect.y=self.rect.height/2\n            self.vy=0\n        if self.jump:\n            self.vy=-15\n            self.jump=False\n        self.Assis=False\n        for jacko in tuiles:\n            if jacko.fertile:\n                if self.rect.colliderect(jacko.fertile):\n                    jacko.fertile=False\n                    score+=1\n                    self.son=pg.mixer.Sound(\"sounds//ohh.mp3\")\n                    self.son.play()\n            if self.rect.colliderect(jacko):\n                self.Assis=True\n                if self.vy>0:\n                    self.vy=0\n                self.rect.y=jacko.rect.y-4-self.rect.height/2\n                self.rect.x-=vitesse\n                break\n        \n        if not self.Assis:\n            self.index_anim+=1\n            self.index_anim=self.index_anim%11\n        if self.Assis:\n            f.blit(stop,self.rect)\n        else:\n            f.blit(self.img[self.index_anim],self.rect)\n        \nclass Sol:\n    \"\"\"The floor platforms.\"\"\"\n    def __init__(moi,hauteur):\n        moi.rect=pg.Rect((WIND,hauteur),(random.randrange(10,20)*20,10))\n        moi.coul=(150,150,150)\n        \n        if random.randint(0,4):\n            moi.fertile=False\n        else:\n            moi.fertile=Crispy(moi.rect.center)\n    def draw(self):\n        pg.draw.rect(f,self.coul,self.rect)\n        self.rect.x-=vitesse\n        if self.fertile:\n            self.fertile.draw()\n        if self.rect.x+self.rect.width<0:\n            tuiles.pop(tuiles.index(self))\n            print(len(tuiles))\n\nclass Crispy:\n    \"\"\"The crisp packets to collect.\"\"\"\n    def __init__(moi,pos):\n        moi.img=sachet\n        moi.rect=sachet.get_rect()\n        moi.rect.center=pos\n        moi.rect.y-=moi.rect.height/2\n    def draw(moi):\n        f.blit(moi.img,moi.rect)\n        moi.rect=moi.rect.move(-vitesse,0)\ntuiles.append(Sol((noise.pnoise1(0)+0.5)*WIND/2))\nuol=Perso()\ntour=0\ni=0\ndefil1,defil2=0,width\ntry:\n    while b:\n        \n        pg.display.flip()\n\n        text = font.render(('Score: '+str(score)), True, (0,0,0))\n        textRect = text.get_rect()\n        \n        p = pg.key.get_pressed() # runs every frame the key is held down\n        if p[pg.K_d]:uol.rect.x+=dep\n        if p[pg.K_q]:uol.rect.x-=dep\n        vitesse+=0.001\n\n        tour+=1\n        if tour>=500/vitesse:\n            tour=0\n            
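# spawn the next floor tile at a height driven by 1-D Perlin noise,\n            # so consecutive platform heights vary smoothly instead of jumping at random\n            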
tuiles.append(Sol(noise.pnoise1(i/10)*WIND/3+WIND/2))\n            i+=1\n            i=i%10\n        f.blit(fond, (0,0))\n        defil1-=5\n        defil2-=5\n        if defil1<-width:\n            defil1=defil2+width\n        if defil2<-width:\n            defil2=defil1+width\n        pointer=pg.mouse\n        for event in pg.event.get(): # fires once, when the key is pressed\n            if event.type == pg.QUIT:\n                b = False\n                print(\"=> Fin du jeu babe\")\n            elif event.type == pg.KEYDOWN:\n                if event.dict['key']==pg.K_z:\n                    if uol.Assis:\n                        uol.jump=True\n            \n            \n            elif event.type==pg.MOUSEBUTTONUP:\n                \"\"\"if event.button==1: #left click\n                    pos=event.pos\n\n\n                \n\n                if event.button==3: #right click\n                    \n                elif event.button==4: #scroll up\n                    zoom+=0.01\n                elif event.button==5: #scroll down\n                    zoom-=0.01\"\"\"\n\n        \n        f.blit(text, (0,0))\n        for jack in tuiles[:]: # iterate over a copy; Sol.draw() can remove tiles from the list\n            jack.draw()\n        uol.draw()\n        fpsClock.tick(FPS)\nexcept :\n    pg.quit()\n    raise\nfinally:\n    pg.quit()\n","sub_path":"petite-et-fiou.py","file_name":"petite-et-fiou.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"239449080","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\n# =====================================================\n# Welcome to HackLand! We monkey patch the _get_rc_file\n# method of PyPIRCCommand so that we can read a .pypirc\n# that is located in the current directory. 
This enables\n# us to check it in with the code and not require\n# developers to create files in their home directory.\nfrom distutils.config import PyPIRCCommand\n\n\ndef get_custom_rc_file(self):\n home_pypi = os.path.join(os.path.expanduser('~'),\n '.pypirc')\n local_pypi = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '.pypirc')\n return local_pypi if os.path.exists(local_pypi) \\\n else home_pypi\n\nPyPIRCCommand._get_rc_file = get_custom_rc_file\n# Thank you for visiting HackLand!\n# =====================================================\n\nsetup(\n name='tempto',\n version='1.0',\n description='tempto will run your product tests using a custom SQL on Hadoop test harness.',\n author='Teradata Corporation',\n author_email='anton.petrov@teradata.com',\n url='https://github.com/teradatalabs/tempto',\n packages=['runner', 'runner.argparse_extensions'],\n include_package_data=True,\n package_data={'runner': ['*.xml']},\n keywords=['sql', 'hadoop', 'tempto'],\n entry_points={'console_scripts': ['tempto = runner.test_runner:main']},\n install_requires=['argparse>=1.3.0']\n)","sub_path":"tempto-launch-script/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"349106049","text":"# Python 2.7.13\r\n\r\nfrom buildings.core.dwelling import Dwelling\r\nfrom libs.animation import Animation\r\nfrom textures.dungeon import TexBuildings as db\r\nfrom units.dungeon.harpy import Harpy as Unit\r\n\r\n\r\nclass HarpyNest(Dwelling):\r\n\r\n cost = 130\r\n def __init__(self, position, team):\r\n super(HarpyNest, self).__init__(position, team)\r\n self.image = Animation(db.BUILDINGS, db.WIDTH * db.SCALE / 100).read(1, percent=80)\r\n self.name = \"Harpies' nest\"\r\n self.unit_type = \"Harpy\"\r\n self.unit = Unit\r\n self.max_health = 100\r\n self.current_health = 100\r\n self.armor = 50\r\n self.area = 30\r\n self.respawn_time = 40\r\n self.spawned_units = 0\r\n self.build_time = 30\r\n\r\n def render(self, screen, camera):\r\n x_offset = self.rect.x + self.width/2 - self.image.get_width() / 2 - 15\r\n y_offset = self.rect.y + self.height/2 - self.image.get_height() / 2 - 8\r\n\r\n screen.blit(self.image, camera.apply((x_offset, y_offset)))\r\n\r\n # pygame.draw.rect(screen, (255, 255, 0), camera.apply(self.rect), 1)\r\n if self.build_process < 100:\r\n self.render_building(screen, camera)\r\n else:\r\n self.draw_button(screen, camera)\r\n\r\n self.render_health_bar(self.image, screen, camera)\r\n\r\n","sub_path":"buildings/dungeon/harpynest.py","file_name":"harpynest.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"640933321","text":"# https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69\nimport gym\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Input\nfrom keras.layers.merge import Add, Multiply\nfrom keras.optimizers import Adam\nimport keras.backend as K\nimport tensorflow as tf\nimport random\nfrom collections import deque\n\n\n# determines how to assign values to each state, i.e. 
takes the state\n# and action (two-input model) and determines the corresponding value\nclass ActorCritic:\n def __init__(self, env, sess):\n self.env = env\n self.sess = sess\n\n self.learning_rate = 0.001\n self.epsilon = 1.0\n self.epsilon_decay = 0.99\n self.gamma = .95\n self.tau = .125\n\n self.memory = deque(maxlen=100)\n self.actor_model = self.create_actor_model()\n self.target_actor_model = self.create_actor_model()\n\n self.critic_model = self.create_critic_model()\n self.target_critic_model = self.create_critic_model()\n\n self.target_reward_model = self.create_reward_model(self.target_actor_model, self.target_critic_model)\n trainer = tf.train.RMSPropOptimizer(self.learning_rate)\n self.optimize = trainer.minimize(tf.negative(self.target_reward_model.output))\n\n self.saver = tf.train.Saver()\n self.sess.run(tf.global_variables_initializer())\n\n def create_reward_model(self, actor_model, critic_model):\n state_input = Input(shape=self.env.observation_space.shape)\n action = actor_model(state_input)\n reward = critic_model([state_input, action])\n\n reward_model = Model(input=state_input, output=reward)\n reward_model.compile(loss=\"mse\", optimizer='rmsprop')\n return reward_model\n\n def create_actor_model(self):\n state_input = Input(shape=self.env.observation_space.shape)\n h1 = Dense(24, activation='relu')(state_input)\n h2 = Dense(48, activation='relu')(h1)\n h3 = Dense(24, activation='relu')(h2)\n output = Dense(self.env.action_space.shape[0], activation='relu')(h3)\n\n model = Model(input=state_input, output=output)\n model.compile(loss=\"mse\", optimizer='rmsprop')\n return model\n\n def create_critic_model(self):\n state_input = Input(shape=self.env.observation_space.shape)\n state_h1 = Dense(24, activation='relu')(state_input)\n state_h2 = Dense(48)(state_h1)\n\n action_input = Input(shape=self.env.action_space.shape)\n action_h1 = Dense(48)(action_input)\n\n merged = Add()([state_h2, action_h1])\n merged_h1 = Dense(24, activation='relu')(merged)\n output = Dense(1, activation='relu')(merged_h1)\n model = Model(input=[state_input, action_input], output=output)\n\n adam = Adam(lr=0.001)\n model.compile(loss=\"mse\", optimizer=adam)\n return model\n\n def remember(self, cur_state, action, reward, new_state, done):\n self.memory.append([cur_state, action, reward, new_state, done])\n\n def forget_all(self):\n self.memory.clear()\n\n def _train_actor(self, samples):\n for sample in samples:\n cur_state = sample[0]\n self.sess.run(self.optimize,\n feed_dict={\n self.target_reward_model.input: cur_state,\n })\n\n def _train_critic(self, samples):\n for sample in samples:\n cur_state, action, reward, new_state, done = sample\n if not done:\n target_action = self.target_actor_model.predict(new_state)\n future_reward = self.target_critic_model.predict([new_state, target_action])[0][0]\n reward += self.gamma * future_reward\n self.critic_model.fit([cur_state, action], reward, verbose=0)\n\n def train_old(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n\n samples = random.sample(self.memory, batch_size)\n self._train_critic(samples)\n self._train_actor(samples)\n\n def train(self, clear_after=False):\n samples = self.memory\n self._train_critic(samples)\n self._train_actor(samples)\n if clear_after:\n self.forget_all()\n\n def _update_actor_target(self):\n self._update(self.actor_model, self.target_actor_model)\n\n def _update_critic_target(self):\n self._update(self.critic_model, self.target_critic_model)\n\n def _update(self, base, target):\n 
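# soft (Polyak) update: blend a fraction tau of the online network's weights into\n        # the target network each call, so the target tracks the online model slowly\n        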
base_weights = base.get_weights()\n        target_weights = target.get_weights()\n\n        for i in range(len(base_weights)):\n            target_weights[i] = (1. - self.tau) * target_weights[i] + self.tau * base_weights[i]\n        target.set_weights(target_weights)\n\n    def update_target(self):\n        self._update_actor_target()\n        self._update_critic_target()\n\n    def act(self, cur_state):\n        self.epsilon *= self.epsilon_decay\n        if np.random.random() < self.epsilon:\n            return self.env.action_space.sample()\n        return self.actor_model.predict(cur_state)\n\n    def get_filename(self):\n        return './tf_save_ac/{}.ckpt'.format(self.env.env.spec.id)\n\n    def save(self):\n        self.saver.save(self.sess, self.get_filename())\n        print(\"Saved\")\n\n    def restore(self):\n        self.saver.restore(self.sess, self.get_filename())\n        print(\"Restored\")\n\n\ndef main():\n    sess = tf.Session()\n    K.set_session(sess)\n    #env = gym.make(\"Pendulum-v0\")\n    env = gym.make(\"MountainCarContinuous-v0\")\n    actor_critic = ActorCritic(env, sess)\n    #actor_critic.restore()\n\n    num_session = 10\n    for _ in range(num_session):\n        num_episodes = 100\n        to_show = 200\n        for i in range(num_episodes):\n            cur_state = env.reset()\n            rewards = []\n            t = 0\n            done = False\n            while not done:\n                t += 1\n                if (i+1) % to_show == 0:\n                    env.render()\n                cur_state = cur_state.reshape((1, env.observation_space.shape[0]))\n                action = actor_critic.act(cur_state)\n                action = action.reshape((1, env.action_space.shape[0]))\n\n                new_state, reward, done, _ = env.step(action)\n                new_state = new_state.reshape((1, env.observation_space.shape[0]))\n\n                k = 20\n                area = 1.0*int(k * ((new_state[0][0] - env.env.min_position) / (env.env.goal_position - env.env.min_position)))\n                learning_reward = area\n                rewards.append(learning_reward)\n                learning_reward_array = np.array([learning_reward])\n\n                actor_critic.remember(cur_state, action, learning_reward_array, new_state, done)\n\n                cur_state = new_state\n\n                if done:\n                    print(\"Episode {} / {}\".format(i+1, num_episodes))\n                    print(\"Elapsed {} steps\".format(t))\n                    print('Average reward: {}'.format(np.mean(np.array(rewards))))\n                    print('Max reward: {}'.format(max(rewards)))\n                    print('-------------------')\n            actor_critic.train(True)\n            actor_critic.save()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"actor_critic_2.py","file_name":"actor_critic_2.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"310215216","text":"#coding:utf-8\nimport subprocess\nfrom Sound import Sound \nfrom Image import Image\nimport sys\nimport os\n\nclass Movie(object):\n\t\"\"\"docstring for Movie\"\"\"\n\tdef __init__(self, movieFilepath,imageFilepath,soundFilePath,mfccFilePath):\n\t\tsys.path.append('../')\n\t\timport database\n\t\t# paths for the movie and its derived image, sound and MFCC files\n\t\tself.movieFilepath = movieFilepath\n\t\tself.imageFilepath = imageFilepath\n\t\tself.soundFilePath = soundFilePath\n\t\tself.mfccFilePath = mfccFilePath\n\t\tself.database = database.Database(\"research_media_database\",\"tereka\",\"infinity14\")\n\n\tdef generateImageFromMovie(self):\n\t\tself.movieId = os.path.basename(self.movieFilepath).replace(\".mp4\",\"\")\n\t\tmovieData = self.database.getMovie(self.movieId)\n\t\tcenter = movieData[\"duration\"] / 2\n\t\timage = Image(self.imageFilepath)\n\t\timage.time_code_sec = center\n\t\timage.format_id = self.database.returnFormatId('png')\n\n\t\tsubprocess.call(\"ffmpeg -y -i '%s' -s 640x480 -vframes 1 -ss '%s' '%s'\" % (self.movieFilepath,center,self.imageFilepath), 
shell=True)\n\t\tself.database.registImage(image,self.movieId)\n\n\tdef generateSoundFromMovie(self):\n\t\t\"\"\"Cut the audio track out of the movie.\"\"\"\n\t\tsubprocess.call(\"ffmpeg -y -i '%s' -ar 16000 '%s'\" %(self.movieFilepath,\"temp.wav\"),shell=True)\n\n\t\tSoundData = Sound(\"temp.wav\",self.soundFilePath,self.mfccFilePath)\n\t\tSoundData.readFile()\n\t\tSoundData.extractMFCC()\n\t\tSoundData.format_id = self.database.returnFormatId('wav')\n\n\t\tself.database.registSound(SoundData,self.movieId)\n\n\ndef searchDir(dir):\n\tmovie_file = open(dir, \"r\")\n\n\tsys.path.append('../')\n\timport database\n\tdatabase = database.Database(\"research_media_database\",\"tereka\",\"infinity14\")\n\tdatabase.registFormat([\"png\",\"wav\"])\n\n\tfor file_line in movie_file:\n\t\tfile_status = file_line.split(' ')\n\t\tfile = file_status[0]\n\t\tif file_status[1] == '1':\n\t\t\tmovieFilepath = file\n\t\t\timageFilepath = file.replace(\".mp4\",\".png\")\n\t\t\tsoundFilePath = file.replace(\".mp4\",\".wav\")\n\t\t\tmfccFilePath = file.replace(\".mp4\",\".txt\")\n\n\t\t\tmovie = Movie(movieFilepath,imageFilepath,soundFilePath,mfccFilePath)\n\t\t\tmovie.generateImageFromMovie()\n\t\t\tmovie.generateSoundFromMovie()\n\nif __name__ == '__main__':\n\targv = sys.argv\n\tsearchDir(argv[1])\n","sub_path":"DataCollect/Media/Movie.py","file_name":"Movie.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"25190401","text":"import plotly\nimport plotly.graph_objs as go\n\nfrom rl_trader.data_processing import klines_to_ohcl\n\n\ndef build_candlestick_graph(klines_list: dict) -> plotly.graph_objs:\n    \"\"\"Transform klines Binance API payload into a plotly graph\n\n    Args:\n        klines_list (dict): klines payload from the Binance API\n\n    Returns:\n        plotly.graph_objs: updated plotly candlestick graph\n    \"\"\"\n\n    ohcl_dict = klines_to_ohcl(klines_list)\n\n    layout = {\n        \"title\": \"Reinforcement learning Trader\",\n        \"xaxis\": go.layout.XAxis(\n            title=go.layout.xaxis.Title(text=\"Local time\"),\n            rangeslider={\"visible\": False},\n        ),\n        \"yaxis\": go.layout.YAxis(title=go.layout.yaxis.Title(text=\"BTC/USDT\")),\n        \"width\": 1000,\n        \"height\": 800,\n    }\n\n    fig = go.Figure(\n        data=[\n            go.Candlestick(\n                x=ohcl_dict[\"t\"],\n                open=ohcl_dict[\"o\"],\n                high=ohcl_dict[\"h\"],\n                low=ohcl_dict[\"l\"],\n                close=ohcl_dict[\"c\"],\n            )\n        ],\n        layout=layout,\n    )\n    return fig\n","sub_path":"rl_trader/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"280984521","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 1 08:40:03 2017\n\n@author: ThinkPad\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pymysql.cursors\nimport pymysql\n\ndef hupubxj():\n    url_id = '/bbs/34'\n    url_head = 'https://m.hupu.com'\n    url = url_head + url_id\n    titles = []\n    headers = {'User-Agent':'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36'\n               }\n    while url:\n        data = requests.get(url, headers=headers).content\n        soup = BeautifulSoup(data, 'html.parser')\n        topic_lists = soup.find_all('li')\n        print('爬取中...')\n        print(url) \n        if soup.find('a', attrs = {'dace-node':'5050_nextpage'})['class'] == ['disabled']:\n            url = None\n        else:\n            next_page = soup.find('a', attrs={'dace-node':'5050_nextpage'})['href']\n            url = url_head + next_page\n        \n        for topic_list 
in topic_lists:\n            try:\n                topic_title = topic_list.find('h3').getText()\n                print(topic_title)\n                titles.append(topic_title)\n            except:\n                continue\n    return titles\n\ndef create_table():\n    print('==================爬取虎扑步行街贴子标题============================')\n    titles = hupubxj()\n    print('爬取成功,正在写入数据库...')\n    conn = pymysql.connect(host='localhost',\n                           port=3306,\n                           user='root',\n                           password='12345678',\n                           db='hupu',\n                           charset='UTF8')\n    cur = conn.cursor()\n    # cur.execute(\"drop table if exists topics_titles\")\n    cur.execute(\"create table if not exists topics_titles(name char(200))\")\n    for title in titles:\n        # use a parameterized query so titles containing quotes cannot break the SQL\n        cur.execute(\"insert into topics_titles(name) values (%s)\", (title,))\n    conn.commit()\n    print('写入成功!')\n    \n    \nif __name__ == '__main__':\n    create_table()","sub_path":"space_history/python_space/history/房屋数据/Python-Spider-github-1108/spider_hupu/bxj_hupu.py","file_name":"bxj_hupu.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"371201400","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nTEMPLATES_DIR = os.path.join(BASE_DIR, \"view\", \"templates\")\nPUBLIC_DIR = os.path.join(BASE_DIR, \"public\")\nROUTES_DIR = os.path.join(BASE_DIR, \"controller\", \"routes\")\n\nNOT_FOUND_FILE = os.path.join(TEMPLATES_DIR, \"404.html\")\n\nMIDDLEWARES = [\n    \"application.henavel.controller.middlewares.session_middleware.SessionMiddleware\"\n]\n","sub_path":"application/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"548718282","text":"from random import randint\ncounter1 = 0\ncounter2 = 0\ncounter3 = 0\ncounter4 = 0\ncounter5 = 0\ncounter6 = 0\nfor i in range(100):\n    i = randint(1,6)\n    if i == 1:\n        counter1 += 1\n    elif i == 2:\n        counter2 += 1\n    elif i == 3:\n        counter3 += 1\n    elif i == 4:\n        counter4 += 1\n    elif i == 5:\n        counter5 += 1 \n    elif i == 6:\n        counter6 += 1 \nprint(counter1)\nprint(counter2)\nprint(counter3)\nprint(counter4)\nprint(counter5)\nprint(counter6)","sub_path":"02-ControlStructures/Exercises-02/af_40.py","file_name":"af_40.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"652282091","text":"# Copyright 2008-2013 the original author or authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport random\nimport datetime\nfrom urllib.parse import quote\nimport logging\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nfrom django.contrib.humanize.templatetags.humanize import intword\n\nfrom pubbot.dispatch import receiver\nfrom pubbot.ratelimit import enforce_rate_limit\nfrom .signals import join\nfrom .utils import chat_receiver\n\n\nlogger = logging.getLogger(__name__)\n\n\n@receiver(join)\n@enforce_rate_limit(\"1/60s\") # Only say hello once every 60s\n@enforce_rate_limit(\"1/d\", limit_by=[\"user\"]) # Only say hello to a 
given user once a day\ndef hello(sender, user, channel, is_me, **kwargs):\n if not is_me:\n channel.msg(random.choice([\n \"hi %s\",\n \"lo %s\",\n \"lo. how you doing, %s?\",\n \"%s, we all love you\",\n \"Help me, Obi Wan %s, you're my only hope\",\n \"Wave %s, wave\",\n \"Give us a smile %s\",\n \"%s! We've missed you!\",\n \"%s is in the room. I have a bad feeling about this.\",\n \"ewwo %s\",\n \"yo, %s\",\n \"Greetings, %s\",\n \"wotcha, %s\",\n \"Frak, it's %s\",\n ]) % user)\n\n\n@chat_receiver(r'https://twitter.com/(?P[\\d\\w]+)/status/(?P[\\d]+)')\ndef twitter_link(sender, account, id, **kwargs):\n response = requests.get('https://twitter.com/%s/status/%s' % (account, id))\n\n if response.status_code != 200:\n logger.critical(\"Unable to lookup tweet %s/%s\" % (account, id))\n return\n\n bs = BeautifulSoup(response.text)\n try:\n tweet = bs.select(\"div.permalink-tweet-container p.tweet-text\")[0].text\n except Exception:\n logger.exception(\"Failed to scrape twitter html\")\n return {\n 'content': 'Look it up yourself, twitter changed the API again',\n 'useful': True,\n }\n\n return {\n 'content': '[ %s: %s ]' % (account, tweet),\n 'useful': True,\n }\n\n\n@chat_receiver(r'https://github.com/(?P[\\d\\w]+)/(?P[\\d\\w]+)/pull/(?P[\\d]+)')\ndef pull_request(sender, user, repo, id, **kwargs):\n url = 'https://api.github.com/repos/%(user)s/%(repo)s/pulls/%(id)s' % locals()\n\n pull = requests.get(url).json()\n title = pull['title']\n name = pull['user']['login']\n\n return {\n 'content': '[ %s: %s ]' % (name, title),\n 'useful': True,\n }\n\n\n@chat_receiver(r'https://alpha.app.net/(?P[\\d\\w]+)/post/(?P[\\d]+)')\ndef on_appdotnet_link(sender, account, id, **kwargs):\n res = requests.get('https://alpha-api.app.net/stream/0/posts/%s' %\n id).json()\n tweet = res['data']['text']\n screen_name = res['data']['user']['username']\n return {\n 'content': '[ %s: %s ]' % (screen_name, tweet),\n 'useful': True,\n }\n\n\n@chat_receiver(r'^(image|img) me (?P[\\s\\w]+)')\ndef image_search(sender, query, **kwargs):\n url = 'https://ajax.googleapis.com/ajax/services/search/images'\n results = requests.get(url, params=dict(\n v='1.0',\n rsz=8,\n q=query,\n )).json()\n images = results['responseData']['results']\n\n if images:\n def_image = 'https://is0.4sqi.net/userpix/FFUB3WWFGXUNFYDP.gif'\n image = random.choice(images).get('url', def_image)\n return {\n 'content': image,\n }\n\n return {\n 'content': \"There are no images matching '%s'\" % query,\n }\n\n\nWIKTIONARY_URL_FORMAT = 'https://en.wiktionary.org/w/api.php?action=query&prop=extracts&titles={titles}&format=json'\n\n\n@chat_receiver(re.compile(r'^(?Pso|very|much|many)\\s+(?P[\\w-]+)[\\.\\?!]?$', re.I))\ndef doge(sender, prefix, word, **kwargs):\n type_prefixes = {\n 'Verb': ['so', 'very', 'much', 'many'],\n 'Noun': ['so', 'very'],\n 'Adjective': ['much', 'many'],\n 'Adverb': ['much', 'many'],\n }\n response = 'wow'\n word_type = None\n # Adding word.title() may catch some extra English words, but also yields foreign words which we don't want.\n page_titles = '|'.join([quote(word)])\n url = WIKTIONARY_URL_FORMAT.format(titles=page_titles)\n resp = requests.get(url)\n data = resp.json()\n pages = data['query']['pages']\n for page_id in pages:\n if page_id == u'-1':\n continue\n doc = BeautifulSoup(pages[page_id]['extract'])\n for synonym_heading in doc.find_all(re.compile('^h[3-9]$')):\n if synonym_heading.string != u'Synonyms':\n continue\n\n for sibling in synonym_heading.previous_siblings:\n if not re.match(r'^h[3-5]', sibling.name or 
''):\n                    continue\n                if sibling.string not in type_prefixes:\n                    continue\n                word_type = sibling.string\n                break\n\n            if not word_type:\n                continue\n            prefixes = type_prefixes[word_type]\n            if prefix in prefixes:\n                prefixes.remove(prefix)\n            response_prefix = random.choice(prefixes)\n\n            synonyms = set()\n            for list_ in synonym_heading.next_siblings:\n                if list_.name == u'ul':\n                    for item in list_.children:\n                        if item.name != 'li':\n                            continue\n                        match = re.match(r'^(?:\\(([^,]+(?:,\\s+[^,]+)?)\\):\\s+)?([^,]+(?:,\\s+[^,]+)?)', item.string or '')\n                        if not match:\n                            continue\n                        if match.group(1):\n                            synonyms |= set(match.group(1).split(', '))\n                        if match.group(2):\n                            synonyms |= set(match.group(2).split(', '))\n                    break\n            synonyms = set(map(str.strip, synonyms))\n            synonyms = list(filter(lambda x: ' ' not in x, synonyms))\n            if not synonyms:\n                continue\n            synonym = random.choice(synonyms)\n\n            response = '{prefix} {word}'.format(prefix=response_prefix,\n                                                word=synonym)\n            break\n        break\n\n    return {'content': response}\n\n\n@chat_receiver(r'^fight:[\\s]*(?P<word1>.*)(?:[;,]| vs\\.? | v\\.? )[\\s]*(?P<word2>.*)')\ndef fight(sender, word1, word2, **kwargs):\n    def _score(word):\n        r = requests.get('http://www.google.co.uk/search', params={\n            'q': word,\n            'safe': 'off'\n        })\n        soup = BeautifulSoup(r.text)\n        score_string = soup.find(id='resultStats').text\n        if \"(\" in score_string:\n            score_string, other = score_string.split(\"(\", 1)\n        return int(''.join(re.findall(r'\\d+', score_string)))\n\n    score1 = _score(word1)\n    score2 = _score(word2)\n\n    winner = word1 if score1 > score2 else word2\n\n    score1 = intword(score1)\n    score2 = intword(score2)\n\n    return {\n        'content': '%(word1)s (%(score1)s) vs %(word2)s (%(score2)s) -- %(winner)s wins!' %\n                   locals(),\n    }\n\n\n@chat_receiver(r'^blame')\ndef blame(sender, channel, **kwargs):\n    try:\n        nick = random.choice(channel.users)\n    except IndexError:\n        nick = None\n\n    if not nick: # nick.is_me:\n        return {\"content\": \"It's all my fault!\"}\n\n    return {\"content\": \"It's all %s's fault!\" % nick}\n\n\n@chat_receiver(r'christmas')\n@enforce_rate_limit('1/d')\ndef christmas(sender, **kwargs):\n    today = datetime.datetime.today()\n\n    # We've probably had enough christmas to last us till next year\n    if today.month == 12 and today.day > 25:\n        return\n\n    if today.month == 12 and today.day == 25:\n        return {\n            'content': \"It's christmas \\\\o/\",\n        }\n\n    christmas = datetime.datetime(today.year, 12, 25)\n    delta = christmas - today\n    if delta.days > 60:\n        return\n\n    return {\n        'content': \"only %s days 'til Christmas!\" % delta.days,\n    }\n","sub_path":"pubbot/conversation/receivers.py","file_name":"receivers.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"252610940","text":"import time\nimport subprocess\nimport tempfile\nimport os\n\nimport requests\n\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.exceptions import ConnectionError, NotFoundError\n\nfrom ..test_cases import TestCase, SkipTest\n\ndata_dir = None\n\nCMD = \"\"\"\n    elasticsearch \\\\\n        -f \\\\\n        -D es.cluster.name=%(cluster_name)s \\\\\n        -D es.node.name=test_name \\\\\n        -D es.http.port=%(port)s \\\\\n        -D es.gateway.type=none \\\\\n        -D es.index.store.type=memory \\\\\n        -D es.discovery.zen.ping.multicast.enabled=false \\\\\n        -D es.path.data=%(data_dir)s \\\\\n        -D es.pidfile=%(pidfile)s \\\\\n    >/dev/null 2>&1\n\"\"\"\n\nserver = None\npidfile = tempfile.mktemp()\n\ndef get_client(**kwargs):\n    kw = {}\n    if 'TEST_ES_CONNECTION' in os.environ:\n        from 
elasticsearch import connection\n        kw['connection_class'] = getattr(connection, os.environ['TEST_ES_CONNECTION'])\n    kw.update(kwargs)\n    return Elasticsearch([os.environ['TEST_ES_SERVER']], **kw)\n\n\ndef setup():\n    global server\n\n    # if using a running ES instance, don't attempt to start our own\n    if 'TEST_ES_SERVER' not in os.environ:\n        # check installed\n        if subprocess.call('which elasticsearch >/dev/null 2>&1', shell=True) != 0:\n            raise SkipTest(\"No Elasticsearch server, skipping integration tests.\")\n\n        args = {\n            'cluster_name': 'es_client_test',\n            'port': 9900,\n            'data_dir': tempfile.gettempdir(),\n            'pidfile': pidfile\n        }\n\n        # check running\n        try:\n            requests.get('http://localhost:%(port)s' % args)\n        except requests.ConnectionError:\n            pass\n        else:\n            raise SkipTest('Elasticsearch already running!')\n\n\n        cmd = CMD % args\n\n        server = subprocess.Popen(cmd, shell=True)\n        os.environ['TEST_ES_SERVER'] = 'localhost:%(port)s' % args\n\n    client = get_client()\n\n    # wait for yellow status\n    for _ in range(100):\n        time.sleep(.1)\n        try:\n            client.cluster.health(wait_for_status='yellow')\n            break\n        except ConnectionError:\n            continue\n\n    else:\n        # timeout\n        raise SkipTest(\"Elasticsearch failed to start.\")\n\n\ndef teardown():\n    if server is not None:\n        with open(pidfile) as pidf:\n            pid = pidf.read()\n        os.kill(int(pid), 15)\n        server.wait()\n\nclass ElasticTestCase(TestCase):\n    client = None\n    def setUp(self):\n        if ElasticTestCase.client is None:\n            ElasticTestCase.client = get_client()\n        self.client = ElasticTestCase.client\n\n    def tearDown(self):\n        self.client.indices.delete()\n        try:\n            self.client.indices.delete_template('*')\n        except NotFoundError:\n            pass\n\n","sub_path":"test_elasticsearch/test_server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"413793431","text":"graph1 = {\n    \"a\": [\"b\", \"c\", \"d\"],\n    \"b\": [],\n    \"c\": [\"d\"],\n    \"d\": []\n}\n\ngraph2 = {\n    \"a\": [\"b\", \"c\", \"d\"],\n    \"b\": [],\n    \"c\": [\"d\"],\n    \"d\": [],\n    \"e\": [\"g\", \"f\", \"q\"],\n    \"g\": [],\n    \"f\": [],\n    \"q\": []\n}\n\ngraph3 = {\n    \"a\": [\"b\", \"c\", \"d\"],\n    \"b\": [],\n    \"c\": [\"d\", \"e\"],\n    \"d\": [],\n    \"e\": [\"g\", \"f\", \"q\"],\n    \"g\": [\"c\"],\n    \"f\": [],\n    \"q\": []\n}\n\nfrom collections import deque\n\nGRAY, BLACK = 0, 1\n\ndef topological(graph):\n    order, enter, state = deque(), set(graph), {}\n\n    def dfs(node):\n        state[node] = GRAY\n        for k in graph.get(node, ()):\n            sk = state.get(k, None)\n            if sk == GRAY: raise ValueError(\"cycle\")\n            if sk == BLACK: continue\n            enter.discard(k)\n            dfs(k)\n        order.appendleft(node)\n        state[node] = BLACK\n\n    while enter: dfs(enter.pop())\n    return order\n\n\ndef kahn_topsort(graph):\n    in_degree = { u : 0 for u in graph }\n    for u in graph:\n        for v in graph[u]:\n            in_degree[v] += 1\n\n    Q = deque()\n    for u in in_degree:\n        if in_degree[u] == 0:\n            Q.appendleft(u)\n\n    L = []\n\n    while Q:\n        u = Q.pop()\n        L.append(u)\n        for v in graph[u]:\n            in_degree[v] -= 1\n            if in_degree[v] == 0:\n                Q.appendleft(v)\n\n    if len(L) == len(graph):\n        return L\n    else:\n        return []","sub_path":"scripts/topo_sort.py","file_name":"topo_sort.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"238185537","text":"#########################################\n#\t\tInputs to the simulation\t\t#\n#########################################\nimport argparse\nparser = 
argparse.ArgumentParser(description='Configuration options for this simulation.')\n\nparser.add_argument('--program',\n dest=\"program\",\n required=True,\n action='store',\n help='The program trace to run in the simulator')\n\nparser.add_argument('--tomconfig',\n dest=\"tomconfig\",\n required=True,\n action='store',\n help='The JSON file for configuring the simulator')\n\nparser.add_argument('--output',\n dest=\"output\",\n required=True,\n action='store',\n help='The JSON file to output stats to')\n\n# Parse arguments\nargs = parser.parse_args()\n\n\n## Print info ##\nprint(\"Running simulator using program \" + args.program)\nprint(\"Configuration in \" + args.tomconfig)\n\n\n#####################################\n#\tLoad JSON file with latencies\t#\n#####################################\nimport json\nwith open(args.tomconfig, 'r') as inp_file:\n sim_config=json.load(inp_file)\n\nint_config = sim_config.get(\"integer\")\ndiv_config = sim_config.get(\"divider\")\nmult_config = sim_config.get(\"multiplier\")\nls_config = sim_config.get(\"ls\")\ncache_config = sim_config.get(\"cache\")\nclock_speed = sim_config.get(\"clock\")\n\nprint(\"Sim Config:\")\nprint(json.dumps(sim_config, indent=2))\n\n\n#####################################\n#\tSST stuff \t#\n#####################################\nimport sst\n\ncache_link_latency = \"100ps\"\n\ncore = sst.Component(\"XSim_core\",\"XSim.core\")\n\ncore.addParams({\n \"clock_frequency\": clock_speed,\n \"program\": args.program,\n \"output\": args.output,\n\t\"verbose\": 0,\n \"int_number\": int_config.get(\"number\"),\n \"int_resnumber\": int_config.get(\"resnumber\"),\n \"int_latency\": int_config.get(\"latency\"),\n \"div_number\": div_config.get(\"number\"),\n \"div_resnumber\": div_config.get(\"resnumber\"),\n \"div_latency\": div_config.get(\"latency\"),\n \"mult_number\": mult_config.get(\"number\"),\n \"mult_resnumber\": mult_config.get(\"resnumber\"),\n \"mult_latency\": mult_config.get(\"latency\"),\n \"ls_number\": ls_config.get(\"number\"),\n \"ls_resnumber\": ls_config.get(\"resnumber\"),\n \"ls_latency\": ls_config.get(\"latency\")\n})\n\nmemory = sst.Component(\"data_memory\", \"memHierarchy.MemController\")\nmemory.addParams(\n{\n 'backend':\t\t\t\t\"memHierarchy.simpleMem\",\n\t'backend.mem_size':\t\t\"10MiB\",\n\t'clock':\t\t\t\t\"1GHz\",\n 'backend.access_time': \"100ns\"\n})\n\nl1_cache = sst.Component(\"l1cache\", \"memHierarchy.Cache\")\nl1_cache.addParams({\n \"associativity\": cache_config.get(\"associativity\"),\n \"cache_line_size\": 16, # Same as block size\n \"cache_size\": cache_config.get(\"size\"),\n \"cache_frequency\": clock_speed, # Same as cpu\n \"access_latency_cycles\":1,\n \"L1\": True\n})\n\ncpu_cache_link = sst.Link(\"cpu_cache_link\")\ncache_data_memory_link = sst.Link(\"cache_data_memory_link\")\n\ncpu_cache_link.connect(\n(core, \"data_memory_link\", cache_link_latency),\n(l1_cache, \"high_network_0\", cache_link_latency)\n)\n\ncache_data_memory_link.connect(\n(l1_cache, \"low_network_0\", cache_link_latency),\n(memory, \"direct_link\", cache_link_latency)\n)\n\n# cpu_data_memory_link = sst.Link(\"cpu_data_memory_link\")\n# cpu_data_memory_link.connect(\n# (core, \"data_memory_link\", cache_link_latency),\n# (memory, \"direct_link\", cache_link_latency)\n# )\n\n# Enable SST Statistics Outputs for this simulation\n# statLevel = 16\n# statFile = \"stats.csv\"\n# sst.setStatisticLoadLevel(statLevel)\n# sst.enableAllStatisticsForAllComponents({\"type\":\"sst.AccumulatorStatistic\"})\n#\n# 
sst.setStatisticOutput(\"sst.statOutputCSV\")\n# sst.setStatisticOutputOptions( {\n#     \"filepath\" : statFile,\n# \t\"separator\" : \", \"\n# \t} )\n","sub_path":"Tomasulo/XSim/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"285377178","text":"import plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom IPython.display import IFrame\n\ndef ex1():\n    labels = ['O2','H2','CO2','N2']\n    values = [4500, 2500, 1053, 500]\n\n    fig = go.Figure(data=[go.Pie(labels= labels, values= values)])\n    fig.show()\n# ex1()\n\ndef ex2():\n    colors = ['darkgreen', 'green', 'lightgreen', 'skyblue']\n    fig = go.Figure(data=[go.Pie(labels=['Oxygen', 'Hydrogen','Carbon dioxide','Nitrogen'],values=[4500, 2500, 1053, 500])])\n    fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size = 20,\n                      marker = dict(colors= colors,\n                                    line = dict(color = '#FF6781', width=2)))\n    fig.show()\n# ex2()\n\ndef ex3():\n    labels = ['Cricket' , 'Hockey' , 'Tennis / Shuttle', 'Football']\n    values = [4500, 2500, 1053, 500]\n    fig = go.Figure(data=[go.Pie(labels= labels, values= values, hole=0.3)])\n    fig.show()\n# ex3()\n\ndef ex4():\n    labels = ['US', 'China', 'European Union', 'Russian Federation', 'Brazil', 'India', 'Rest Of World']\n\n    fig = make_subplots(rows=1, cols=2, specs=[[{'type' : 'domain'}, {'type' : 'domain'}]])\n    fig.add_trace(go.Pie(labels= labels, values=[16, 15, 12, 6, 5, 4, 42], name='GHG Emissions'), 1, 1)\n    fig.add_trace(go.Pie(labels= labels, values=[27, 12, 25, 8, 1, 3, 25], name='CO2 Emissions'),1,2)\n    fig.update_traces(hole=0.4, hoverinfo='label+percent+name')\n\n    fig.update_layout(title_text='Global Emissions 1990-2011',\n                      annotations=[dict(text='GHG', x=0.18, y=0.5, font_size=20, showarrow=False),\n                                   dict(text='CO2', x=0.82, y=0.5, font_size=20, showarrow=False)])\n    fig.show()\n# ex4()\n\ndef ex5():\n    labels = ['1st', '2nd', '3rd', '4th', '5th']\n    night_colors = ['rgb(56, 75, 126)', 'rgb(18, 36, 37)', 'rgb(34, 53, 101)',\n                    'rgb(36, 55, 57)', 'rgb(6, 4, 4)']\n    sunflowers_colors = ['rgb(177, 127, 38)', 'rgb(205, 152, 36)', 'rgb(99, 79, 37)',\n                         'rgb(129, 180, 179)', 'rgb(124, 103, 37)']\n    irises_colors = ['rgb(33, 75, 99)', 'rgb(79, 129, 102)', 'rgb(151, 179, 100)',\n                     'rgb(175, 49, 35)', 'rgb(36, 73, 147)']\n    cafe_colors = ['rgb(146, 123, 21)', 'rgb(177, 180, 34)', 'rgb(206, 206, 40)',\n                   'rgb(175, 51, 21)', 'rgb(35, 36, 21)']\n\n    # Create subplots, using 'domain' type for pie charts\n    specs = [[{'type':'domain'}, {'type':'domain'}], [{'type':'domain'}, {'type':'domain'}]]\n    fig = make_subplots(rows=2, cols=2, specs=specs)\n\n    # Define pie charts\n    fig.add_trace(go.Pie(labels = labels, values = [38, 27, 18, 10, 7], name='Starry Night', marker_colors = night_colors),1,1)\n    fig.add_trace(go.Pie(labels = labels, values = [28, 26, 21, 15, 10], name='Sun Flowers', marker_colors = sunflowers_colors),1,2)\n    fig.add_trace(go.Pie(labels = labels, values = [30, 19, 16, 14, 13], name='Irises', marker_colors = irises_colors),2,1)\n    fig.add_trace(go.Pie(labels = labels, values = [31, 24, 19, 18, 8], name='The Night Cafe', marker_colors = cafe_colors),2,2)\n\n    # Tune layout and hover info\n    fig.update_traces(hoverinfo = 'label+percent+name', textinfo = 'none')\n    fig.update(layout_title_text = 'Van Gogh: 5 Most Prominent Colors Shown Proportionally', layout_showlegend=False)\n    fig = go.Figure(fig)\n    fig.show()\n# ex5()\n\ndef ex6():\n    labels = ['Asia', 'Europe', 
'Africa', 'Americas', 'Oceania']\n    fig = make_subplots(1,2, specs=[[{'type':'domain'}, {'type':'domain'}]],\n                        subplot_titles=['1980','2018'])\n    fig.add_trace(go.Pie(labels = labels, values = [4, 7, 1, 7, 0.5], scalegroup = 'one', name='World GDP 1980'),1,1)\n    fig.add_trace(go.Pie(labels = labels, values = [21, 15, 3, 19, 1], scalegroup = 'one', name='World GDP 2018'),1,2)\n    fig.update_layout(title_text='World GDP')\n    fig.show()\n# ex6()\n\ndef ex7():\n    # IFrame only renders when displayed in a Jupyter notebook; in a plain script its return value is discarded\n    IFrame(src=\"https://dash-simple-apps.plotly.host/dash-pieplot\", width=\"100%\", height=\"650px\", frameBorder=\"0\")\nex7()","sub_path":"plotly_practise/example_pie_charts.py","file_name":"example_pie_charts.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"47132485","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom cocoex import Problem\nfrom cocoex.utilities import about_equal\nimport numpy as np\nimport sys\n\ndef read_test_vectors(fd):\n    \"\"\"\n    Read the number of test vectors, followed by the 40D test vectors\n    from ${fd}. Return a list of numpy arrays containing the test vectors.\n    \"\"\"\n    number_of_test_vectors = int(fd.readline().rstrip())\n    ## Preallocate the testvectors list\n    test_vectors = number_of_test_vectors * [None]\n    for i in range(number_of_test_vectors):\n        line = fd.readline().rstrip()\n        test_vectors[i] = np.fromstring(line, dtype=float, sep=\" \") \n    return test_vectors\n\ndef process_test_cases(fd, suit_name, test_vectors):\n    \"\"\"\n    Read test cases for benchmark suite ${suit_name} from ${fd} and evaluate them.\n    \"\"\"\n    number_of_testcases = 0\n    number_of_failures = 0\n    previous_function_id = None\n    for test_case in fd:\n        number_of_testcases += 1\n\n        ## A test case is a triple (function_id, test_vector_id,\n        ## expected_y) separated by a single space. \n        function_id, test_vector_id, expected_y = test_case.split(\" \")\n        ## Do type conversion. Python gurus probably know an elegant\n        ## one line solution...\n        function_id = int(function_id)\n        test_vector_id = int(test_vector_id)\n        expected_y = float(expected_y)\n\n        ## We cache the problem instances because creating an instance\n        ## can be expensive depending on the transformation.\n        if function_id != previous_function_id:\n            problem = Problem(suit_name, function_id)\n            previous_function_id = function_id\n        test_vector = test_vectors[test_vector_id]\n        y = problem(test_vector[:problem.number_of_variables])\n        if not about_equal(y, expected_y):\n            number_of_failures += 1\n            if number_of_failures < 100:\n                print(\"%8i %8i FAILED expected=%.8e observed=%.8e\" % (function_id, test_vector_id, expected_y, y))\n            elif number_of_failures == 100:\n                print(\"... 
further failed tests suppressed ...\")\n    print(\"%i of %i tests passed (failure rate %.2f%%)\" % (number_of_testcases - number_of_failures, number_of_testcases, (100.0 * number_of_failures) / number_of_testcases))\n    if number_of_failures > 0: \n        sys.exit(-1)\n\ndef process_testfile(testfile):\n    with open(testfile, \"r\") as fd:\n        test_suit = fd.readline().rstrip()\n        test_vectors = read_test_vectors(fd)\n        process_test_cases(fd, test_suit, test_vectors)\n\ndef main(args):\n    for arg in args:\n        process_testfile(arg)\n\nif __name__ == '__main__':\n    main(sys.argv[1:])\n","sub_path":"build/python/coco_test.py","file_name":"coco_test.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"571053596","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: sinannasir\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport project_backend as pb\r\nimport time\r\nimport collections\r\nimport json\r\nimport DQN\r\nimport copy\r\nimport os\r\nimport argparse\r\n\r\ndef main(args):\r\n    \r\n    json_file = args.json_file\r\n    json_files_train = args.json_files_train\r\n    json_file_policy_train = args.json_file_policy_train\r\n    \r\n    with open ('./config/deployment/'+json_file+'.json','r') as f:\r\n        options = json.load(f)\r\n    with open ('./config/policy/'+json_file_policy_train+'.json','r') as f:\r\n        options_policy = json.load(f)\r\n    if not options_policy['cuda']:\r\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\r\n    import tensorflow as tf\r\n    \r\n    for json_file_train in json_files_train:\r\n        with open ('./config/deployment/'+json_file_train+'.json','r') as f:\r\n            options_train = json.load(f)\r\n        included_train_episodes = []\r\n        tot_train_episodes = int(options_train['simulation']['total_samples']/options_train['train_episodes']['T_train'])\r\n        N = options['simulation']['N']\r\n        if N <=20:\r\n            for i in range(tot_train_episodes+1):\r\n                if i<=15 or i%5==0:\r\n                    included_train_episodes.append(i)\r\n        else:\r\n            included_train_episodes.append(tot_train_episodes)\r\n        \r\n        train_tot_simulations = options_train['simulation']['num_simulations']\r\n        tot_test_episodes = int(options['simulation']['total_samples']/options['train_episodes']['T_train'])\r\n        inner_train_networks = [[]]*tot_test_episodes\r\n        for i in range(tot_test_episodes):\r\n            if options['simulation']['test_include'] == 'all':\r\n                inner_train_networks[i] = 0\r\n            else:\r\n                inner_train_networks[i] = list(np.random.randint(0,train_tot_simulations,options['simulation']['test_include']))\r\n        ## Number of samples\r\n        total_samples = options['simulation']['total_samples']\r\n        \r\n        N = options['simulation']['N']\r\n        \r\n        \r\n        # simulation parameters\r\n        train_episodes = options['train_episodes']\r\n        mobility_params = options['mobility_params']\r\n        mobility_params['alpha_angle'] = options['mobility_params']['alpha_angle_rad'] * np.pi #radian/sec\r\n        #Some defaults\r\n        Pmax_dB = 46.0-30\r\n        Pmax = np.power(10.0,Pmax_dB/10)\r\n        n0_dB = -104.0-30\r\n        noise_var = np.power(10.0,n0_dB/10)\r\n        # Hyper parameters\r\n        neightresh = noise_var*options_policy['neightresh']\r\n        \r\n        for ep in included_train_episodes:\r\n            #\r\n            \r\n            file_path = './simulations/channel/%s_network%d'%(json_file,0)\r\n            data = np.load(file_path+'.npz')\r\n            \r\n            H_all = data['arr_1']\r\n            H_all_2 = []\r\n            for i in range(total_samples):\r\n                H_all_2.append(H_all[i]**2)\r\n            \r\n            weights = []\r\n            for loop in range(total_samples):\r\n                weights.append(np.array(np.ones(N)))\r\n            \r\n            time_calculating_strategy_takes = []\r\n            \r\n            # 
Virtual neighbor placer\r\n            neighbors_in = collections.deque([],2)\r\n            neighbors = collections.deque([],2)\r\n            \r\n            sims_pos_p = np.zeros(N).astype(int) - 1\r\n            \r\n            policy = DQN.DQN(options,options_policy,N,Pmax,noise_var)\r\n            \r\n            strategy_translation = np.zeros(policy.num_actions)\r\n            strategy_translation[0] = 0.0 # Tx power 0\r\n            Pmin_dB = 10.0-30\r\n            # Calculate steps in dBm\r\n            strategy_translation_dB_step = (Pmax_dB-Pmin_dB)/(policy.num_actions-2)\r\n            for i in range(1,policy.num_actions-1):\r\n                strategy_translation[i] = np.power(10.0,((Pmin_dB+(i-1)*strategy_translation_dB_step))/10)\r\n            strategy_translation[-1] = Pmax\r\n            \r\n            time_calculating_strategy_takes = []\r\n            time_optimization_at_each_slot_takes = []\r\n            sum_rate_distributed_policy_episode = []\r\n            p_strategy_all_apisode = []\r\n            i_train = 0\r\n            \r\n            sum_rate_distributed_policy = []\r\n            sum_rate_list_distributed_policy = collections.deque([],2)\r\n            # Initial allocation is just random\r\n            p_central = Pmax * np.random.rand(N)\r\n            p_strategy = np.array(p_central) # strategy is a completely different object\r\n            p_strategy_current = np.array(p_strategy)\r\n            \r\n            p_strategy_all=[]\r\n            \r\n            with tf.Session() as sess:\r\n                sess.run(policy.init)\r\n                policy.initialize_updates(sess) \r\n                # Start iterating over time slots\r\n                for sim in range (total_samples):\r\n                    # save an instance per training episode for testing purposes.\r\n                    if(sim %train_episodes['T_train'] == 0):\r\n                        train_network_idx = i_train\r\n                        model_destination = ('./simulations/sumrate/policy/%s_%s_network%d_episode%d.ckpt'%(\r\n                                json_file_train,json_file_policy_train,train_network_idx,ep)).replace('[','').replace(']','')\r\n                        policy.load(sess,model_destination)\r\n                        i_train+=1\r\n                        i_train = i_train % train_tot_simulations\r\n                    \r\n                    # If at least one time slot passed to get experience\r\n                    if (sim %train_episodes['T_train'] > 1): \r\n                        # Each agent picks its strategy.\r\n                        for agent in range (N):\r\n                            current_local_state = policy.local_state(sim,agent,p_strategy_all,H_all_2,neighbors,neighbors_in,sum_rate_list_distributed_policy,sims_pos_p) \r\n                            a_time = time.time() \r\n                            strategy = policy.act_noepsilon(sess,current_local_state,sim)\r\n                            time_calculating_strategy_takes.append(time.time()-a_time)\r\n                            \r\n                            # Pick the action\r\n                            p_strategy[agent] = strategy_translation[strategy]\r\n                            \r\n                            # Add current state to the short term memory to observe it during the next state\r\n                            policy.previous_state[agent,:] = current_local_state\r\n                            policy.previous_action[agent] = strategy\r\n                    \r\n                    if(sim %train_episodes['T_train'] < 2):\r\n                        p_strategy = Pmax * np.ones(N)#np.random.rand(N)\r\n                        p_strategy_current = np.array(p_strategy)\r\n                    policy.prev_suminterferences = np.matmul(H_all_2[sim],p_strategy) - (H_all_2[sim].diagonal()*p_strategy) + noise_var\r\n                    sims_pos_p[np.where(p_strategy_current>0)] = sim\r\n                    \r\n                    tmp_neighbors_in = []\r\n                    tmp_neighbors = []\r\n                    for nei_i in range(N):\r\n                        neigh_tmp_variab = np.where((H_all[sim][nei_i,:]**2)*p_strategy_current>neightresh)\r\n                        neigh_tmp_variab = np.delete(neigh_tmp_variab,np.where(neigh_tmp_variab[0]==nei_i))\r\n                        tmp_neighbors_in.append(neigh_tmp_variab)\r\n                    \r\n                    for nei_i in range(N):\r\n                        tmp_neighlist = []\r\n                        for nei_j in range(N):\r\n                            if(len(np.where(tmp_neighbors_in[nei_j]==nei_i)[0]) != 0):\r\n                                tmp_neighlist.append(nei_j)\r\n                        if (len(tmp_neighlist) == 0 and len(neighbors) >0):\r\n                            tmp_neighbors.append(np.array(neighbors[-1][nei_i]))\r\n                        else:\r\n                            tmp_neighbors.append(np.array(tmp_neighlist))\r\n                    neighbors.append(tmp_neighbors)\r\n                    
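# store the matching incoming-interferer sets as well; both deques keep only the\r\n                    # last two time slots, which is what the local state builder consumes\r\n                    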
neighbors_in.append(tmp_neighbors_in)\r\n                    # all sumrates in a list\r\n                    sum_rate_list_distributed_policy.append(pb.reward_helper(H_all[sim],p_strategy,N,noise_var,Pmax,neighbors_in[-1]))\r\n                    \r\n                    sum_rate_distributed_policy.append(pb.sumrate_weighted_clipped(H_all[sim],p_strategy,N,noise_var,weights[sim]))\r\n                    p_strategy_all.append(np.array(p_strategy))\r\n                    if(sim%2500 == 0):\r\n                        print('Test time %d'%(sim))\r\n                sum_rate_distributed_policy_episode.append(copy.copy(sum_rate_distributed_policy))\r\n                p_strategy_all_apisode.append(copy.copy(p_strategy_all))\r\n                \r\n            # End Test Phase\r\n            np_save_path = './simulations/sumrate/test/%s_%s_%s_episode%d.ckpt'%(json_file,json_file_train,json_file_policy_train,ep)\r\n            print(np_save_path)\r\n            np.savez(np_save_path,options,options_policy,sum_rate_distributed_policy_episode,p_strategy_all_apisode,\r\n                     time_optimization_at_each_slot_takes,time_calculating_strategy_takes,included_train_episodes,inner_train_networks)\r\n    \r\nif __name__ == \"__main__\": \r\n    parser = argparse.ArgumentParser(description='give test scenarios.')\r\n    parser.add_argument('--json-file', type=str, default='test_K10_N20_shadow10_episode5-2500_travel0_vmax2_5',\r\n                       help='json file for the deployment')\r\n    parser.add_argument('--json-files-train', nargs='+', default=[\"train_K10_N20_shadow10_episode10-5000_travel50000_vmax2_5\",\r\n                                                                  \"train_K10_N20_shadow10_episode10-5000_travel0_fd10\"],\r\n                       help='json files train.')\r\n    parser.add_argument('--json-file-policy-train', type=str, default='dqn200_100_50',\r\n                       help='json file for the hyperparameters')\r\n    \r\n    args = parser.parse_args()\r\n    print(args.json_files_train)\r\n    main(args)","sub_path":"testDQN.py","file_name":"testDQN.py","file_ext":"py","file_size_in_byte":9944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"193353657","text":"from datetime import datetime, timedelta\nfrom pathlib import Path\n# openpyxl is a Python library for reading/writing files in the\n# Excel 2010 xlsx / xlsm / xltx / xltm formats.\nimport openpyxl\nimport random\n\n\n_TABLE_INFORMATION: dict[str, tuple] = {\n    \"Header\": (\"Компания\", \"Дата\", \"Тип операции\", \"Сумма\"),\n    \"Company_name\": (\"Компания_1\", \"Компания_2\", \"Компания_3\"),\n    \"Operations\": (\"Выплата зп\", \"Отплата налогов\", \"Закупка оборудования\")\n}\n\n_CUSTOM_FIRST_TABLE_PATH: Path = Path(r\".\\first_table.xlsx\")\n_CUSTOM_SECOND_TABLE_PATH: Path = Path(r\".\\second_table.xlsx\")\n_MAX_ROW_NUMBER: int = 10000\n_MAX_COL_NUMBER: int = len(_TABLE_INFORMATION[\"Header\"])\n\n\ndef create_table(save_path: Path):\n    wb = openpyxl.Workbook()\n    ws = wb.active\n    ws.title = \"Информация по операциям\"\n\n    ws.append(_TABLE_INFORMATION[\"Header\"])\n\n    current_date = datetime.now()\n    current_row_number: int = 1\n\n    while current_row_number < _MAX_ROW_NUMBER:\n        date_string: str = f\"{current_date.year}-{current_date.month:02}-{current_date.day:02} \" \\\n                           f\"{current_date.hour:02}:{current_date.minute:02}:{current_date.second:02}\"\n\n        current_row: tuple[str, str, str, int] = (\n            random.choice(_TABLE_INFORMATION[\"Company_name\"]),\n            date_string,\n            random.choice(_TABLE_INFORMATION[\"Operations\"]),\n            random.randint(100, 500000)\n        )\n\n        ws.append(current_row)\n\n        current_date += timedelta(seconds=random.randint(0, 10))\n        current_row_number += 1\n\n    wb.save(save_path)\n\n\nif __name__ == \"__main__\":\n    print(\"Опять работать?\")\n    for save_path in [_CUSTOM_FIRST_TABLE_PATH, _CUSTOM_SECOND_TABLE_PATH]:\n        create_table(save_path)\n    print(\"Дело 
сделано!\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"465485046","text":"from app.mac import mac, signals\nimport os\nimport random\n\n\ntry:\n    sjokiFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/sjoki.txt\", \"r\")\n    sjokiFile.close()\nexcept:\n    sjokiFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/sjoki.txt\", \"w+\")\n    sjokiFile.close()\n\ntry:\n    prismaFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/prisma.txt\", \"r\")\n    prismaFile.close()\nexcept:\n    prismaFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/prisma.txt\", \"w+\")\n    prismaFile.close()\n\ntry:\n    koveroFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/kovero.txt\", \"r\")\n    koveroFile.close()\nexcept:\n    koveroFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/kovero.txt\", \"w+\")\n    koveroFile.close()\n\ntry:\n    autoFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/auto.txt\", \"r\")\n    autoFile.close()\nexcept:\n    autoFile = open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/auto.txt\", \"w+\")\n    autoFile.close()\n\n'''\nSignals this module listens to:\n1. When a message is received (signals.command_received)\n==========================================================\n'''\n@signals.command_received.connect\ndef handle(message):\n    if message.command == \"ohjeet\":\n        ohjeet(message)\n    elif message.command == \"sjoki\":\n        sjoki(message)\n    elif message.command == \"prisma\":\n        prisma(message)\n    elif message.command == \"kovero\":\n        kovero(message)\n    elif message.command == \"auto\":\n        auto(message)\n    elif message.command == \"peruutus\":\n        peruutus(message)\n    elif message.command == \"lista\":\n        lista(message)\n    elif message.command == \"uusilista\":\n        uusilista(message)\n    elif message.command == \"vapaa\":\n        vapaa(message)\n    elif message.command == \"hyväbotti\":\n        hyväbotti(message)\n    elif message.command == \"lepo\":\n        lepo(message)\n\n'''\nActual module code\n==========================================================\n'''\ndef ohjeet(message):\n    answer = \"\"\"Hei, olen *KyytiBOT* ja minut on koodannut kaikkien janoisten sankari, Atte Viertola.\n    \\n*Mahdolliset komennot:*\n    \\n*!sjoki* - Ilmoittaudut KyytiBOTille töihin seuraavaksi päiväksi ja ilmoitat tulevasi kyytiin Kivistön S-Marketilta. Sinut lisätään minun ylläpitämään listaan.\n    \\n*!kovero* - Ilmoittaudut KyytiBOTille töihin seuraavaksi päiväksi, sinut lisätään Koverolta noukittavien listaan.\n    \\n*!prisma* - Ilmoittaudut KyytiBOTille töihin seuraavaksi päiväksi, sinut lisätään Prisman rampilta noukittavien listaan.\n    \\n*!auto* - Ilmoittaudut KyytiBOTille töihin seuraavaksi päiväksi ja ilmoitat ajavasi seuraavana päivänä, sinun EI tarvitse ilmoittautua töihin erikseen. Jos olet kerennyt jo ilmoittautua ilman autoa, tämä komento lisää auton ilmoittautumiseesi.\n    \\n*!peruutus* - Peruutat ilmoittautumisesi töihin, perut samalla myös mahdollisesti ilmoittamasi auton.\n    \\n*!lista* - Lähetän viestin, johon on listattu kaikki töihin tulevat, heidän noutopaikkansa sekä luvatut autot.\n    \\n*!uusilista* - Tyhjää edellisen päivän listan ja uudet ilmoittautumiset voi aloittaa.\n    \\n*HUOM! 
En pidä kirjaa mahdollisista erikoisjärjestelyistä kuten vain toiseen suuntaan tulemisesta, joten muista edelleen kertoa näistä erikseen!*\n \"\"\"\n mac.send_message(answer, message.conversation)\n\ndef sjoki(message):\n who_name = message.who_name\n sjokiNames = getNames(\"sjoki\")\n prismaNames = getNames(\"prisma\")\n koveroNames = getNames(\"kovero\")\n autoNames = getNames(\"auto\")\n\n if who_name in sjokiNames or who_name in prismaNames or who_name in koveroNames or who_name in autoNames:\n answer = \"*{}*, olet jo ilmoittautunut.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n else:\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/sjoki.txt\", \"a\") as sjokiFile:\n sjokiFile.write(\"{}\\n\".format(who_name))\n answer = \"*{}*, ilmoittautumisesi on otettu vastaan.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\ndef prisma(message):\n who_name = message.who_name\n sjokiNames = getNames(\"sjoki\")\n prismaNames = getNames(\"prisma\")\n koveroNames = getNames(\"kovero\")\n autoNames = getNames(\"auto\")\n\n if who_name in sjokiNames or who_name in prismaNames or who_name in koveroNames or who_name in autoNames:\n answer = \"*{}*, olet jo ilmoittautunut.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n else:\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/prisma.txt\", \"a\") as prismaFile:\n prismaFile.write(\"{}\\n\".format(who_name))\n answer = \"*{}*, ilmoittautumisesi on otettu vastaan.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\ndef kovero(message):\n who_name = message.who_name\n sjokiNames = getNames(\"sjoki\")\n prismaNames = getNames(\"prisma\")\n koveroNames = getNames(\"kovero\")\n autoNames = getNames(\"auto\")\n\n if who_name in sjokiNames or who_name in prismaNames or who_name in koveroNames or who_name in autoNames:\n answer = \"*{}*, olet jo ilmoittautunut.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n else:\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/kovero.txt\", \"a\") as koveroFile:\n koveroFile.write(\"{}\\n\".format(who_name))\n answer = \"*{}*, ilmoittautumisesi on otettu vastaan.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\ndef auto(message):\n who_name = message.who_name\n autoNames = getNames(\"auto\")\n\n if who_name in autoNames:\n answer = \"*{}*, olet jo ilmoittautunut.\".format(who_name)\n mac.send_message(answer, message.conversation)\n return\n\n sjokiNames = getNames(\"sjoki\")\n\n if who_name in sjokiNames:\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/sjoki.txt\", \"w\") as sjokiFile:\n for name in sjokiNames:\n if name != who_name:\n sjokiFile.write(\"{}\\n\".format(name))\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/auto.txt\", \"a\") as autoFile:\n autoFile.write(\"{}\\n\".format(who_name))\n answer = \"*{}*, sinut on siirretty autollisten listaan.\".format(who_name)\n mac.send_message(answer, message.conversation)\n return\n \n prismaNames = getNames(\"prisma\")\n koveroNames = getNames(\"kovero\")\n\n if who_name in prismaNames or who_name in koveroNames:\n answer = \"*{}*, vain Kivistön S-Marketin kautta tulevat voivat ilmoittaa auton.\".format(who_name)\n mac.send_message(answer, message.conversation)\n return\n\n else:\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/auto.txt\", \"a\") as autoFile:\n autoFile.write(\"{}\\n\".format(who_name))\n answer = \"*{}*, ilmoittautumisesi on otettu 
vastaan.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\ndef peruutus(message):\n who_name = message.who_name\n\n if who_name in getNames(\"sjoki\"):\n removeName(\"sjoki\", who_name)\n answer = \"*{}*, ilmoittautumisesi on peruutettu.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n elif who_name in getNames(\"prisma\"):\n removeName(\"prisma\", who_name)\n answer = \"*{}*, ilmoittautumisesi on peruutettu.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n elif who_name in getNames(\"kovero\"):\n removeName(\"kovero\", who_name)\n answer = \"*{}*, ilmoittautumisesi on peruutettu.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n elif who_name in getNames(\"auto\"):\n removeName(\"auto\", who_name)\n answer = \"*{}*, ilmoittautumisesi on peruutettu.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\n else:\n answer = \"*{}*, et ole vielä ilmoittautunut.\".format(who_name)\n mac.send_message(answer, message.conversation)\n\ndef lista(message):\n \n sjokiList = getNames(\"sjoki\")\n prismaList = getNames(\"prisma\")\n koveroList = getNames(\"kovero\")\n carList = getNames(\"auto\")\n\n total = len(sjokiList) + len(prismaList) + len(koveroList) + len(carList)\n totalSjoki = len(sjokiList)\n totalPrisma = len(prismaList)\n totalKovero = len(koveroList)\n totalCars = len(carList)\n\n sjokiString = \"\\n \".join(sjokiList)\n prismaString = \"\\n \".join(prismaList)\n koveroString = \"\\n \".join(koveroList)\n carString = \"\\n \".join(carList)\n answer = \"\"\"*Yhteensä tulossa: {}*\n \\n*Kivistön S-Market: {}*\n {}\n \\n*Prisman ramppi: {}*\n {}\n \\n*Koveron Shell: {}*\n {}\n \\n*Autolliset: {}*\n {}\"\"\".format(total, totalSjoki, sjokiString, totalPrisma, prismaString, totalKovero, koveroString, totalCars, carString)\n mac.send_message(answer, message.conversation)\n\ndef uusilista(message):\n emptyList(\"sjoki\")\n emptyList(\"prisma\")\n emptyList(\"kovero\")\n emptyList(\"auto\")\n\n answer = \"*Lista tyhjennetty, uudet ilmoittautumiset voi alkaa.*\"\n mac.send_message(answer, message.conversation)\n\ndef vapaa(message):\n mac.send_message(\"*MENISIT SINÄKI TÖI_HI*\", message.conversation)\n\ndef hyväbotti(message):\n mac.send_message(\"beep boop, botti kiittää\", message.conversation)\n\ndef lepo(message):\n mac.send_message(\"Shutdown in 3, 2, 1...\", message.conversation)\n emptyList(\"sjoki\")\n emptyList(\"prisma\")\n emptyList(\"kovero\")\n emptyList(\"auto\")\n\n\n##############################################################\n\n#APUFUNKTIOT\n\n##############################################################\n\ndef getNames(list):\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/{}.txt\".format(list), \"r\") as file:\n names = file.read().splitlines()\n return names\n\ndef emptyList(list):\n open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/{}.txt\".format(list), \"w+\")\n return\n\ndef removeName(list, name):\n names = []\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/{}.txt\".format(list), \"r\") as file:\n for i in getNames(list):\n if i != name:\n names.append(i)\n with open(\"/home/atte/yowsapp-framework/modules/KyytiBOT/{}.txt\".format(list), \"w\") as file:\n for name in names:\n file.write(\"{}\\n\".format(name))\n return\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"371929142","text":"from 
django.contrib import admin\nfrom leaflet.admin import LeafletGeoAdmin\nfrom .models import NFuncionariosParcerias, IndQualidadeEmpregoEfetivo, IndQualidadeEmpregoTemporario\n\n\n@admin.register(NFuncionariosParcerias)\nclass NFuncionariosParceriasAdmin(LeafletGeoAdmin):\n    list_display = ('id_questionario', 'permanentes', 'temporarios', 'meeiros', 'mutirao')\n\n\n@admin.register(IndQualidadeEmpregoEfetivo)\nclass IndQualidadeEmpregoEfetivoAdmin(LeafletGeoAdmin):\n    list_display = ('id_questionario', 'registro_funcionarios', 'pagamento_hora_extra', 'acima_1_sm',\n                    'auxilio_alimentacao', 'auxilio_moradia', 'auxilio_educacao_transporte', 'participacao_lucros',\n                    'seguro_acidentes', 'acesso_lazer', 'espaco_cultivo')\n\n\n@admin.register(IndQualidadeEmpregoTemporario)\nclass IndQualidadeEmpregoTemporarioAdmin(LeafletGeoAdmin):\n    list_display = ('id_questionario', 'registro_funcionarios', 'pagamento_hora_extra', 'acima_1_sm',\n                    'auxilio_alimentacao', 'auxilio_moradia', 'auxilio_educacao_transporte', 'participacao_lucros',\n                    'seguro_acidentes', 'acesso_lazer', 'espaco_cultivo')\n","sub_path":"car_isa/ind_qe/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"178618451","text":"#!/usr/bin/env python3\n\nimport socket\nimport datetime\n\nUDP_IP = \"0.0.0.0\"\nUDP_PORT = 1337\n\nsock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\nsock.bind( (UDP_IP, UDP_PORT) )\n\nprint(\"+============================+\")\nprint(\"|  ESP32 UDP Logging Server  |\")\nprint(\"+============================+\")\nprint(\"\")\n\nwhile True:\n\tdata, addr = sock.recvfrom(512)\n\tprint(datetime.datetime.now(), data.decode(), end='')","sub_path":"udp_listener.py","file_name":"udp_listener.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"493085875","text":"from urllib.request import urlopen\nfrom urllib.parse import urlencode,unquote,quote_plus\nimport urllib\n\nurl = 'http://apis.data.go.kr/1400000/service/cultureInfoService'\n\n\nqueryParams = '?' + urlencode({ quote_plus('servicekey') : 'cRhBhi3sxVClCIks%2FemvBBGZgcYv5HaKvFr26Ov5Q5nor0WtrgUNO9rwfYO6FkLUif9SefP0BK%2B18mBFvV8%2FCw%3D%3D',\n                                quote_plus('searchWrd') : quote_plus('북한산')})\n \nrequest = urllib.request.Request(url+unquote(queryParams))\nprint ('Your Request:\\n'+url+queryParams)\nrequest.get_method = lambda: 'GET'\nresponse_body = urlopen(request).read()\nprint (\"\\nResult:\")\nprint (response_body)\nprint (\"\\nDataType of Result Data:\")\nprint (type(response_body))\n\n\n\n","sub_path":"openAPI2.py","file_name":"openAPI2.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"211538422","text":"from flask import Flask,render_template,request,redirect\n\napp=Flask(__name__)\n\nimport joblib\nimport numpy as np\n\n@app.route(\"/\")\ndef home():\n\treturn render_template(\"index.html\")\n\n@app.route('/submit',methods=['POST'])\ndef submit():\n\tmodel=joblib.load(\"linear_regression.pkl\")\n\tst=float(request.form['num'])\n\tmarks=model.predict([[st]])\n\tmarks=marks[0][0]\n\t# return str(store[0][0])\n\treturn render_template(\"index.html\",your_marks='%.3f'%marks)\n# \t# return \"the number is {}\".format(int(request.form['n1'])+int(request.form['n2']))\n\t\nif __name__=='__main__':\n\tapp.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"150589206","text":"#!/usr/bin/python2.7\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\n###############################################################\r\n#\r\n# MAAPI 3.1\r\n# connection with DB\r\n#\r\n##############################################################\r\nimport psycopg2\r\nfrom datetime import datetime, timedelta\r\nfrom conf.MaaPi_Settings import *\r\n\r\nclass db(object):\r\n    conn = psycopg2.connect(\"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(Maapi_dbname,Maapi_user,Maapi_host,Maapi_passwd))\r\n    x = conn.cursor()\r\nclass MaaPiDBConnection(db):\r\n\r\n    debug = 0\r\n\r\n    def __init__(self):\r\n        self.filters_ = {}\r\n        self.orders_ = {}\r\n        self.columns_ = {}\r\n        self.columns_var = {}\r\n        self.table_ = {}\r\n\r\n\r\n    @classmethod\r\n    def _debug(self, level, msg):\r\n        if self.debug >= level:\r\n            print(\"DEBUG MaaPi DB 2 {0} {1}, {2}\".format(level, datetime.now(), msg))\r\n\r\n    @classmethod\r\n    def insert_data(self,senor_id,value,sensor_type,status):\r\n\r\n        self._debug(1,\"SENSOR ID ={0}\".format(senor_id))\r\n        self._debug(1,\"value ={0}\".format(value))\r\n        self._debug(1,\"sensor_type ={0}\".format(sensor_type))\r\n        db.x = db.conn.cursor()\r\n        self._debug(1,\"x connection cursor\")\r\n        db.x.execute(\"SELECT dev_value, dev_rom_id, dev_collect_values_to_db FROM devices WHERE dev_id='{0}' and dev_status=True\".format(senor_id))\r\n        db_data = db.x.fetchone()\r\n        self._debug(1,\"get data from devices\")\r\n        try:\r\n            db.x.execute(\"UPDATE devices SET dev_value_old={0} WHERE dev_id='{1}' and dev_status=True\".format(db_data[0],senor_id))\r\n            db.conn.commit()\r\n        except:\r\n            pass\r\n        if status is True:\r\n            self._debug(1,\"sensor status TRUE\")\r\n            \"\"\"if status is true, update actual value, date and stat on devices\"\"\"\r\n            if value == True:\r\n                db.x.execute(\"UPDATE devices SET dev_value={0}, dev_last_update=NOW(),dev_read_error='ok' WHERE dev_id='{1}' and dev_status=True\".format(1,senor_id))\r\n                db.conn.commit()\r\n                if db_data[2]:\r\n                    db.x.execute(\"\"\"INSERT INTO maapi_dev_rom_{0}_values VALUES (default,{1},default,{2})\"\"\".format(db_data[1].replace(\"-\", \"_\"), senor_id,1))\r\n                    db.conn.commit()\r\n                \"\"\"if dev_collect_values_to_db true - enable write to values table\"\"\"\r\n            elif value == False:\r\n                db.x.execute(\"UPDATE devices SET dev_value={0}, dev_last_update=NOW(),dev_read_error='ok' WHERE dev_id='{1}' and dev_status=True\".format(0,senor_id))\r\n                db.conn.commit()\r\n                if db_data[2]:\r\n                    db.x.execute(\"\"\"INSERT INTO maapi_dev_rom_{0}_values VALUES (default,{1},default,{2})\"\"\".format(db_data[1].replace(\"-\", \"_\"), senor_id,0))\r\n                    db.conn.commit()\r\n                \"\"\"if dev_collect_values_to_db true - enable write to values table\"\"\"\r\n            else:\r\n                db.x.execute(\"UPDATE devices SET dev_value={0}, dev_interval_queue = {2}, dev_last_update=NOW(), dev_read_error='ok' WHERE dev_id='{1}' and dev_status=True\".format(value,senor_id,False))\r\n                db.conn.commit()\r\n\r\n                if db_data[2]:\r\n                    db.x.execute(\"\"\"INSERT INTO maapi_dev_rom_{0}_values VALUES (default,{1},default,{2})\"\"\".format(db_data[1].replace(\"-\", \"_\"), senor_id,value))\r\n                    db.conn.commit()\r\n        else:\r\n            db.x.execute(\"UPDATE devices SET dev_interval_queue = 
{2},dev_value={0},dev_read_error='Error' WHERE dev_id='{1}' and dev_status=True\".format(9999,senor_id,False))\r\n db.conn.commit()\r\n\r\n\r\n @classmethod\r\n def queue(self,dev_id,status):\r\n db.x = db.conn.cursor()\r\n self._debug(1,\"Queue set {1} ={0}\".format(status,dev_id))\r\n if dev_id == '*':\r\n self._debug(2,\"Queue set {1} = {0} - done \".format(status,dev_id))\r\n db.x.execute(\"UPDATE devices SET dev_interval_queue={0} where dev_status=TRUE\".format(status))\r\n db.conn.commit()\r\n else:\r\n self._debug(2,\"Queue set {1} = {0} - done \".format(status,dev_id))\r\n db.x.execute(\"UPDATE devices SET dev_interval_queue={0} where dev_id={1}\".format(status,dev_id))\r\n db.conn.commit()\r\n\r\n\r\n# @classmethod\r\n #def queue_all(self,status):\r\n# db.x.execute(\"UPDATE devices SET dev_interval_queue={0} \".format(status))\r\n# db.conn.commit()\r\n\r\n\r\n def columns(self, *args):\r\n self._debug(2,\"columns: {0}\".format(args))\r\n self.columns_ = args\r\n return self\r\n\r\n def filters_eq(self, **kwargs):\r\n self._debug(2,\"filters_eq: {0}\".format(kwargs))\r\n self.filters_ = kwargs\r\n return self\r\n\r\n def order_by(self, *args):\r\n self._debug(2,\"order_by: {0}\".format(args))\r\n self.orders_ = args\r\n return self\r\n\r\n def if_number(self, s):\r\n self._debug(2,\"if_number: {0}\".format(s))\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\n def table(self, *args):\r\n self._debug(2,\"table: {0}\".format(args))\r\n if len(args) != 1:\r\n raise ValueError(\r\n \".get_table('table name') should only have a table name\")\r\n self.table_ = args[0]\r\n return self\r\n\r\n def get(self):\r\n self._debug(2,\"get\")\r\n if self.columns_:\r\n c_len = len(self.columns_)\r\n c_i = 1\r\n columns = \" \"\r\n for c in self.columns_:\r\n if c_len > 1:\r\n columns += \"{0}\".format(c)\r\n if c_i < c_len:\r\n columns += \", \"\r\n c_i += 1\r\n else:\r\n columns += \"{0}\".format(c)\r\n else:\r\n columns = \"*\"\n self.columns_var = \"*\"\r\n\r\n query = \"SELECT {0} FROM {1} \".format(columns, self.table_)\r\n\r\n if self.filters_:\r\n f_len = len(self.filters_)\r\n f_i = 1\r\n query += \"WHERE \"\r\n for i in self.filters_:\r\n if f_i == 1:\r\n if self.if_number(self.filters_[i]):\r\n query += \" {0} = {1}\".format(i, self.filters_[i])\r\n else:\r\n query += \" {0} = '{1}'\".format(i, self.filters_[i])\r\n else:\r\n query += \" and \"\r\n if self.if_number(self.filters_[i]):\r\n query += \" {0} = {1}\".format(i, self.filters_[i])\r\n else:\r\n query += \" {0} = '{1}'\".format(i, self.filters_[i])\r\n f_i += 1\r\n if self.orders_:\r\n try:\r\n self.orders_[1]\r\n except:\r\n query += \" ORDER BY {0}\".format(self.orders_[0])\r\n else:\r\n if self.orders_[1] == \"asc\" or self.orders_[1] == \"ASC\" or self.orders_[1] == \"desc\" or self.orders_[1] == \"DESC\":\r\n query += \" ORDER BY {0} {1}\".format(self.orders_[0],\r\n self.orders_[1])\r\n else:\r\n raise ValueError(\r\n \"order_by Second Value should be empty or ASC or DESC but get: '{0}'\".\r\n format(self.orders_[1]))\r\n query += \";\"\r\n\r\n data = self.exec_query_select(query, self.table_)\r\n\r\n return data\r\n\r\n\r\n\r\n def exec_query_select(self, query, name):\r\n db.x = db.conn.cursor()\r\n table_data_dict = {}\r\n try:\r\n db.x.execute(query)\r\n table_data = db.x.fetchall()\r\n if self.columns_var == '*':\r\n db.x.execute(\r\n \"SELECT column_name FROM information_schema.columns WHERE table_name='{0}' \".\r\n format(name))\r\n table_names = db.x.fetchall()\r\n\r\n for row_s in 
range(len(table_data)):\r\n sensor_rows = {}\r\n i = 0\r\n for r_s in table_data[row_s]:\r\n sensor_rows[table_names[i][0]] = r_s\r\n i += 1\r\n table_data_dict[table_data[row_s][0]] = sensor_rows\r\n\r\n else:\r\n table_names = self.columns_\r\n for row_s in range(len(table_data)):\r\n sensor_rows = {}\r\n i = 0\r\n for r_s in table_data[row_s]:\r\n sensor_rows[table_names[i]] = r_s\r\n i += 1\r\n table_data_dict[table_data[row_s][0]] = sensor_rows\r\n\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n pass\r\n\r\n return table_data_dict\r\n\r\n @classmethod\r\n def update_cron(self,file_name,time):\r\n\r\n db.x.execute(\"UPDATE maapi_cron SET cron_last_file_exec=NOW(), cron_time_of_exec={1} WHERE cron_file_path ='{2}' and cron_where_exec='{3}'\".format(datetime.now(),time,file_name,Maapi_location))\r\n\r\n db.conn.commit()\r\n","sub_path":"lib/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":9148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"337645187","text":"## enemy_generation.py\r\n## Andrew Herrera and Benjamin Rose\r\n## Fall 2016\r\n\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\nfrom random import randint\r\nfrom enemyTestClasses import minionTestClass, strikerTestClass, turretTestClass\r\n\r\npygame.init()\r\n\r\n## minion_gen(minion_list, minion_kill_list, DISPLAYWIDTH, DISPLAYHEIGHT)\r\n\r\ndef minion_gen(minion_list, minion_kill_list, DISPLAYWIDTH, DISPLAYHEIGHT):\r\n wing_temp = randint(3, 5)\r\n wing_vert = wing_temp * (DISPLAYHEIGHT//9)\r\n wing_y_top = randint(0, DISPLAYHEIGHT - (wing_vert + 20))\r\n wing_y_bot = wing_y_top + wing_vert\r\n for x in range(wing_temp):\r\n minion_list.append(0)\r\n minion_list[len(minion_list) - 1] = minionTestClass((DISPLAYWIDTH * 1.1, (x * DISPLAYHEIGHT//9) + wing_y_top),len(minion_list), minion_kill_list)\r\n\r\n\r\n## striker_gen(striker_list, striker_kill_list, DISPLAYWIDTH, DISPLAYHEIGHT)\r\n\r\ndef striker_gen(striker_list, striker_kill_list, DISPLAYWIDTH, DISPLAYHEIGHT):\r\n striker_y_max = DISPLAYHEIGHT - 100\r\n striker_y = randint(0, striker_y_max)\r\n striker_list.append(0)\r\n striker_list[len(striker_list) - 1] = strikerTestClass((DISPLAYWIDTH * 1.1, striker_y), len(striker_list), striker_kill_list)\r\n\r\n\r\n## turret_gen(turret_list, striker_kill_list, temp_row, lvl_type_choice)\r\n\r\ndef turret_gen(turret_list, turret_kill_list, tile_row, torf):\r\n\r\n row_assigned = 0\r\n if tile_row == 6 and row_assigned == 0:\r\n row = 0\r\n row_assigned = 1\r\n elif tile_row == 5 and row_assigned == 0:\r\n row = 1\r\n row_assigned = 1\r\n elif tile_row == 4 and row_assigned == 0:\r\n row = 2\r\n row_assigned = 1\r\n elif tile_row == 3 and row_assigned == 0:\r\n row = 3\r\n row_assigned = 1\r\n elif tile_row == 2 and row_assigned == 0:\r\n row = 4\r\n row_assigned = 1\r\n elif tile_row == 1 and row_assigned == 0:\r\n row = 5\r\n row_assigned = 1\r\n turret_list.append(0)\r\n turret_list[len(turret_list) - 1] = turretTestClass((1410, (row * 140) - 50), len(turret_list) - 1, turret_kill_list, torf)\r\n turret_list.append(0)\r\n turret_list[len(turret_list) - 1] = turretTestClass((1480, (row * 140) - 50), len(turret_list) - 1, turret_kill_list, torf)\r\n\r\n","sub_path":"enemy_generation.py","file_name":"enemy_generation.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"51779541","text":"# Takes in a pheno_pops.csv file and returns the temporal host 
and parasite heterogeneity\nimport sys\nimport math\n\ndef entropy(arr):\n entr = 0.0\n tot = sum(arr)\n for i in arr:\n frac = float(i)/float(tot)\n if frac!=0.0:\n entr -= frac*math.log(frac,2.0)\n return entr\n\n# Main function\ndef get_diversity(filename):\n times = []\n host_hets = []\n para_hets = []\n with open(filename) as ifile:\n header = next(ifile)\n num_pheno = (len(header.split(\",\"))-1)/2\n for line in ifile:\n words = line.split(\",\")\n if len(words)>1:\n time = float(words[0])\n host_pops = [int(i) for i in words[1:int(1+num_pheno)]]\n para_pops = [int(i) for i in words[1+int(num_pheno):]]\n host_het = entropy(host_pops)\n para_het = entropy(para_pops)\n times.append(time)\n host_hets.append(host_het)\n para_hets.append(para_het)\n return times,host_hets,para_hets \n\ndef save_diversity(times,host_hets,para_hets):\n with open(\"output/diversity.csv\",\"w+\") as ofile:\n ofile.write(\"time,host_div,para_div\\n\")\n for idx,time in enumerate(times):\n ofile.write(\"%f,%f,%f\\n\" % (time,host_hets[idx],para_hets[idx]))\n\n\n","sub_path":"lib/get_diversity.py","file_name":"get_diversity.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"46933954","text":"import os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir) # PYTHON > 3.3 does not allow relative referencing\n\nimport argparse\nfrom configparser import ConfigParser\nfrom shutil import copy2\nimport os\nfrom datetime import datetime\nimport DeepDeformationMapRegistration.utils.constants as C\nimport re\nfrom COMET.augmentation_constants import LAYER_SELECTION\nTRAIN_DATASET = '/mnt/EncryptedData1/Users/javier/ext_datasets/COMET_dataset/OSLO_COMET_CT/Formatted_128x128x128/train'\n\nerr = list()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--ini', help='Configuration file')\n args = parser.parse_args()\n\n configFile = ConfigParser()\n configFile.read(args.ini)\n print('Loaded configuration file: ' + args.ini)\n print({section: dict(configFile[section]) for section in configFile.sections()})\n print('\\n\\n')\n\n trainConfig = configFile['TRAIN']\n lossesConfig = configFile['LOSSES']\n datasetConfig = configFile['DATASETS']\n othersConfig = configFile['OTHERS']\n augmentationConfig = configFile['AUGMENTATION']\n\n try:\n print('TRAIN MODEL IN' + trainConfig['model'])\n except KeyError as er:\n trainConfig['model'] = ''\n\n simil = lossesConfig['similarity'].split(',')\n segm = lossesConfig['segmentation'].split(',')\n if trainConfig['name'].lower() == 'uw':\n from COMET.COMET_train_UW import launch_train\n output_folder = os.path.join(othersConfig['outputFolder'], '{}_Lsim_{}__Lseg_{}'.format(trainConfig['name'], '_'.join(simil), '_'.join(segm)))\n elif trainConfig['name'].lower() == 'segguided':\n from COMET.COMET_train_seggguided import launch_train\n simil = simil[0]\n segm = segm[0]\n output_folder = os.path.join(othersConfig['outputFolder'],\n '{}_Lsim_{}__Lseg_{}'.format(trainConfig['name'], simil, segm))\n else:\n from COMET.COMET_train import launch_train\n simil = simil[0]\n segm = segm[0]\n output_folder = os.path.join(othersConfig['outputFolder'], '{}_Lsim_{}'.format(trainConfig['name'], simil))\n output_folder = output_folder + '_' + datetime.now().strftime(\"%H%M%S-%d%m%Y\")\n\n try:\n froozen_layers = eval(trainConfig['freeze'])\n except KeyError as err:\n froozen_layers = None\n except 
NameError as err:\n        froozen_layers = list(filter(lambda x: x != '', re.split(';|\\s|,|,\\s|;\\s', trainConfig['freeze'].upper())))\n\n    if froozen_layers is not None:\n        assert all(s in LAYER_SELECTION.keys() for s in froozen_layers), \\\n            'Invalid option for \"freeze\". Expected one or several of: ' + ', '.join(LAYER_SELECTION.keys())\n        froozen_layers = list(set(froozen_layers))  # Unique elements\n\n    if augmentationConfig:\n        C.GAMMA_AUGMENTATION = augmentationConfig['gamma'].lower() == 'true'\n        C.BRIGHTNESS_AUGMENTATION = augmentationConfig['brightness'].lower() == 'true'\n\n\n    # copy the configuration file to the destination folder\n    os.makedirs(output_folder, exist_ok=True)  # TODO: move this within the \"resume\" if case, and bring here the creation of the resume-output folder!\n    copy2(args.ini, os.path.join(output_folder, os.path.split(args.ini)[-1]))\n\n    try:\n        unet = [int(x) for x in trainConfig['unet'].split(',')] if trainConfig['unet'] else [16, 32, 64, 128, 256]\n        head = [int(x) for x in trainConfig['head'].split(',')] if trainConfig['head'] else [16, 16]\n    except KeyError as err:\n        unet = [16, 32, 64, 128, 256]\n        head = [16, 16]\n\n    try:\n        resume_checkpoint = trainConfig['resumeCheckpoint']\n    except KeyError as e:\n        resume_checkpoint = None\n\n    launch_train(dataset_folder=datasetConfig['train'],\n                 validation_folder=datasetConfig['validation'],\n                 output_folder=output_folder,\n                 gpu_num=eval(trainConfig['gpu']),\n                 lr=eval(trainConfig['learningRate']),\n                 rw=eval(trainConfig['regularizationWeight']),\n                 simil=simil,\n                 segm=segm,\n                 max_epochs=eval(trainConfig['epochs']),\n                 image_size=eval(trainConfig['imageSize']),\n                 early_stop_patience=eval(trainConfig['earlyStopPatience']),\n                 model_file=trainConfig['model'],\n                 freeze_layers=froozen_layers,\n                 acc_gradients=eval(trainConfig['accumulativeGradients']),\n                 batch_size=eval(trainConfig['batchSize']),\n                 unet=unet,\n                 head=head,\n                 resume=resume_checkpoint)\n","sub_path":"COMET/MultiTrain_config.py","file_name":"MultiTrain_config.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"206282152","text":"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nrstride = 5\ncstride = 5\nMinBound = -3\nMaxBound = 3\nu = np.linspace(0, 2*np.pi, 100)\nv = np.linspace(0, np.pi, 100)\n\n\n#SPHERE\n\nx = (1.5)*np.outer(np.cos(u), np.sin(v))\ny = (1.5)*np.outer(np.sin(u), np.sin(v))\nz = (1.5)*np.outer(np.ones(np.size(u)), np.cos(v))\n\nax.plot_surface(x, y, z, rstride = rstride, cstride = cstride, color=(0,0.8,0), linewidth=0)\n\nax.set_xlim3d(MinBound, MaxBound)\nax.set_ylim3d(MinBound, MaxBound)\nax.set_zlim3d(MinBound, MaxBound)\nplt.show()","sub_path":"3rd Energy Level/3s.py","file_name":"3s.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"594675742","text":"\n# Created by taki on 2017/03/12.\n\n\nimport numpy as np\n\n\nclass mnist_data_nc():\n\n    def __init__(self, mode='train'):\n\n        self.basedir = '/Users/taki/web/PycharmProjects/projects/mnist/mnist_data/'\n\n        if mode == 'train':\n            self.data_list = ['train-labels.idx1-ubyte', 'train-images.idx3-ubyte']\n        elif mode == 'test':\n            self.data_list = ['t10k-labels.idx1-ubyte', 't10k-images.idx3-ubyte']\n        else:\n            raise ValueError('Invalid mode')\n\n        self.label_f = open(self.basedir + self.data_list[0], 'rb')\n        
_magic = self._read32(self.label_f)[0]\n _item_num = self._read32(self.label_f)[0]\n\n self.image_f = open(self.basedir + self.data_list[1], 'rb')\n _magic = self._read32(self.image_f)[0]\n _image_num = self._read32(self.image_f)[0]\n self.row_num = self._read32(self.image_f)[0]\n self.column_num = self._read32(self.image_f)[0]\n\n if _item_num != _image_num:\n raise ValueError(\"Number of Item doesn't match\")\n else:\n self.item_num = _item_num\n\n self.counter = 0\n\n\n def __iter__(self):\n return self\n\n def __next__(self):\n\n self.counter += 1\n if self.counter > self.item_num:\n raise StopIteration()\n\n label = ord(self.label_f.read(1))\n\n img_byte_list = []\n for i in range(self.row_num * self.column_num):\n img_byte_list.append(ord(self.image_f.read(1)))\n\n return [label, img_byte_list]\n\n\n # 事前データ取得用関数\n def _read32(self, bytestream):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)\n\n\n\n\"\"\"CODE_TEST\"\"\"\n\nd = mnist_data_nc('test')\n\n# counter = 0\n#\n# for i in d:\n# counter += 1\n# print(i)\n# print(counter)\n#\n# # if counter > 5:\n# # break\n\n# print(d[0])\n# print(d[1])\n\n","sub_path":"mnist/read_f.py","file_name":"read_f.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"345829845","text":"from musiclang.script import *\nfrom musiclang.lang import MusicLang\nfrom random import choice\nimport numpy as np\n\nSEED = 24\n\nTEMPO = 120\nBASE_TONALITY = TONALITY_C.M\nINSTRUMENTS_CANDIDATES = [[INST_TIMPANI],\n [INST_TUBA], [INST_FRENCH_HORN], [INST_OBOE], [INST_FLUTE],\n [INST_CONTRABASS], [INST_CELLO], [INST_VIOLIN], [INST_VIOLIN]\n ]\nPERCS_IDX = [0]\nWINDS_IDX = [1, 2, 3, 4]\nSTRINGS_IDX = [5, 6, 7, 8]\n\nSTART_WITH_NOTE = [True,\n True, False, False, False, False,\n True, False, False, False\n ]\n\nNB_VOICES = len(INSTRUMENTS_CANDIDATES)\nDURATION = 4 * Q\nNB_CHORDS = 4\nNOTES_CANDIDATES = [[s0, s2],\n [s0, s2, s4, s0, s2, s5, s6],\n [s0, cu1, cu1, cu1, cu1, cu1],\n [s2, cu1, cu1, cu1, cu1, cu1],\n [s2, s0, s4, s6, s6.o(-1), s0, s0]\n ]\nNOTES_CANDIDATES = [s6.o(-1), s0, s1, s2, s3, s4, s5, s6, s0.o(1)]\n#NOTES_CANDIDATES = [h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]\nNB_NOTES = 7\nCHORD_CANDIDATES = [I, V % V.M, III, IV, V.o(-1), VI.o(-1)]\n\n\n\n# 20, 19, 20237028\nscore_generator = GenerateRandomScore(DURATION, NB_CHORDS, NB_VOICES,\n chord_candidates=CHORD_CANDIDATES,\n notes_candidates=NOTES_CANDIDATES,\n instruments_candidates=INSTRUMENTS_CANDIDATES,\n #instruments_candidates=None,\n seed=SEED,\n start_with_note=START_WITH_NOTE,\n nb_notes=NB_NOTES,\n )\n\n\n\npipeline = Pipeline([\n ('partA', VerticalPipeline([\n ('score', score_generator),\n ('copy_timpani_on_bass', CopyPartIdxOnOther(1, 0)),\n ('copy_bass_parts', CopyPartsIdxOnOther(WINDS_IDX, STRINGS_IDX)),\n ('chords', ReplaceChordsIdx([0, -2, -1], [I, V.o(-1), I])),\n ('play_only1', ApplyOnChordsIdx([0, 1], PlayOnlyPartsIdxs(PERCS_IDX + WINDS_IDX))),\n ('play_only2', ApplyOnChordsIdx([2, 3], PlayOnlyPartsIdxs(STRINGS_IDX))),\n ('mask', RandomMaskGenerator(0.2, seed=SEED)),\n\n ])),\n # ('partB', VerticalPipeline([\n # ('score', score_generator),\n # ('mask', RandomMaskGenerator(0.3, seed=SEED + 1)),\n # ('modulate', Modulate(II.m)),\n # ('apply', ApplyOnPartsIdx([2], SilenceAction()))\n # #('project_on_piano', ProjectOnInstrument())\n # ]))\n])\n\n# pipeline = pipeline.set_params(\n# partB=dict(\n# score=dict(\n# seed_instruments=56,\n# 
seed_melody=[None, None, None, None, None, None, None, None, None],\n# seed_rythm=[None, None, None, None, None, None, None, None, None]\n# ),\n# #modulate=Identity()\n# ))\nprint(pipeline)\n\nscore = pipeline() % BASE_TONALITY\n\nprint()\nprint(score)\nprint()\n\n# instruments = {}\nscore_played = MusicLang(score).play()\nscore_played.to_midi('data/output.mid', tempo=TEMPO)\n\npipeline.save('data/pipeline.pip')\n","sub_path":"examples/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"79738411","text":"from keras.layers import Input\nfrom keras.models import Model\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot\nfrom pandas import DataFrame\nfrom pandas import read_csv\n\n'''\nconstruct the dataset // train and test\n'''\n\nfilename = 'C:\\\\Users\\\\Yunqing\\\\Desktop\\\\dissertation of HKU\\\\HKUresdata\\\\no_label\\\\n11_F.csv'\ndata = read_csv(filename, index_col=0)\ndata = data.values\nX_train = (data[:700])\nX_test = data[700:]\nX_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])\nX_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])\n\nencoding_dim = 10\n\n\nmodel = Sequential()\nmodel.add(Dense(encoding_dim, input_shape=(X_train.shape[1], X_train.shape[2])))\nmodel.add(Dense(12))\nsgd = SGD(lr=0.05, decay=1e-6, momentum=0.95, nesterov=True)\nmodel.compile(loss='mae', optimizer=sgd, metrics=['accuracy'])\n\nhistory = model.fit(X_train, X_train, epochs=100, batch_size=128, validation_data= (X_test, X_test))\nmodel.summary()\nprediction = model.predict(data)\n(DataFrame(prediction)).to_csv('C:\\\\Users\\\\Yunqing\\\\Desktop\\\\dissertation of HKU\\\\HKUresdata\\\\decoded\\\\11_re.csv')\n\npyplot.figure(1)\npyplot.plot(history.history['loss'], label='train_loss')\npyplot.plot(history.history['val_loss'], label='val_loss')\nplt.title(\"Model Loss\")\nplt.ylabel(\"loss\")\nplt.xlabel(\"epoch\")\nplt.legend([\"train_loss\", \"val_loss\"], loc=\"upper right\")\n\npyplot.figure(2)\npyplot.plot(history.history['acc'], label='train_acc')\npyplot.plot(history.history['val_acc'], label='val_acc')\n\nplt.title(\"Model Accuracy\")\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"epoch\")\nplt.legend([\"train_acc\", \"val_acc\"], loc=\"upper right\")\n\nplt.show()\n\n'''\nFinish of training\n'''","sub_path":"AutoEncoder.py","file_name":"AutoEncoder.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"185457763","text":"from Sakurajima.models import base_models as bm\n\n\nclass EpisodeList(object):\n def __init__(self, episode_list):\n self.validate_list(episode_list)\n self.__episode_list = episode_list\n\n def validate_list(self, episode_list):\n for episode in episode_list:\n if isinstance(episode, bm.Episode):\n continue\n else:\n raise ValueError(\n \"EpisodeList only take in lists that contain only Episode objects\"\n )\n\n def get_episode_by_number(self, episode_number):\n result = list(\n filter(\n lambda episode: True if episode.number == episode_number else False,\n self.__episode_list,\n )\n )\n if len(result) == 0:\n return None\n else:\n return result[0]\n\n def get_episode_by_title(self, title):\n result = list(\n filter(\n lambda episode: True if episode.title == title else False,\n self.__episode_list,\n 
)\n )\n if len(result) == 0:\n return None\n else:\n return result[0]\n\n def __getitem__(self, position):\n if isinstance(position, int):\n return self.__episode_list[position]\n elif isinstance(position, slice):\n return EpisodeList(self.__episode_list[position])\n\n def __len__(self):\n return len(self.__episode_list)\n\n def __reversed__(self):\n return self[::-1]\n\n def __repr__(self):\n return f\"EpisodeList({self.__episode_list})\"\n","sub_path":"Sakurajima/utils/episode_list.py","file_name":"episode_list.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"145893538","text":"\"\"\"\nTranslate the following for loops to equivalent while loops:\na) for count in range(100):\n print(count)\nb) for count in range(1, 101):\n print(count)\nc) for count in range(100, 0, -1):\n print(count)\n\"\"\"\n\n# a)\nprint(\"a) \", end=\" \")\ncount = 0\nwhile(count < 100):\n print(count, end=\" \")\n count += 1\n\n# b)\nprint(\"\\nb) \", end=\" \")\ncountb = 1\nwhile(countb < 101):\n print(countb, end=\" \")\n countb += 1\n\nprint(\"\\nc) \", end=\" \")\ncountc = 100\nwhile(countc != 0):\n print(countc, end=\" \")\n countc -= 1\n","sub_path":"Exercicios 3.5/exercicio3.5.1.py","file_name":"exercicio3.5.1.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"111985703","text":"import pytest\nimport requests\nimport json\nfrom urllib.parse import urljoin\nfrom application import atom\nfrom stub import flask_stub, simple_http_server\nfrom mock_server import flask_mock\nfrom tests.settings import APP_URL, APP_SHUTDOWN_URL, STUB_SHUTDOWN_URL, STUB2_URL, STUB2_HOST, STUB2_PORT, \\\n MOCK_URL, MOCK_SHUTDOWN_URL, MOCK_SET_USERS\n\n\nclass StubData:\n data = {\"valid\": \"True\"}\n\n\nclass AppData:\n data = {\"page\": \"base\"}\n\n\nclass Stub2Data:\n data = {\"ok\": \"ok\"}\n\n\n@pytest.fixture(scope='session')\ndef stub_server():\n server = simple_http_server.SimpleHTTPServer(STUB2_HOST, STUB2_PORT)\n server.start()\n yield server\n server.stop()\n\n\n@pytest.fixture(scope='session')\ndef mock():\n flask_mock.run_mock()\n atom.run_app()\n yield\n requests.get(MOCK_SHUTDOWN_URL)\n requests.get(APP_SHUTDOWN_URL)\n\n\n# @pytest.mark.skip('no need')\ndef test_stub():\n atom.DATA = AppData.data\n flask_stub.DATA = StubData.data\n atom.run_app()\n flask_stub.run_stub()\n response = requests.get(APP_URL)\n requests.get(APP_SHUTDOWN_URL)\n requests.get(STUB_SHUTDOWN_URL)\n assert response.status_code == 200\n assert json.loads(response.content) == AppData.data\n\n\n@pytest.mark.skip('no need')\ndef test_stub2(stub_server):\n stub_server.set_data(Stub2Data.data)\n response = requests.get(STUB2_URL)\n assert response.status_code == 200\n assert json.loads(response.content) == Stub2Data.data\n\n\n@pytest.mark.skip('no need')\ndef test_valid_user(mock):\n # кладём тестовые данные\n check_user = 'iliya'\n valid_users = 'kirill, iliya'\n mock_response = requests.put(MOCK_SET_USERS, data={'users': valid_users}, timeout=2)\n app_response = requests.get(urljoin(APP_URL, check_user))\n\n # Проверяем статус и данные из мока\n assert mock_response.status_code == 200\n assert mock_response.json() == {\"users\": f\"Users {valid_users} was set\"}\n\n # Проверяем статус и данные из приложения\n assert app_response.status_code == 200\n assert app_response.json() == f\"User {check_user} has permissions\"\n\n\n@pytest.mark.skip('no need')\ndef 
test_invalid_user(mock):\n # кладём тестовые данные\n check_user = 'yar'\n valid_users = 'kirill, iliya'\n mock_response = requests.put(MOCK_SET_USERS, data={'users': valid_users}, timeout=2)\n app_response = requests.get(urljoin(APP_URL, check_user))\n\n # Проверяем статус и данные из мока\n assert mock_response.status_code == 200\n assert mock_response.json() == {\"users\": f\"Users {valid_users} was set\"}\n\n # Проверяем статус и данные из приложения\n assert app_response.status_code == 401\n assert app_response.json() == f\"User {check_user} hasn't permissions\"\n","sub_path":"technoatom-2020-2/lection7/code/tests/test_stub_mock.py","file_name":"test_stub_mock.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"107770506","text":"\"\"\"\n// Time Complexity : o(n)\n// Space Complexity : o(1)\n// Did this code successfully run on Leetcode : yes\n// Any problem you faced while coding this : no\n\n\n// Your code here along with comments explaining your approach\n\n\"\"\"\n\n\nclass Solution(object):\n def maxArea(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n \n ar=0\n \n i=0\n j=len(height)-1\n while i[\\w|\\W]+)/add$', views.AddPaintingView.as_view(), name='add_painting'),\n url(r'^works/(?P[\\w|\\W]+)/edit$', views.EditPaintingView.as_view(),\n name='edit_painting'),\n url(r'^works/(?P\\d+)/delete$', views.DeletePaintingView.as_view(),\n name='delete_painting'),\n url(r'^works/album/(?P[\\w|\\W]+)$', views.album, name='album'),\n url(r'^about$', views.about, name='about'),\n url(r'^contact$', views.contact, name='contact'),\n url(r'^login', LoginView.as_view(), name='login'),\n # url(r'^signup$', views.signup, name='signup'),\n]\n","sub_path":"homepage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"174562022","text":"# encoding:utf-8\n\"\"\"\nhttp://libvirt.org/docs/libvirt-appdev-guide-python/en-US/html/libvirt_application_development_guide_using_python-Connections.html\n\n\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport libvirt\n\n\n# region open\ndef open_local():\n conn = libvirt.open('qemu:///system')\n if conn is None:\n print('Failed to open connection to qemu:///system', file=sys.stderr)\n exit(1)\n conn.close()\n exit(0)\n\n\ndef open_probe():\n \"\"\"\n URI is None then libvirt will apply some heuristics and probe for\n a suitable hypervisor vmware.\n \"\"\"\n conn = libvirt.open(None)\n if conn is None:\n print('Failed to open connection to qemu:///system', file=sys.stderr)\n exit(1)\n conn.close()\n exit(0)\n\n\ndef open_local_readonly():\n conn = libvirt.openReadOnly('qemu:///system')\n if conn is None:\n print('Failed to open connection to qemu:///system', file=sys.stderr)\n exit(1)\n conn.close()\n exit(0)\n\n\ndef open_remote():\n conn = libvirt.open('qemu+tcp://controller/system')\n if conn is None:\n print('Failed to open connection to qemu:///system', file=sys.stderr)\n exit(1)\n conn.close()\n exit(0)\n# endregion\n\n\n# region capability\ndef get_capability():\n conn = libvirt.open('qemu:///system')\n caps = conn.getCapabilities() # caps will be a string of XML\n print('Capability:\\n'+caps)\n conn.close()\n exit(0)\n# endregion\n\n\n# region host information\ndef host_info():\n conn = libvirt.open('qemu:///system')\n host = conn.getHostname()\n print('Hostname:' + host)\n\n vcpus = 
conn.getMaxVcpus(None)\n print('Maximum support virtual CPUs: ' + str(vcpus))\n\n nodeinfo = conn.getInfo()\n print('Model: '+str(nodeinfo[0]))\n print('Memory size: '+str(nodeinfo[1])+'MB')\n print('Number of CPUs: '+str(nodeinfo[2]))\n print('MHz of CPUs: '+str(nodeinfo[3]))\n print('Number of NUMA nodes: '+str(nodeinfo[4]))\n print('Number of CPU sockets: '+str(nodeinfo[5]))\n print('Number of CPU cores per socket: '+str(nodeinfo[6]))\n print('Number of CPU threads per core: '+str(nodeinfo[7]))\n\n type = conn.getType()\n print('Hypervisor: '+str(nodeinfo[7]))\n\n ver = conn.getVersion()\n print('Version: '+str(ver))\n\n ver = conn.getLibVersion()\n print('Libvirt Version: '+str(ver))\n\n# endregion\n\n\nif __name__ == '__main__':\n try:\n get_capability()\n except Exception as ex:\n print(ex)\n","sub_path":"projects/openstack/libvirt/guide/connections.py","file_name":"connections.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"16363672","text":"from PyQt5.QtWidgets import QMainWindow, QAction, QMenu, QMessageBox, \\\n QDockWidget, QFileDialog, QWidget, QLabel, QFrame, QDesktopWidget, \\\n QToolButton, QWidgetAction, QLayout, QHBoxLayout\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt, QDir\n\n\nfrom app.resources.resources import RESOURCES\nfrom app.data.database import DB\n\nfrom app.editor import timer\n\nfrom app.editor.settings import MainSettingsController\n\nfrom .property_menu import PropertiesMenu\nfrom .unit_painter_menu import UnitPainterMenu\nfrom .region_painter_menu import RegionMenu\nfrom .unit_group_painter_menu import UnitGroupMenu\n\nfrom app.editor.map_view import NewMapView, EditMode\n\n# Application State\nfrom app.editor.lib.state_editor.editor_state_manager import EditorStateManager\nfrom app.editor.lib.state_editor.state_enums import MainEditorScreenStates\nfrom app.editor.lib.components.dock import Dock\n\n\nclass LevelEditor(QMainWindow):\n def __init__(self, state_manager):\n super().__init__()\n self.state_manager = state_manager\n self.state_manager.subscribe_to_key(\n LevelEditor.__name__, 'selected_level', self.set_current_level)\n self.settings = MainSettingsController()\n self.rendered = False\n self._render()\n \n # create things\n self.create_actions()\n self.set_icons()\n \n timer.get_timer().tick_elapsed.connect(self.map_view.update_view)\n\n def on_property_tab_select(self, visible):\n if visible:\n self.map_view.set_mode(EditMode.NONE)\n\n def on_region_tab_select(self, visible):\n if visible:\n self.map_view.set_mode(EditMode.REGIONS)\n\n def on_units_tab_select(self, visible):\n if visible:\n self.map_view.set_mode(EditMode.UNITS)\n\n def on_group_tab_select(self, visible):\n if visible:\n self.map_view.set_mode(EditMode.GROUPS)\n\n def create_edit_dock(self):\n self.docks = {}\n\n self.docks['Properties'] = Dock(\n 'Properties', self, self.on_property_tab_select)\n self.properties_menu = PropertiesMenu(self.state_manager)\n self.docks['Properties'].setWidget(self.properties_menu)\n self.docks['Regions'] = Dock(\n 'Regions', self, self.on_region_tab_select)\n self.region_painter_menu = RegionMenu(\n self.state_manager, self.map_view)\n self.docks['Regions'].setWidget(self.region_painter_menu)\n self.docks['Units'] = Dock('Units', self, self.on_units_tab_select)\n self.unit_painter_menu = UnitPainterMenu(\n self.state_manager, self.map_view)\n self.docks['Units'].setWidget(self.unit_painter_menu)\n self.docks['Groups'] = Dock('Groups', 
self, self.on_group_tab_select)\n self.group_painter_menu = UnitGroupMenu(self.state_manager)\n self.docks['Groups'].setWidget(self.group_painter_menu)\n\n for title, dock in self.docks.items():\n dock.setAllowedAreas(Qt.RightDockWidgetArea)\n dock.setFeatures(QDockWidget.NoDockWidgetFeatures)\n self.addDockWidget(Qt.RightDockWidgetArea, dock)\n\n self.tabifyDockWidget(self.docks['Properties'], self.docks['Regions'])\n self.tabifyDockWidget(self.docks['Regions'], self.docks['Units'])\n self.tabifyDockWidget(self.docks['Units'], self.docks['Groups'])\n\n for title, dock in self.docks.items():\n dock.show()\n\n self.docks['Properties'].raise_()\n self.map_view.set_mode(EditMode.NONE)\n\n def create_statusbar(self):\n self.status_bar = self.statusBar()\n self.position_bar = QLabel(\"\", self)\n self.position_bar.setFrameStyle(QFrame.Panel | QFrame.Sunken)\n self.position_bar.setMinimumWidth(100)\n self.status_bar.addPermanentWidget(self.position_bar)\n\n def set_position_bar(self, pos):\n if pos:\n self.position_bar.setText(\"Position (%d, %d)\" % (pos[0], pos[1]))\n else:\n self.position_bar.setText(\"\")\n\n def set_message(self, msg):\n if msg:\n self.status_bar.showMessage(msg)\n else:\n self.status_bar.clearMessage()\n\n def set_current_level(self, level_nid):\n level = DB.levels.get(level_nid)\n self.current_level = level\n self.map_view.set_current_level(level)\n self.update_view()\n\n def update_view(self):\n if self.rendered: # (see _render() below)\n self.map_view.update_view()\n\n def create_actions(self):\n # menu actions\n self.zoom_in_act = QAction(\n \"Zoom in\", self, shortcut=\"Ctrl++\", triggered=self.map_view.zoom_in)\n self.zoom_out_act = QAction(\n \"Zoom out\", self, shortcut=\"Ctrl+-\", triggered=self.map_view.zoom_out)\n \n # toolbar actions\n self.back_to_main_act = QAction(\n \"Back\", self, shortcut=\"E\", triggered=self.edit_global)\n \n def set_icons(self):\n theme = self.settings.get_theme(0)\n if theme == 0:\n icon_folder = 'icons/icons'\n else:\n icon_folder = 'icons/dark_icons'\n self.back_to_main_act.setIcon(QIcon(f'{icon_folder}/left_arrow.png'))\n \n def create_toolbar(self, toolbar):\n toolbar.addAction(self.back_to_main_act, 0)\n\n def create_menus(self, app_menu_bar):\n edit_menu = app_menu_bar.getMenu('Edit')\n edit_menu.addSeparator()\n edit_menu.addAction(self.zoom_in_act)\n edit_menu.addAction(self.zoom_out_act)\n\n def edit_global(self):\n self.state_manager.change_and_broadcast('main_editor_mode', MainEditorScreenStates.GLOBAL_EDITOR)\n\n def _render(self):\n self.map_view = NewMapView(self)\n self.setCentralWidget(self.map_view)\n\n self.create_edit_dock()\n self.create_statusbar()\n\n self.map_view.update_view()\n # needed to prevent some race conditions in initializing different components\n self.rendered = True\n\n\n# Testing\n# run \"python -m app.editor.level_editor.level_editor\" from main directory\nif __name__ == '__main__':\n import sys\n from PyQt5.QtWidgets import QApplication\n app = QApplication(sys.argv)\n RESOURCES.load('default.ltproj')\n DB.load('default.ltproj')\n state_manager = EditorStateManager()\n state_manager.state.selected_level = 0\n window = LevelEditor(state_manager)\n window.state_manager.change_and_broadcast('selected_level', '0')\n window.show()\n app.exec_()\n","sub_path":"app/editor/level_editor/level_editor.py","file_name":"level_editor.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} 
+{"seq_id":"240044921","text":"#!/usr/bin/env python\n# coding=UTF-8\n\n# Via Steve Losh\n# http://stevelosh.com/blog/2010/02/my-extravagant-zsh-prompt/\n\n# Run:\n# chmod +x .dotfiles/lib/battery.py\n# to get this file to run correctly\n\nimport math, subprocess\n\np = subprocess.Popen([\"ioreg\", \"-rc\", \"AppleSmartBattery\"], stdout=subprocess.PIPE)\noutput = p.communicate()[0]\n\no_max = [l for l in output.splitlines() if 'MaxCapacity' in l][0]\no_cur = [l for l in output.splitlines() if 'CurrentCapacity' in l][0]\n\nb_max = float(o_max.rpartition('=')[-1].strip())\nb_cur = float(o_cur.rpartition('=')[-1].strip())\n\ncharge = b_cur / b_max\ncharge_threshold = int(math.ceil(10 * charge))\n\n# Output\ncount = int(math.ceil(charge_threshold)) * u'▸'\nout = (u'⚡').encode('utf-8')\nimport sys\n\ncolor_green = '%{\u001B[32m%}'\ncolor_yellow = '%{\u001B[1;33m%}'\ncolor_red = '%{\u001B[31m%}'\ncolor_reset = '%{\u001B[00m%}'\n\ncolor_out = (\n color_green if len(count) > 6\n else color_yellow if len(count) > 4\n else color_red\n)\n\nout = color_out + out + color_reset\nsys.stdout.write(out)","sub_path":"lib/battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"478157841","text":"from odoo import models, fields, api\n\n\nclass MateriasPlanEstudios(models.Model):\n _name = 'ops4g.materias_plan'\n \"\"\"name = fields.Many2one(comodel_name='ops4g.planestudios', \n string='Plan de estudios',\n required=True\n )\"\"\"\n x_materia_ids = fields.Many2one(comodel_name='op.subject',\n string='Materia',\n required=True,\n ondelete='cascade')\n\n periodo_academico = fields.Integer(string='Periodo',\n required=True)\n\n x_carrera_ids = fields.Many2one(comodel_name='ops4g.carrera',\n string='Carrera',\n ondelete='cascade')\n\n x_plan_estudios_ids = fields.Many2one(comodel_name='ops4g.planestudios',\n string='Plan de estudios',\n readonly=True,\n ondelete='cascade')\n\n materia_pre_id = fields.Many2one('op.subject', string='Pre-requisito', required=False)\n bloque = fields.Selection(\n [\n (1, '1er Ciclo'),\n (2, '2do Ciclo'),\n (3, '3er Ciclo'),\n ]\n , string='Ciclo de formación', default=1)\n\n _sql_constraints = [\n ('unique_periodo_carrera_materia', 'UNIQUE(x_materia_ids,x_carrera_ids,periodo_academico)',\n 'No es posible ingresar, materias duplicadas'),\n ]\n\n#####\n","sub_path":"escolares/models/datos_materias_plan.py","file_name":"datos_materias_plan.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"205596571","text":"a= str(input('Gimme a string!')).lower()\nb= str(input('Gimme another string (of the same length)!')).lower()\ni=0\nh=0\nif(len(a) == len(b)):\n print('Yay, they\\'re the same length.')\n for i in range(0, len(a)):\n if(a[i] in b):\n print ('Found ' +a[i]+ ' in '+b)\n h+=1\n else:\n print ('Could not find ' +a[i]+ ' in ' + b)\n if(h >= len(a)):\n print(a + ' is a permutation of '+b+'!')\n else:\n print(a + ' is not a permutation of '+b+'!')\nelse:\n print('Those 2 aren\\'t the same length.')\n","sub_path":"week2/stephd_permutation_test.py","file_name":"stephd_permutation_test.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"104761838","text":"#Author Scon\n# -*- coding utf-8 -*-\n# help()\n\n\nimport sys\nimport os\n\n\nclass File_Coversion:\n 'Convert File'\n\n 
def __init__(self, file):\n self.file = file\n\n def to_unix(self):\n\n unixflie = os.path.splitext(self.file)[0] + '_unix.txt'\n with open(self.file, 'r') as fwindowsobj:\n with open(unixflie, 'w') as funixobj:\n for line in fwindowsobj:\n lines = line.rstrip() + '\\n'\n funixobj.write(lines)\n\n def to_windows(self):\n\n windowsfile = os.path.splitext(self.file)[0] + '_windows.txt'\n with open(self.file, 'r') as funixobj:\n with open(windowsfile, 'w') as fwindowsobj:\n for line in funixobj:\n lines = line.rstrip() + '\\r\\n'\n fwindowsobj.write(lines)\n\n\nif __name__ == '__main__':\n while True:\n tips = \"\"\"\n [1] Unix To Windows\n [2] Windows To Unix\n [3] Exit\n Please enter your choice (1/2/3):\n \"\"\"\n try:\n choice = input(tips).strip()[0]\n except IndexError:\n print(\"\\033[31;1mInvaild Input!\\033[0m\")\n except KeyboardInterrupt:\n break\n else:\n # 转换为windows格式\n if choice == '1':\n windows_file = input(\"\\t \\033[34;1mPlease enter you want to convert file :\\033[0m\")\n conversion = File_Coversion(windows_file)\n conversion.to_windows()\n print(\"\\t\\033[31;1mConvert Successful!\\033[0m\")\n # 转换为Unix格式\n elif choice == '2':\n unix_file = input(\"\\t \\033[34;1mPlease enter you want to convert file :\\033[0m\")\n conversion = File_Coversion(unix_file)\n conversion.to_unix()\n print(\"\\t \\033[31;1mConvert Successful!\\033[0m\")\n elif choice == \"3\":\n exit()\n else:\n print('\\t',\"\\033[31;1mInvaild Input!\\033[0m\")\n","sub_path":"Python_Practical/Unix2Windows.py","file_name":"Unix2Windows.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"538542004","text":"#1.factorial of a number with and without recursion\n#with recursion\ndef recursion(n):\n if n==0:\n print(\"Enter positive integer number\")\n elif n==1:\n return n\n else:\n return n * recursion(n-1)\nprint(recursion(3))\n\n#without recursion\n#a\na=1\nfor x in range(1,5):\n a *= x\nprint(a)\n\n#b\ni=1\nmul=1\nwhile i<=5:\n mul *= i\n i+=1\nprint(mul)","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"107015754","text":"#!/usr/bin/env python\n#\n\n\"\"\"\n\n\"\"\"\n\n\nimport os\nimport sys\nimport urllib\nimport logging\nimport re # For regular expressions\n\nfrom google.appengine.api import search\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import memcache\n\nimport config\nfrom jinja_env import JINJA_ENVIRONMENT\nimport models.models as models\nfrom handlers.BaseHandler import BaseHandler\n\n\n# Request Handler\nclass DumpDataHandler(BaseHandler):\n def __init__(self, *args, **kwargs):\n super(DumpDataHandler, self).__init__(*args, **kwargs)\n self.m_htmlTemplate = './templates/dump_data.html'\n self.m_batchSize = 100\n\n def get(self):\n previousPage, currentPage, nextPage = \\\n self.getPrevCurrNextPages(self.request.get('page'))\n\n self.setBatchSize(self.request.get('size'))\n\n \n # Get data if the user is logged in \n data = [] \n if users.get_current_user():\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n\n # Query for all english phrases whose ancestor's key matches the language.\n english_phrases_query = models \\\n .EnglishPhrase \\\n .query(ancestor=models.phrase_key(config.ENGLISH)) \\\n .order(-models.EnglishPhrase.date)\n entryKeys = english_phrases_query.fetch(\\\n 
self.m_batchSize, \n keys_only=True, \n offset=self.m_batchSize*previousPage)\n\n # Query for translations and user names \n for k in entryKeys:\n data.append({\n 'entry': self.getPhrase(k).content, \n 'definition': self.getTranslation(k)\n })\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n\n # Package the responses into an object for the template\n template_values = {\n 'data': data,\n 'url': url,\n 'url_linktext': url_linktext,\n 'previousPage': previousPage,\n 'currentPage': currentPage,\n 'nextPage': nextPage,\n 'size': self.m_batchSize,\n }\n\n # Write response using the template and the parameters\n template = JINJA_ENVIRONMENT.get_template(self.m_htmlTemplate)\n self.response.write(template.render(template_values))\n\n def getPrevCurrNextPages(self, page):\n if page:\n try:\n currentPage = int(page)\n except ValueError as e:\n currentPage = 1\n\n if currentPage <= 0:\n currentPage = 1\n else:\n currentPage = 1 \n previousPage = currentPage - 1\n nextPage = currentPage + 1\n\n return previousPage, currentPage, nextPage\n\n def setBatchSize(self, size):\n if size:\n try:\n inputSize = int(size)\n if inputSize > 0 and inputSize < self.m_batchSize:\n self.m_batchSize = inputSize\n except ValueError as e:\n pass\n\n def getPhrase(self, entryKey):\n\n # Try memcache first\n value = memcache.get(config.ENGLISH+'-'+str(entryKey.id()))\n if value is not None:\n return value\n\n # Else, use datastore \n phrase = entryKey.get()\n\n # Save to memcache for future reads\n memcache.add(key=config.ENGLISH+'-'+str(entryKey.id()), value = phrase)\n\n return phrase\n\n \n def getTranslation(self, entryKey):\n\n # Try memcache first\n value = memcache.get(config.KOREAN+'-'+str(entryKey.id()))\n if value is not None:\n return value\n\n # Try datastore\n translation_query = models.KoreanPhrase.query(ancestor=entryKey)\n k_phrases = translation_query.fetch(1)\n translation = ''\n if len(k_phrases) > 0:\n translation = k_phrases[0].content\n\n # Save to memcache for future reads\n memcache.add(key=config.KOREAN+'-'+str(entryKey.id()), value = translation)\n\n return translation\n \n","sub_path":"handlers/DumpDataHandler.py","file_name":"DumpDataHandler.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"314776770","text":"__author__ = 'Konstantin Dmitriev'\n\nfrom gettext import gettext as _\nfrom optparse import OptionParser\nimport os\nfrom renderchan.core import RenderChan\nfrom renderchan.file import RenderChanFile\nimport sys\n\n\ndef process_args():\n parser = OptionParser(\n usage=_(\"\"\"\n %prog [options] [FILE] \"\"\"))\n\n parser.add_option(\"--action\", dest=\"action\",\n action=\"store\",\n help=_(\"Action: render|merge|snapshot.\"))\n parser.add_option(\"--profile\", dest=\"profile\",\n action=\"store\",\n help=_(\"Profile.\"))\n parser.add_option(\"--format\", dest=\"format\",\n action=\"store\",\n help=_(\"Format.\"))\n parser.add_option(\"--start\", dest=\"start\",\n action=\"store\", default=None,\n help=_(\"Start frame.\"))\n parser.add_option(\"--end\", dest=\"end\",\n action=\"store\", default=None,\n help=_(\"End frame.\"))\n parser.add_option(\"--stereo\", dest=\"stereo\",\n action=\"store\", default=\"\",\n help=_(\"Stereo configuration.\"))\n parser.add_option(\"--compare-time\", dest=\"compare_time\",\n action=\"store\",\n help=_(\"Don't render if there is an existing file and it is newer than specified time.\"))\n 
parser.add_option(\"--target-dir\", dest=\"snapshot_target\",\n action=\"store\", default=None,\n help=_(\"Target directory for snapshots.\"))\n\n options, args = parser.parse_args()\n\n # override defaults with settings from file\n if args:\n options.filename=os.path.abspath(args[0])\n else:\n print(\"ERROR: Please provide input filename\", file=sys.stderr)\n exit(1)\n\n return options, args\n\ndef updateCompletion(value):\n print(\"Rendering: %s\" % (value*100))\n\n\ndef main(argv):\n options, args = process_args()\n\n renderchan = RenderChan()\n renderchan.projects.readonly = True\n\n if options.profile:\n renderchan.setProfile(options.profile)\n if options.stereo in (\"left\",\"l\"):\n renderchan.setStereoMode(\"left\")\n elif options.stereo in (\"right\",\"r\"):\n renderchan.setStereoMode(\"right\")\n if options.compare_time:\n compare_time=float(options.compare_time)\n else:\n compare_time=None\n if not ( options.action and options.action in ['render','merge','snapshot'] ):\n options.action = 'render'\n\n if options.action != 'snapshot':\n taskfile = RenderChanFile(options.filename, renderchan.modules, renderchan.projects)\n taskfile.setFormat(options.format)\n\n if options.action == 'merge' and options.stereo and ( options.stereo[0:1]==\"v\" or options.stereo[0:1]==\"h\" ):\n pass\n else:\n (isDirty, tasklist, maxTime)=renderchan.parseDirectDependency(taskfile, compare_time)\n if isDirty:\n print(\"ERROR: There are unrendered dependencies for this file!\", file=sys.stderr)\n print(\" (Project tree changed or job started too early?)\", file=sys.stderr)\n print(\" Aborting.\", file=sys.stderr)\n exit(1)\n\n if options.action == 'render':\n if options.start and options.end:\n renderchan.job_render(taskfile, taskfile.getFormat(), updateCompletion, int(options.start), int(options.end), compare_time)\n else:\n renderchan.job_render(taskfile, taskfile.getFormat(), updateCompletion, compare_time)\n elif options.action == 'merge':\n if options.stereo and ( options.stereo[0:1]==\"v\" or options.stereo[0:1]==\"h\" ):\n renderchan.job_merge_stereo(taskfile, options.stereo)\n else:\n renderchan.job_merge(taskfile, taskfile.getFormat(), renderchan.projects.stereo, compare_time)\n elif options.action == 'snapshot':\n if not options.snapshot_target:\n print(\"ERROR: Please specify output filename using --target-dir option.\", file=sys.stderr)\n renderchan.job_snapshot(options.filename, os.path.abspath(options.snapshot_target))\n\n","sub_path":"renderchan/joblauncher.py","file_name":"joblauncher.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"566494649","text":"#!/usr/bin/python3\n# -*- coding=utf-8 -*-\nimport socket\nimport threading\nimport struct\nimport json\nimport psutil\nimport sys\n\nfrom config import Config, Constant\nfrom log import logger\n \nHOST = Config.status_server_ip\ntry:\n PORT = int(Config.status_server_port)\nexcept:\n logger.error('配置文件meta.ini中的status_server【section】port参数配置为非数字,请检查配置文件。')\n sys.exit()\nADDR = (HOST, PORT)\n \nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntry:\n sock.connect(ADDR)\nexcept OSError:\n logger.error('该请求的地址{}无效,与Stats Server连接失败。'.format(ADDR))\n# sys.exit()\n\nmajor = Constant.MAJOR_VERSION\nminor = Constant.MINOR_VERSION\nsrc_type = Constant.METADATA_TYPE\ndst_type = Constant.STATUS_TYPE\nsrc_id = int(0x08000000)\ndst_id = int(0x0a000000) # Status Server \ncommand = int(0x00030001)\nfmt = '!I4BII16xI4xQ8xI4x512s'\n\ninfoDict = 
{}\n\ndef get_info():\n    infoDict['cpu_percent'] = psutil.cpu_percent(1)\n    infoDict['mem_info'] = list(psutil.virtual_memory())\n    infoDict['disk_info'] = list(psutil.disk_usage('/'))\n    return infoDict\n\ndef generate_hb():\n    d = get_info()\n    ms_info = json.dumps(d)\n    total = count = len(ms_info)\n    length = total + Constant.HEAD_LENGTH\n    data = struct.pack(fmt, length, major, minor, src_type, dst_type, src_id,\n                       dst_id, command, total, count, ms_info.encode('utf-8'))\n    return data\n\ndef run_timer():\n    data = generate_hb()\n#    sock.sendall(data)\n    print(data)\n    global timer\n    timer = threading.Timer(5.0, run_timer)\n    timer.start()\n    \ndef send_hb(): \n    timer = threading.Timer(0, run_timer)\n    timer.start()\n\nif __name__ == '__main__':\n    send_hb()\n    \n    \n    \n    \n","sub_path":"backup/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"506645823","text":"\"\"\"\nhttps://buptwc.com/2018/10/21/Leetcode-928-Minimize-Malware-Spread-II/\n\n\"\"\"\n\nimport collections\nimport itertools\n\nclass SolutionBFS:\n    def minMalwareSpread(self, graph, initial):\n        n = len(graph)\n        sources = collections.defaultdict(list)\n        # BFS from each initially infected node in turn; with no more than 300 nodes in total, this will not time out\n        for init in initial:\n            visited = set(initial)\n            queue = collections.deque([init])\n            while queue:\n                node = queue.popleft()\n                for nei in range(len(graph[node])):\n                    if graph[node][nei] == 0:\n                        continue\n                    if nei in visited:\n                        continue\n                    visited.add(nei)\n                    sources[nei].append(init)\n                    queue.append(nei)\n        # Count which initial node is the sole infector most often\n        count = [0] * n\n        for key in sources.keys():\n            # if a node can be infected by only one initial node\n            if len(sources[key]) == 1:\n                count[sources[key][0]] += 1\n        if max(count) == 0:\n            return min(initial)\n        return count.index(max(count))\n\n\n\n\n\nclass SolutionBetter:\n    def minMalwareSpread(self, graph, initial) -> int:\n        if not graph:\n            return -1\n        N = len(graph)\n        clean = set(range(N)) - set(initial)\n        parents = list(range(N))\n        size = [1] * N\n\n        def find(x):\n            if parents[x] != x:\n                parents[x] = find(parents[x])\n            return parents[x]\n\n        def union(x, y):\n            a, b = find(x), find(y)\n            if a != b:\n                if size[a] < size[b]:\n                    parents[a] = b\n                    size[b] += size[a]\n                else:\n                    parents[b] = a\n                    size[a] += size[b]\n\n        for u, v in itertools.combinations(clean, 2):\n            if graph[u][v]: union(u, v)\n\n        table = collections.defaultdict(set)\n        infectedTimes = collections.Counter()\n        for u in initial:\n            for v in clean:\n                if graph[u][v]:\n                    table[u].add(find(v))\n\n            for comm in table[u]:\n                infectedTimes[comm] += 1\n\n        count = [0] * N\n        for u, comms in table.items():\n            for comm in comms:\n                if infectedTimes[comm] == 1:\n                    count[u] += size[comm]\n\n        maxi = max(count)\n        return count.index(maxi) if maxi != 0 else min(initial)\n\n\n\n\n\n\n","sub_path":"LeetcodeNew/python/LC_928.py","file_name":"LC_928.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"530230104","text":"# Copyright 2015 Breqwatr, Inc\n# All Rights Reserved.\n#\n# Licensed under the MIT License (the \"License\"); you may\n# not use this file except in compliance with the License.\n#\n\nimport re\nimport six\nimport time\nimport socket\nimport logging\nimport traceback\n\nfrom umemcache.exceptions import *\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass _ConnectionDeadError(Exception):\n    \"\"\" Used internally to retry operations on dead connections \"\"\"\n    pass\n\n\nclass Connection(object):\n\n    def __init__(self, host, 
dead_retry=30, read_timeout=3,\n                 connect_timeout=0.2, flush_on_reconnect=False, **kwargs):\n        self.dead_retry = dead_retry\n        self.read_timeout = read_timeout\n        self.connect_timeout = connect_timeout\n        self.flush_on_reconnect = flush_on_reconnect\n        self.weight = 1\n        if isinstance(host, tuple):\n            host, self.weight = host\n\n        # TODO(Sergio): make this section more readable (and don't \n        # require inet6 prefix?)\n        # Parse connection string\n        # Unix socket\n        m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)\n        if not m:\n            # IPv6 address\n            m = re.match(r'^(?P<proto>inet6):'\n                         r'\\[(?P<host>[^\\[\\]]+)\\](:(?P<port>[0-9]+))?$', host)\n        if not m:\n            # IPv4 address\n            m = re.match(r'^(?P<proto>inet):'\n                         r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)\n        if not m:\n            # IPv4 address (without prefix)\n            m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)\n        if not m:\n            raise ValueError('Unable to parse connection string: \"%s\"' % host)\n\n        host_info = m.groupdict()\n        if host_info.get('proto') == 'unix':\n            self.socket_family = socket.AF_UNIX\n            self.address = host_info['path']\n        else:\n            if host_info.get('proto') == 'inet6':\n                self.socket_family = socket.AF_INET6\n            else:\n                self.socket_family = socket.AF_INET\n            address = host_info['host']\n            port = int(host_info.get('port') or 11211)\n            self.address = (address, port)\n\n        self.deaduntil = 0\n        self.socket = None\n        self.flush_on_next_connect = False\n\n        self.buffer = b''\n\n    def __repr__(self):\n        return '<{cls} for {address}>'.format(\n            cls=self.__class__.__name__,\n            address=self.address,\n        )\n\n    def connect(self):\n        if self._dead():\n            return None\n        if self.socket:\n            return self.socket\n        connection = socket.socket(self.socket_family, socket.SOCK_STREAM)\n        connection.settimeout(self.connect_timeout)\n        try:\n            connection.connect(self.address)\n        except socket.timeout as msg:\n            LOG.warning('Timeout connecting to %s: %s', self.address, msg)\n            self.mark_dead()\n            return None\n        except socket.error:\n            LOG.exception('Error connecting to %s:', self.address)\n            self.mark_dead()\n            return None\n        connection.settimeout(self.read_timeout)\n        self.socket = connection\n        self.buffer = b''\n        if self.flush_on_next_connect:\n            self.flush()\n            self.flush_on_next_connect = False\n        return connection\n\n    def disconnect(self):\n        if self.socket:\n            self.socket.close()\n            self.socket = None\n        LOG.info('Disconnected from %s', self.address)\n\n    def mark_dead(self):\n        if self._dead():\n            return\n        LOG.warn('Marking server %s as dead', self.address)\n        self.deaduntil = time.time() + self.dead_retry\n        if self.flush_on_reconnect:\n            self.flush_on_next_connect = True\n        self.disconnect()\n\n    def _dead(self):\n        if self.deaduntil > time.time():\n            return True\n        return False\n\n    def _receive(self, length):\n        \"\"\" Read directly from the underlying socket \"\"\"\n        read_func = self.socket.recv\n        read_buffer = self.buffer\n        while len(read_buffer) < length:\n            data = read_func(max(length - len(read_buffer), 4096))\n            read_buffer += data\n            if not data:\n                raise MemcacheReadError('Expected to receive %d bytes of'\n                                        ' data from server, but only received'\n                                        ' %d' % (length, len(read_buffer)))\n        self.buffer = read_buffer[length:]\n        return read_buffer[:length]\n\n    def flush(self):\n        raise NotImplementedError\n\n\nclass AsciiConnection(Connection):\n    \"\"\" Used for communication with memcache using its ascii protocol \"\"\"\n    def __init__(self, *args, **kwargs):\n        super(AsciiConnection, self).__init__(*args, **kwargs)\n\n    def _send(self, command):\n        \"\"\" Given command (in bytes!), append '\\\r\\\n' and send \"\"\"\n        # command_bytes = command.encode() if six.PY3 else command\n        
LOG.debug(\"Sending command to server %s: %r\", self.address, command)\n self.socket.sendall(command + b'\\r\\n')\n\n def _encode_cmd(self, cmd, key, headers=None, noreply=False, value=None):\n # Note(Sergio): about usage of this function:\n # 'cmd' should be bytes already from Client or this class\n # 'key' should be bytes already from Client\n # 'headers' is built by the caller of _encode_cmd. It is a string\n # because you can't do b'%s %d' % (...) in python3 < 3.5\n # 'value' should be bytes already from Client\n fullcmd = [cmd, b' ', key]\n\n if headers:\n if six.PY3:\n headers = headers.encode('utf-8')\n fullcmd.extend((b' ', headers))\n\n if noreply:\n fullcmd.append(b' noreply')\n\n if value is not None:\n fullcmd.extend((b'\\r\\n', value))\n return b''.join(fullcmd)\n\n def _expect(self, text, raise_exception=True):\n line = self._readline(raise_exception)\n if line != text:\n msg = 'Expected \"%s\", but received \"%s\" instead', text, line\n LOG.error(msg)\n if raise_exception:\n raise MemcacheBadResponse(msg)\n return line\n\n def _readline(self, raise_exception=True):\n read_buffer = self.buffer\n if self.socket:\n read_function = self.socket.recv\n else:\n read_function = lambda bufsize: b''\n\n while True:\n index = read_buffer.find(b'\\r\\n')\n if index >= 0:\n break\n data = read_function(4096)\n if not data:\n LOG.error('Connection to %s closed during _readline()',\n self.address)\n if raise_exception:\n raise _ConnectionDeadError()\n # raise MemcachedException('Server Dead')\n else:\n return b''\n read_buffer += data\n\n # leave any data after b'\\r\\n' in the buffer\n self.buffer = read_buffer[index + 2:]\n # exclude the newline from the return value\n return read_buffer[:index]\n\n def _get_response_header(self, cas_response=False, raise_exception=False):\n line = self._readline(raise_exception)\n LOG.debug('Response header: %r', line)\n\n # Note(Sergio): slicing a string doesn't raise indexError if the\n # length of the string is exceeded on the upper end.\n # For example, b'END'[:5] works fine, and it's\n # faster than using .startswith()\n if line[:5] == b'VALUE':\n if cas_response:\n resp, key, flags, length, cas_id = line.split()\n return (key, int(flags), int(length), int(cas_id))\n else:\n resp, key, flags, length = line.split()\n return (key, int(flags), int(length))\n elif line == b'END':\n # Value not found\n if cas_response:\n return (None, None, None, None)\n else:\n return (None, None, None)\n return self._handle_unexpected_response(line)\n\n def _receive_value(self, length):\n length += 2 # include \\r\\n\n data = self._receive(length)\n if len(data) != length:\n raise MemcacheReadError('received %d bytes but expected %d bytes'\n % (len(data), length))\n data = data[:-2] # strip \\r\\n\n return data\n\n def _handle_unexpected_response(self, response):\n if response.startswith(b'SERVER_ERROR'):\n msg = response.replace(b'SERVER_ERROR ', b'', 1)\n raise MemcacheServerError(msg)\n elif response.startswith(b'CLIENT_ERROR'):\n msg = response.replace(b'CLIENT_ERROR ', b'', 1)\n raise MemcacheClientError(msg)\n elif response == b'ERROR':\n LOG.error('\"ERROR\" response from memcache. 
This is most likely'\n                      ' a bug in umemcache.')\n            LOG.error('Please consider reporting this to the developers!')\n\n        # Ensure the exception gets logged\n        try:\n            raise MemcacheBadResponse('Unexpected response from '\n                                      'memcache: %s' % response)\n        except MemcacheException:\n            LOG.exception('Unexpected response:')\n            raise\n\n    def get(self, cmd, key):\n        def _unsafe_get():\n            try:\n                self._send(self._encode_cmd(cmd, key))\n\n                if cmd == b'gets':\n                    raise NotImplementedError\n                    # (response_key, flags, length,\n                    #  cas_id) = self._get_response_header(cas_response=True)\n                    # if response_key and self.cache_cas:\n                    #     self.cas_ids[response_key] = cas_id\n                else:\n                    response_key, flags, length = self._get_response_header()\n\n                # Key not found\n                if not response_key:\n                    return None\n\n                value = self._receive_value(length)\n                self._expect(b'END')\n\n                return (value, flags)\n\n            except socket.error as msg:\n                LOG.warn('Socket error: %s', msg)\n                LOG.debug(traceback.format_exc())\n                self.mark_dead()\n                return None\n\n        try:\n            return _unsafe_get()\n        except _ConnectionDeadError:\n            # retry once\n            try:\n                if self.connect():\n                    return _unsafe_get()\n            except (_ConnectionDeadError, socket.error):\n                LOG.warn('Server %s is unreachable', self.address)\n                LOG.debug(traceback.format_exc())\n                self.mark_dead()\n            return None\n\n    def get_multi(self, keys):\n        command = self._encode_cmd(b'get', b' '.join(keys))\n\n        result = {}\n        get_header = self._get_response_header\n        try:\n            # Send the get command we just built\n            self._send(command)\n\n            # parse the responses\n            key, flags, length = get_header(\n                raise_exception=False,\n            )\n            while key is not None:\n                value = self._receive_value(length)\n                result[key] = (value, flags)\n\n                # get the next value\n                key, flags, length = get_header(\n                    raise_exception=False,\n                )\n        except (socket.error, _ConnectionDeadError) as msg:\n            LOG.warn(\"Socket error: %s\", msg)\n            LOG.debug(traceback.format_exc())\n            self.mark_dead()\n        return result\n\n    def set(self, cmd, key, value, flags, time):\n        # if cmd == b'cas' and key not in self.cas_ids:\n        #     return self.set(b'set', key, value, flags, time)\n\n        def _unsafe_set():\n            \"\"\"Unsafe because we're not checking any arguments\n            for correctness.\n            For example if you pass an invalid command, like cmd='bananas',\n            bad things happen\n            \"\"\"\n            if cmd == b'cas':\n                raise NotImplementedError\n            else:\n                header = '%d %d %d' % (flags, time, len(value))\n                fullcmd = self._encode_cmd(cmd, key, header, False, value)\n\n            try:\n                self._send(fullcmd)\n                response = self._readline()\n                if response == b'STORED':\n                    return True\n                elif response == b'NOT_STORED':\n                    return False\n                return self._handle_unexpected_response(response)\n            except socket.error as msg:\n                LOG.warn('Socket error: %s', msg)\n                LOG.debug(traceback.format_exc())\n                self.mark_dead()\n\n            return False\n\n        try:\n            return _unsafe_set()\n        except _ConnectionDeadError:\n            # retry once\n            try:\n                if self.connect():\n                    return _unsafe_set()\n            except (_ConnectionDeadError, socket.error):\n                LOG.warn('Server %s is unreachable', self.address)\n                LOG.debug(traceback.format_exc())\n                self.mark_dead()\n            return False\n\n    def set_multi(self, cmd, key_value_flags, time):\n        \"\"\" perform 'cmd' on the given list where key_value_flags is an\n        iterable of (key, value, value_flags) tuples\n        \"\"\"\n        commands = []\n        encode = self._encode_cmd\n        for key, value, flags in key_value_flags:\n            header = '%d %d %d' % (flags, time, len(value))\n            command = encode(cmd, key, header, False, value)\n            commands.append(command)\n\n        commands = b'\\r\\n'.join(commands)\n\n        success = True\n        try:\n            
self._send(commands)\n            for _ in key_value_flags:\n                success &= self._readline() == b'STORED'\n        except (socket.error, _ConnectionDeadError) as msg:\n            LOG.warn(\"Socket error: %s\", msg)\n            LOG.debug(traceback.format_exc())\n            self.mark_dead()\n            return False\n        return success\n\n    def incr_decr(self, cmd, key, delta):\n        try:\n            header = '%d' % delta\n            self._send(self._encode_cmd(cmd, key, header))\n\n            response = self._readline()\n            return int(response)\n        except ValueError:\n            if response == b'NOT_FOUND':\n                return False\n            return self._handle_unexpected_response(response)\n        except socket.error as msg:\n            LOG.warn(\"Socket error: %s\", msg)\n            LOG.debug(traceback.format_exc())\n            self.mark_dead()\n            return False\n\n    def touch(self, key, time):\n        try:\n            header = '%d' % time\n            self._send(self._encode_cmd(b'touch', key, header))\n            return self._readline() == b'TOUCHED'\n        except socket.error as msg:\n            LOG.warn(\"Socket error: %s\", msg)\n            LOG.debug(traceback.format_exc())\n            self.mark_dead()\n            return False\n\n    def delete(self, key):\n        try:\n            self._send(self._encode_cmd(b'delete', key))\n            return self._readline() == b'DELETED'\n        except socket.error as msg:\n            LOG.warn(\"Socket error: %s\", msg)\n            LOG.debug(traceback.format_exc())\n            self.mark_dead()\n            return False\n\n    def delete_multi(self, keys):\n        # example keys=['a', 'b', 'c']\n        # the join produces \"a\\r\\ndelete b\\r\\ndelete c\"\n        cmd = b'delete ' + (b'\\r\\ndelete '.join(keys))\n        success = True\n        try:\n            self._send(cmd)\n            for _ in keys:\n                success &= self._readline() == b'DELETED'\n        except (socket.error, _ConnectionDeadError) as msg:\n            LOG.warn(\"Socket error: %s\", msg)\n            LOG.debug(traceback.format_exc())\n            self.mark_dead()\n            return False\n        return success\n\n    def flush(self):\n        self._send(b'flush_all')\n        self._expect(b'OK')\n\n\nclass BinaryConnection(Connection):\n    \"\"\" Used for communication with memcache using its binary protocol \"\"\"\n    def __init__(self, *args, **kwargs):\n        super(BinaryConnection, self).__init__(*args, **kwargs)\n\n\ndef get_connection_class(protocol):\n    if protocol == 'binary':\n        return BinaryConnection\n    elif protocol == 'ascii':\n        return AsciiConnection\n    else:\n        raise ValueError('Unknown protocol: %r' % protocol)\n","sub_path":"umemcache/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":16510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"388741764","text":"import pymongo\nimport random\n\nclient= pymongo.MongoClient(\"mongodb+srv://saksham:saksham@cluster0-nvuma.mongodb.net/test?retryWrites=true&w=majority\")\ndb = client.get_database('db')\n\n\ndef botResponse(input, uname):\n    res = db.chats.find_one({\"user\":input})\n    if(db.chat_history.find_one( {\"username\":uname}) == None):\n        db.chat_history.insert_one( {\"username\":uname,\"bot\":[\"Hey,\"+uname+\"!\"],\"user\":[input] } )\n    else:\n        db.chat_history.update_one( {\"username\":uname}, {\"$push\":{\"user\":input}} )\n    if(res == None):\n        reply = \"Out of context!\"\n        db.chat_history.update_one( {\"username\":uname}, {\"$push\":{\"bot\":reply}} )\n        return reply\n    #indx=random.randrange(0, len(res[\"response\"]))\n    db.chat_history.update_one( {\"username\":uname}, {\"$push\":{\"bot\":res[\"response\"]}} )    \n    return res[\"response\"]  \n\ndef chat_history(username):\n    chat = db.chat_history.find_one( {\"username\":username} )\n    if(chat == None):\n        return \"\"\n    return chat 
","sub_path":"reply_rec.py","file_name":"reply_rec.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"215390710","text":"##import imaplib\n##import email\n##\n##con=imaplib.IMAP4_SSL(host='outlook.office365.com')\n##con.login('pengfei.xu@ericsson.com','Morning173')\n##\n##con.select()\n##type1,data1=con.fetch('1','(RFC822)')\n##msg=email.message_from_string(data1[0][1].decode('utf-8'))\n##\n##for part in msg.walk():\n## if not part.is_multipart(): \n## print(part.get_payload(decode=True).decode('utf-8')) \n\n\nimport csv\nwith open(r'C:\\Users\\exuupei\\Desktop\\1.csv', 'r') as csvfile:\n \n fieldnames = ['first_name', 'last_name']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n writer.writerow({'first_name': 'Baked', 'last_name': 'Liang'})\n writer.writerow({'first_name': 'Lovely', 'last_name': 'Spam'})\n writer.writerow({'first_name': 'Wonderful', 'last_name': 'Spam'})\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"22553186","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 30 17:49:15 2018\n\n@author: danpal\n\"\"\"\n\nimport pyknotid.spacecurves as sc\nimport pandas as pd\nfrom Bio.PDB import PDBParser\nimport Knot\nimport centroidClosure\nimport alexanderBriggs\n\n\ndef get_struct(ID,\n file,\n parser=PDBParser(structure_builder=Knot.KnotBuilder())):\n return parser.get_structure(ID, file)\n\n\ndef reduce_prot(ID,\n chain,\n file,\n parser=PDBParser(structure_builder=Knot.KnotBuilder())):\n pdb = parser.get_structure(ID, file)\n pdb.calc_bb_coords()\n prot = pdb.bb_coords[chain]\n close = centroidClosure.centroidClosure(prot)\n return alexanderBriggs.alexanderBriggs(close)\n\n\ndef close_prot(ID,\n chain,\n file,\n parser=PDBParser(structure_builder=Knot.KnotBuilder())):\n pdb = parser.get_structure(ID, file)\n pdb.calc_bb_coords()\n prot = pdb.bb_coords[chain]\n return centroidClosure.centroidClosure(prot)\n\n\ndef identify_knot(coords):\n k = sc.Knot(coords)\n k.close()\n return k.identify()\n\n\ndef test_knot_db(knots=['(+)3_1', '(-)3_1', '4_1', '(-)5_2', '(+)6_1'],\n method=close_prot):\n parser = PDBParser(structure_builder=Knot.KnotBuilder())\n for knot in knots:\n knot_list = pd.read_csv('../db/' + knot + '/List' + knot, sep=' ',\n header=None, names=['ID', 'chain'])\n knot_col = []\n for kn in knot_list.values:\n ID, chain = kn\n try:\n red = method(ID,\n chain,\n '../db/' + knot + '/pdbs/pdb' + ID + '.ent',\n parser)\n except FileNotFoundError:\n print('No such file or directory: ../db/' + knot +\n '/pdbs/pdb' + ID + '.ent\\n')\n knot_col.append('file not found')\n continue\n if len(red) == 2:\n print(ID, chain, 'reduced to 2 atoms\\n')\n knot_col.append('red to 2 atoms')\n continue\n knot_type = identify_knot(red)\n print(ID, chain, knot_type[0], '\\n')\n knot_col.append(knot_type)\n\n knot_list_new = knot_list.assign(knot=pd.Series(knot_col))\n knot_list_new.to_csv('../db/' + knot + '/knot' + knot, index=False)\n","sub_path":"knot/test_knot.py","file_name":"test_knot.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"174596903","text":"import pytest\nfrom caesar_cypher import CaesarCypher\n\ntestdata = [\n ('abcd', 1, False, 'bcde'),\n ('THE QUICK BROWN FOX JUMPS OVER 
THE LAZY DOG', 3, True,\n     'QEB NRFZH YOLTK CLU GRJMP LSBO QEB IXWV ALD'),\n    ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 23, False, 'XYZABCDEFGHIJKLMNOPQRSTUVW'),\n    ('I came, I saw, I conquered. (Julius Caesar)', 3, True,\n     'F zxjb, F pxt, F zlknrboba. (Grifrp Zxbpxo)'),\n    ('Bumbofbkzb fp qeb qbxzebo lc xii qefkdp. (Grifrp Zxbpxo)', 3, False,\n     'Experience is the teacher of all things. (Julius Caesar)'),\n    ('Ljnbja\\'b fron vdbc kn jkxen bdbyrlrxw. (Sdurdb Ljnbja)', 9, True,\n     'Caesar\\'s wife must be above suspicion. (Julius Caesar)'),\n    ('I found Rome a city of bricks and left it a city of marble. (Augustus)', 1, False,\n     'J gpvoe Spnf b djuz pg csjdlt boe mfgu ju b djuz pg nbscmf. (Bvhvtuvt)'),\n    ('Young men, hear an old man to whom old men hearkened when he was young. (Augustus)', 40,\n     False, 'Mcibu asb, vsof ob czr aob hc kvca czr asb vsofysbsr kvsb vs kog mcibu. (Oiuighig)')\n]\n\n\nclass TestCaesarCypher(object):\n    caesar_cypher = CaesarCypher()\n\n    @pytest.mark.parametrize(\"message,shift,left,expected\", testdata)\n    def test_shift_message(self, message, shift, left, expected):\n        message_shifted = self.caesar_cypher.shift_message(message, shift, left)\n\n        assert message_shifted == expected\n","sub_path":"python/caesar_cypher/caesar_cypher_test.py","file_name":"caesar_cypher_test.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"94844720","text":"# main program\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter import messagebox\nimport random\nimport requests\nfrom tkinter import ttk\n\nroot = tk.Tk()\nroot.title(\"Weather\")\nroot.geometry(\"800x1000\")\nroot.configure(bg=\"black\")\n\n\n# Module-level state shared by the GUI callbacks below\nentry1 = []\nentry2 = []\nentry3 = []\nrandomlist = []\nwinnings1 = []\nwinnings2 = []\nwinnings3 = []\nearnings = [0, 0, 20, 100.50, 2384, 8584, 10000000]\ntotal = ''\n\n\nclass main:\n    def __init__(self,master):\n        def adding(add):\n            if len(entry1) < 6 and add not in entry1:\n                entry1.append(add)\n                self.ent1.config(text=entry1)\n\n            elif len(entry1) == 6 and len(entry2) < 6 and add not in entry2:\n                entry2.append(add)\n                self.ent2.config(text=entry2)\n            elif len(entry2) == 6 and len(entry3) < 6 and add not in entry3:\n                entry3.append(add)\n                self.ent3.config(text=entry3)\n\n            else:\n                if len(entry3) == 6:\n                    messagebox.showerror(\"Error\",\"Tries are full\")\n                else:\n                    messagebox.showerror(\"Error\",\"you can only select the same number once per entry\")\n        def play():\n            global total\n            # if len(entry1) ==6 and len(entry2) ==6 and len(entry3) ==6:\n            while len(randomlist) < 6:\n                n = random.randint(1, 49)\n                if n not in randomlist:\n                    randomlist.append(n)\n            randomlist.sort()\n            self.mainwin.config(text=randomlist)\n            for x in randomlist:\n                if x in entry1:\n                    winnings1.append(x)\n            self.ent1_win.config(text=str(len(winnings1)) + \" R\" + str(earnings[len(winnings1)]))\n            for x in randomlist:\n                if x in entry2:\n                    winnings2.append(x)\n            self.ent2_win.config(text=str(len(winnings2)) + \" R\" + str(earnings[len(winnings2)]))\n            for x in randomlist:\n                if x in entry3:\n                    winnings3.append(x)\n            self.ent3_win.config(text=str(len(winnings3)) + \" R\" + str(earnings[len(winnings3)]))\n            total =\" total winnings: R\" + str(earnings[len(winnings1)]+earnings[len(winnings2)]+earnings[len(winnings3)])\n            self.total.config(text=total)\n            # else:\n            #     messagebox.showerror(\"Error\",\"Please fill entries\")\n\n        def convert_to_new_currency():\n            if earnings[len(winnings1)] >=2 or 
earnings[len(winnings2)] >=2 or earnings[len(winnings3)] >= 2:\n                convert_currency()\n            # else:\n            #     messagebox.showerror(\"Error\", \"You do not have any winnings to claim\")\n\n        def convert_currency():\n            root = Tk()\n\n            # StringVar\n            results = StringVar()\n\n            # code to add widgets and style window will go here\n            root.geometry(\"450x550\")\n            root.title(\"Currency Converter\")\n            root.config(bg='GREY')\n            root.resizable(0, 0)\n\n            information = requests.get('https://v6.exchangerate-api.com/v6/89dcd9e8cc7777ded2575ce1/latest/USD')\n            information_json = information.json()\n\n            conversion_rates = information_json['conversion_rates']\n            currency = []\n\n            for i in conversion_rates.keys():\n                currency.append(i)\n\n            currency_cb = ttk.Combobox(root)\n            currency_cb['values'] = currency\n            currency_cb['state'] = 'readonly'\n            currency_cb.set('Select Currency')\n            currency_cb.place(x=10, y=280)\n\n\n            Label(root, text='Winnings: ', bg='gold').place(x=65, y=330)\n            ent1 = Label(root, text=total.split('R')[-1], bg='white')\n            ent1.place(x=200, y=330)\n            ent1.focus()\n            Label(root, text='Converted Amount:', bg='#00A868').place(x=65, y=380)\n            Label(root, text='', textvariable=results, bg='#00A868').place(x=200, y=380)\n\n            def convert(to_currency, amount):\n\n                amount = round(amount * conversion_rates[to_currency], 4)\n                return amount\n\n            def perform():\n                try:\n                    amount = float(ent1.cget('text'))\n                    to_curr = currency_cb.get()\n\n                    converted_amount = convert(to_curr, amount)\n\n                    results.set(converted_amount)\n                except ValueError:\n                    messagebox.showerror('invalid', 'Enter numbers only')\n\n                except requests.exceptions.ConnectionError:\n                    messagebox.showerror('Internet error', 'Poor connection')\n                except KeyError:\n                    messagebox.showerror('ERROR', 'Select Currency')\n\n            # kill program\n            def kill():\n                return root.destroy()\n\n            def clear():\n                currency_cb.set('Select Currency')\n                ent1.focus()\n                results.set('')\n\n            Button(root, text=\"CONVERT\", borderwidth=3, bg='white', command=perform).place(x=180, y=430)\n            Button(root, text=\"EXIT\", borderwidth=3, bg='white', command=kill).place(x=281, y=480)\n            Button(root, text=\"CLEAR\", borderwidth=3, bg='white', command=clear).place(x=100, y=480)\n\n            root.mainloop() # continuously runs program in window\n\n        def play_again():\n            self.ent1.config(text=\"\")\n            #entry1.clear()\n            self.ent2.config(text=\"\")\n            #entry2.clear()\n            self.ent3.config(text=\"\")\n            #entry3.clear()\n            self.mainwin.config(text=\"\")\n            #randomlist.clear()\n            self.ent1_win.config(text=\"\")\n            #winnings1.clear()\n            self.ent2_win.config(text=\"\")\n            #winnings2.clear()\n            self.ent3_win.config(text=\"\")\n            #winnings3.clear()\n\n        def destroy():\n            messagebox.showinfo(\"warning\", \"closing game\")\n            master.destroy()\n\n\n        # frames within master\n        self.frame = Frame(master, width=700, height=300, bg=\"\", highlightbackground=\"gold\", highlightthickness=15)\n        self.frame.place(x=50, y=120)\n        self.frame_two = Frame(master, width=330, height=250, bg=\"\", highlightbackground=\"gold\", highlightthickness=15)\n        self.frame_two.place(x=50, y=445)\n        self.frame_three = Frame(master, width=330, height=250, bg=\"\", highlightbackground=\"gold\",highlightthickness=15)\n        self.frame_three.place(x=420, y=445)\n        self.frame_four = Frame(master, width=700, height=230, bg=\"\", highlightbackground=\"gold\", highlightthickness=15)\n        self.frame_four.place(x=50, y=720)\n\n        # labels for frame 2\n        self.ent1 = Label(self.frame_two, text=\"\", textvariable=entry1, fg=\"gold\",bg=\"black\")\n        self.ent1.place(x=50,y=30)\n        self.ent2 = 
Label(self.frame_two,text=\"\",fg=\"gold\",bg=\"black\", textvariable=entry2)\n self.ent2.place(x=50,y=100)\n self.ent3=Label(self.frame_two,text=\"\",fg=\"gold\",bg=\"black\", textvariable=entry3)\n self.ent3.place(x=50,y=170)\n\n # labels for frame 3\n self.mainwin=Label(self.frame_three,text=randomlist.sort(),bg=\"gold\")\n self.mainwin.place(x=100,y=30)\n self.ent1_win=Label(self.frame_three,text=\"\",bg=\"gold\")\n self.ent1_win.place(x=100,y=60)\n self.ent2_win = Label(self.frame_three, text=\"\", bg=\"gold\")\n self.ent2_win.place(x=100, y=120)\n self.ent3_win = Label(self.frame_three, text=\"\", bg=\"gold\")\n self.ent3_win.place(x=100, y=180)\n\n self.total=Label(self.frame_four,text=\"\",bg=\"gold\")\n self.total.place(x=100,y=100)\n #buttons\n\n self.one = Button(master, text=\"1\", bg=\"gold\", width=1, command=lambda: adding(1))\n self.one.place(x=100, y=200)\n self.two = Button(master, text=\"2\", bg=\"gold\", width=1, command=lambda: adding(2))\n self.two.place(x=140, y=200)\n self.three = Button(master, text=\"3\", bg=\"gold\", width=1, command=lambda: adding(3))\n self.three.place(x=180, y=200)\n self.four = Button(master, text=\"4\", bg=\"gold\", width=1, command=lambda: adding(4))\n self.four.place(x=220, y=200)\n self.five = Button(master, text=\"5\", bg=\"gold\", width=1, command=lambda: adding(5))\n self.five.place(x=260, y=200)\n self.six = Button(master, text=\"6\", bg=\"gold\", width=1, command=lambda: adding(6))\n self.six.place(x=300, y=200)\n self.seven = Button(master, text=\"7\", bg=\"gold\", width=1, command=lambda: adding(7))\n self.seven.place(x=340, y=200)\n self.eight = Button(master, text=\"8\", bg=\"gold\", width=1, command=lambda: adding(8))\n self.eight.place(x=380, y=200)\n self.nine = Button(master, text=\"9\", bg=\"gold\", width=1, command=lambda: adding(9))\n self.nine.place(x=420, y=200)\n self.ten = Button(master, text=\"10\", bg=\"gold\", width=1, command=lambda: adding(10))\n self.ten.place(x=460, y=200)\n self.eleven = Button(master, text=\"11\", bg=\"gold\", width=1, command=lambda: adding(11))\n self.eleven.place(x=500, y=200)\n self.twelve = Button(master, text=\"12\", bg=\"gold\", width=1, command=lambda: adding(12))\n self.twelve.place(x=540, y=200)\n self.thirteen = Button(master, text=\"13\", bg=\"gold\", width=1, command=lambda: adding(13))\n self.thirteen.place(x=580, y=200)\n self.fourteen = Button(master, text=\"14\", bg=\"gold\", width=1, command=lambda: adding(14))\n self.fourteen.place(x=620, y=200)\n self.fifteen= Button(master, text=\"15\", bg=\"gold\", width=1, command=lambda: adding(15))\n self.fifteen.place(x=660, y=200)\n self.sixteen = Button(master, text=\"16\", bg=\"gold\", width=1, command=lambda: adding(16))\n self.sixteen.place(x=140, y=230)\n self.seventeen = Button(master, text=\"17\", bg=\"gold\", width=1, command=lambda: adding(17))\n self.seventeen.place(x=180, y=230)\n self.eighteen = Button(master, text=\"18\", bg=\"gold\", width=1, command=lambda: adding(18))\n self.eighteen.place(x=220, y=230)\n self.nineteen = Button(master, text=\"19\", bg=\"gold\", width=1, command=lambda: adding(19))\n self.nineteen.place(x=260, y=230)\n self.twenty= Button(master, text=\"20\", bg=\"gold\", width=1, command=lambda: adding(20))\n self.twenty.place(x=300, y=230)\n self.twenty_one = Button(master, text=\"21\", bg=\"gold\", width=1, command=lambda: adding(21))\n self.twenty_one.place(x=340, y=230)\n self.twenty_two= Button(master, text=\"22\", bg=\"gold\", width=1, command=lambda: adding(22))\n self.twenty_two.place(x=380, 
y=230)\n self.twenty_three = Button(master, text=\"23\", bg=\"gold\", width=1, command=lambda: adding(23))\n self.twenty_three.place(x=420, y=230)\n self.twenty_four= Button(master, text=\"24\", bg=\"gold\", width=1, command=lambda: adding(24))\n self.twenty_four.place(x=460, y=230)\n self.twenty_five= Button(master, text=\"25\", bg=\"gold\", width=1, command=lambda: adding(25))\n self.twenty_five.place(x=500, y=230)\n self.twenty_six= Button(master, text=\"26\", bg=\"gold\", width=1, command=lambda: adding(26))\n self.twenty_six.place(x=540, y=230)\n self.twenty_seven = Button(master, text=\"27\", bg=\"gold\", width=1, command=lambda: adding(27))\n self.twenty_seven.place(x=580, y=230)\n self.twenty_eight= Button(master, text=\"28\", bg=\"gold\", width=1, command=lambda: adding(28))\n self.twenty_eight.place(x=620, y=230)\n self.twenty_nine = Button(master, text=\"29\", bg=\"gold\", width=1, command=lambda: adding(29))\n self.twenty_nine.place(x=180, y=260)\n self.thirty = Button(master, text=\"30\", bg=\"gold\", width=1, command=lambda: adding(30))\n self.thirty.place(x=220, y=260)\n self.thirty_one = Button(master, text=\"31\", bg=\"gold\", width=1, command=lambda: adding(31))\n self.thirty_one.place(x=260, y=260)\n self.thirty_two= Button(master, text=\"32\", bg=\"gold\", width=1, command=lambda: adding(32))\n self.thirty_two.place(x=300, y=260)\n self.thirty_three= Button(master, text=\"33\", bg=\"gold\", width=1, command=lambda: adding(33))\n self.thirty_three.place(x=340, y=260)\n self.thirty_four = Button(master, text=\"34\", bg=\"gold\", width=1, command=lambda: adding(34))\n self.thirty_four.place(x=380, y=260)\n self.thirty_five= Button(master, text=\"35\", bg=\"gold\", width=1, command=lambda: adding(35))\n self.thirty_five.place(x=420, y=260)\n self.thirty_six= Button(master, text=\"36\", bg=\"gold\", width=1, command=lambda: adding(36))\n self.thirty_six.place(x=460, y=260)\n self.thirty_seven= Button(master, text=\"37\", bg=\"gold\", width=1, command=lambda: adding(37))\n self.thirty_seven.place(x=500, y=260)\n self.thirty_eight= Button(master, text=\"38\", bg=\"gold\", width=1, command=lambda: adding(38))\n self.thirty_eight.place(x=540, y=260)\n self.thirty_nine= Button(master, text=\"39\", bg=\"gold\", width=1, command=lambda: adding(39))\n self.thirty_nine.place(x=580, y=260)\n self.forty = Button(master, text=\"40\", bg=\"gold\", width=1, command=lambda: adding(40))\n self.forty.place(x=220, y=290)\n self.forty_one = Button(master, text=\"41\", bg=\"gold\", width=1, command=lambda: adding(41))\n self.forty_one.place(x=260, y=290)\n self.forty_two = Button(master, text=\"42\", bg=\"gold\", width=1, command=lambda: adding(42))\n self.forty_two.place(x=300, y=290)\n self.forty_three= Button(master, text=\"43\", bg=\"gold\", width=1, command=lambda: adding(43))\n self.forty_three.place(x=340, y=290)\n self.forty_four= Button(master, text=\"44\", bg=\"gold\", width=1, command=lambda: adding(44))\n self.forty_four.place(x=380, y=290)\n self.forty_five = Button(master, text=\"45\", bg=\"gold\", width=1, command=lambda: adding(45))\n self.forty_five.place(x=420, y=290)\n self.forty_six = Button(master, text=\"46\", bg=\"gold\", width=1, command=lambda: adding(46))\n self.forty_six.place(x=460, y=290)\n self.forty_seven = Button(master, text=\"47\", bg=\"gold\", width=1, command=lambda: adding(47))\n self.forty_seven.place(x=500, y=290)\n self.forty_eight= Button(master, text=\"48\", bg=\"gold\", width=1, command=lambda: adding(48))\n self.forty_eight.place(x=540, y=290)\n 
self.forty_nine = Button(master, text=\"49\", bg=\"gold\", width=31, command=lambda: adding(49))\n self.forty_nine.place(x=260, y=322)\n\n # function button\n\n self.play_ag = Button(self.frame_four, text=\"PLAY AGAIN\", bg=\"gold\", command=play_again)\n self.play_ag.place(x=50, y=50)\n self.claim = Button(self.frame_four, text=\"convert\", bg=\"gold\", command=convert_to_new_currency)\n self.claim.place(x=500, y=100)\n\n self.play=Button(self.frame_two,text=\"PLAY\",bg=\"gold\",command=play)\n self.play.place(x=200,y=170)\n self.exit = Button(self.frame_four, text=\"exit\", bg=\"red\", command=destroy)\n self.exit.place(x=600, y=100)\n\n\n\n\nm = main(root)\nroot.mainloop()","sub_path":"nate.py","file_name":"nate.py","file_ext":"py","file_size_in_byte":15272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"441111785","text":"'''\nDB init\n'''\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\n\n# import models for db\nfrom dbmodels import Base\n\n\ndef init_engine():\n '''\n Init and configuration DB engine\n :return: db engine instance\n '''\n\n # use a SQLite database\n # dialect+driver://username:password@host:port/database\n\n try:\n db_engine = create_engine('sqlite+pysqlite:///db.sqlite', encoding='utf-8')\n\n # Create all tables stored in this metadata.\n Base.metadata.create_all(db_engine)\n\n return db_engine\n except Exception as e:\n raise Exception(\"Problem with DB\")\n\n\ndb_engine = init_engine()\n\n# Creating a DB Session\n# http://docs.sqlalchemy.org/en/rel_0_9/orm/tutorial.html#creating-a-session\ndb_session = sessionmaker(bind=db_engine)\n\nSession = scoped_session(db_session)\n\n#---------------------------\n# some dev functions\nfrom dbmodels import *\n\n#-=-=-=-=-=-=-=-=-=-=-=-\ndata = {}\n\ndef read_course(file):\n header = file.readline()[:-1]\n current_course = []\n data[header] = current_course\n \n while True:\n question = file.readline()\n if question == '\\n':\n return True\n elif question == '':\n return False\n answer = file.readline()\n current_course.append((question[:-1], answer[:-1]))\n\ndef read_text_data():\n with open('data.txt') as data_file:\n while read_course(data_file):\n pass\n\n#-=-=-=-=-=-=-=-=-=-=-=-\n\ndef fill_db():\n print('Filling db...')\n \n s = Session()\n read_text_data()\n \n for course_name in data.keys():\n print(course_name)\n \n c = Courses(courseName=course_name)\n s.add(c)\n s.commit()\n \n for word in data[course_name]:\n w = Words(word=word[1], question=word[0])\n s.add(w)\n s.commit()\n \n cw = CoursesWords(id_course=c.id, id_word=w.id)\n s.add(cw)\n s.commit()\n \n print('Done')\n\n#fill_db()\n\n#-=-=-=-=-=-=-=-=-=-=-=-\n\nfrom datetime import date\nfrom sqlalchemy import and_\nimport json\nimport base64\n\ndef get_today_date():\n start = date(2000, 1, 1)\n delta = date.today() - start\n return delta.days\n\n# workaround java's JSON parser refusal to touch unicode strings\ndef ghetto_encode(string):\n result = ''\n for c in string:\n code = ord(c)\n if code > 128:\n h = hex(code)[2:]\n if len(h) < 4:\n h = '0'*(4-len(h)) + h\n result += '~' + h\n else:\n result += c\n return result\n\ndef get_card(user_id, course_id):\n session = Session()\n \n today_date = get_today_date()\n \n # test if user has studied this course before\n sample_word = session.query(WordRecall).filter(\n and_(\n WordRecall.id_user == user_id,\n WordRecall.id_course == course_id\n )).first()\n if sample_word == None:\n # user 
studies this course for the first time\n print('Filling WordRecall for user', user_id, 'and course', course_id)\n \n word_ids_in_course = session.query(CoursesWords.id_word).filter(CoursesWords.id_course == course_id).all()\n \n for word_id_ in word_ids_in_course:\n word_id = word_id_[0]\n record = WordRecall(id_user=user_id, id_word=word_id, id_course=course_id, last_date=today_date, next_date=today_date)\n session.add(record)\n \n session.commit()\n\n word = session.query(WordRecall).filter(\n and_(\n WordRecall.id_user == user_id,\n WordRecall.id_course == course_id,\n WordRecall.next_date <= today_date\n )).first()\n \n if word == None:\n print('No more words to learn for user', user_id, 'course', course_id)\n return json.dumps({\n 'no_more': True\n })\n else:\n word_record = session.query(Words).filter(Words.id == word.id_word).first()\n print('Found word', word_record.word)\n return json.dumps({\n 'card_id': word.id,\n 'question': str(base64.b32encode(word_record.question.encode()))[2:-1],\n 'answer': str(base64.b32encode(word_record.word.encode()))[2:-1]\n })\n\ndef check_card(card_id, is_correct):\n session = Session()\n \n today = get_today_date()\n \n card = session.query(WordRecall).filter(WordRecall.id == card_id).first()\n #print(card)\n next_date = card.next_date\n last_date = card.last_date\n #print('next_date', next_date)\n #print('last_date', last_date)\n \n if is_correct:\n next_date = today + (next_date - last_date)*2 + 1\n print('Answer correct, card', card_id, 'next date', next_date-today, 'days ahead')\n else:\n next_date = today\n print('Answer incorrect, card', card_id, 'resetting')\n \n last_date = today\n \n #print('next_date', next_date)\n #print('last_date', last_date)\n \n card.next_date = next_date\n card.last_date = last_date\n\n session.commit()\n\nprint(get_card(3, 2))\n#print(ghetto_encode('lulфыа'))\n#check_card(2, True)\n\n","sub_path":"Server/data_import.py","file_name":"data_import.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"34993413","text":"# coding: utf-8\nimport re\nimport json\nimport csv\nfrom datetime import datetime\nimport time\nimport scrapy\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport logging\n\n\nclass WukongSelSpider(scrapy.Spider):\n name = 'WukongSel'\n home_url = 'https://www.wukong.com/'\n insurance = 'https://www.wukong.com/tag/6213187423061412353/'\n invest = 'https://www.wukong.com/tag/6213187420293171713/'\n fund = 'https://www.wukong.com/tag/6213187421769566721/'\n stock = 'https://www.wukong.com/tag/6213187421031369217/'\n plicai = 'https://www.wukong.com/tag/6213185666709195265/'\n licai = 'https://www.wukong.com/tag/6213187422323214849/'\n finance = 'https://www.wukong.com/tag/6213185657561418242/'\n economy = 'https://www.wukong.com/tag/6213187411782928897/'\n america = 'https://www.wukong.com/tag/6213187424823020033/'\n hongkong = 'https://www.wukong.com/tag/6347928532739426818/'\n driver = None\n faq_list = []\n count = 0\n cookie = 'ga=GA1.2.221268613.1520582867; tt_webid=6568660869164287501; tt_webid=6568660869164287501; _gid=GA1.2.1689909911.1529385541; sessionid=4b140a8d7d7b641997fb4a32d5396a50; sessionid=4b140a8d7d7b641997fb4a32d5396a50; 
uid_tt=0222855da1baf6cd7da7cc63a2a6ad05; uid_tt=0222855da1baf6cd7da7cc63a2a6ad05; wenda_login_status=1; wendacsrftoken=12a39c2a8f814fca485b6c6f4fe3e0ee; answer_finalFrom=; cookie_tt_page=b0d596e027031f96d348200dc62fbba1; answer_enterFrom=; _gat=1'\n    headers = {\n        ':authority': 'www.wukong.com',\n        ':method': 'GET',\n        ':path': '',\n        ':scheme': 'https',\n        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36',\n        'wendacsrftoken': '12a39c2a8f814fca485b6c6f4fe3e0ee',\n        'x-requested-with': 'XMLHttpRequest',\n    }\n    questions = []\n    answers = []\n\n    def scroll_down(self):\n        self.driver.implicitly_wait(2)\n        self.driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n        # time.sleep(1)\n\n    def start_requests(self):\n        self.driver = webdriver.Firefox()\n        print('create driver')\n        self.driver.get(self.hongkong)\n        try:\n            # caijing = WebDriverWait(self.driver, 20).until(\n            #     EC.presence_of_element_located((By.XPATH, '//a/span[text()=\"财经\"]'))\n            # )\n            # caijing.click()\n            # actions = ActionChains(self.driver)\n            # actions.click(caijing)\n            # actions.perform()\n\n            # WebDriverWait(self.driver, 10).until(\n            #     EC.presence_of_element_located((By.XPATH, '//div[@id=\"main-index-question-list\"]'))\n            # )\n            time.sleep(3)\n\n            for i in range(15000): # make the loop count as large as possible to ensure the page is loaded all the way to the bottom\n                ActionChains(self.driver).key_down(Keys.DOWN).perform() # equivalent to holding down the DOWN key\n                print('Completed %d times' % i)\n\n            time.sleep(2)\n            questions = self.driver.find_elements(By.XPATH, '//div[@class=\"question-title\"]//h2//a')\n            answers = self.driver.find_elements(By.XPATH, '//div[@class=\"answer-item-content\"]//p//a')\n            self.questions = list([q.get_property(\"text\") for q in questions])\n            self.answers = list([a.get_attribute('href') for a in answers])\n            print(len(self.questions), len(self.answers))\n            assert len(self.questions) == len(self.answers), 'questions and answers not matched'\n            # with open('wukong_insurance_faq.csv', 'w', newline='') as f:\n            #     writer = csv.writer(f, dialect='excel', delimiter=',')\n            #     for question, answer in zip(questions, answers):\n            #         q = question.get_property(\"text\")\n            #         a = answer.get_attribute('href')\n            #         print(q, a)\n            #         writer.writerow([q, a])\n\n            time.sleep(1)\n        finally:\n            self.driver.quit()\n        for question, answer in zip(self.questions, self.answers):\n            self.headers[':path'] = answer.replace(self.home_url, '')\n            yield scrapy.Request(\n                url=answer,\n                headers=self.headers,\n                cookies=self.cookie,\n                callback=self.parse,\n                meta={'question': question}\n            )\n\n    def parse(self, response):\n        logging.info('| Now --%s-- spider is crawling the site: %s' % (\n            self.name, response.url\n        ))\n        first = response.xpath('(//div[@class=\"answer-text-full rich-text\"])[1]//text()').extract()\n        first = '\\n'.join(first) if first else ''\n        second = response.xpath('(//div[@class=\"answer-text-full rich-text\"])[2]//text()').extract()\n        second = '\\n'.join(second) if second else ''\n        third = response.xpath('(//div[@class=\"answer-text-full rich-text\"])[3]//text()').extract()\n        third = '\\n'.join(third) if third else ''\n        print(first)\n\n        if not response.meta.get('question'):\n            return\n        faq = [response.meta.get('question'), first, second, third]\n        self.faq_list.append(faq)\n\n        if len(self.faq_list) >= 1000:\n            with 
open('wukong_faq_' + str(self.count) + '.csv', 'w', newline='') as f:\n writer = csv.writer(f, dialect='excel', delimiter=',')\n writer.writerows(self.faq_list)\n self.count += 1\n self.faq_list = []\n\n super(WukongSelSpider, self).close('WukongSel', 'finished')\n","sub_path":"harvester/collector/collector/spiders/wukong_sel.py","file_name":"wukong_sel.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"228006320","text":"from astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport math\nfrom math import *\n\nimage_width = 250\nimage_height = 250\nradius = 100\n\na = image_width/2.\nb = image_height/2.\n\nim = Image.new(\"RGB\", (image_width,image_height))\npix = im.load()\n\nfor x in range(image_width):\n for y in range(image_height):\n pix[x,y] = (0,0,0)\n\n\nfor x in range(image_width):\n for y in range(image_height):\n r = sqrt(((x-a))**2 + ((y-b)/0.4)**2)\n if(int(r) == radius or int(r) == -radius):\n for i in range(3):\n pix[x+i,y] = (200,200,0)\n\n\n\nim.rotate(0).save(\"cell1.png\", \"PNG\")\n","sub_path":"create_test_png.py","file_name":"create_test_png.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"526551050","text":"from keras.layers.core import Lambda\nfrom keras.layers.merge import Concatenate\nimport itertools\n\n\nclass RegionLayer(object):\n\n _regions = None\n\n def __init__(self):\n pass\n\n def split(self, layer, n_cols, n_rows):\n self._n_rows = int(n_rows)\n self._n_cols = int(n_cols)\n layer_shape = layer.get_shape().as_list()\n\n if len(layer_shape) != 4:\n raise ValueError('Input tensor must be 4D.')\n\n (height, width) = layer_shape[1:3]\n\n if height % self._n_rows > 0:\n raise ValueError(\n 'Invalid image height size for the number of rows.')\n\n if width % self._n_cols > 0:\n raise ValueError(\n 'Invalid image widht size for the number of collumns.')\n\n region_height = height // self._n_rows\n region_width = width // self._n_cols\n\n self._regions = []\n indices = itertools.product(range(n_rows), range(n_cols))\n\n for (i, j) in indices:\n i_begin = i * region_height\n j_begin = j * region_width\n rectangle = (i_begin, j_begin, region_width, region_height)\n region = Lambda(self._crop_region(rectangle))(layer)\n self._regions.append(region)\n\n def _crop_region(self, rectangle):\n (top, left, width, height) = rectangle\n bottom = top + height\n right = left + width\n return lambda x: x[:, top:bottom, left:right, :]\n\n def concatenate_convolution(self):\n rows = []\n for i in range(self._n_rows):\n row = []\n for j in range(self._n_cols):\n index = i * self._n_cols + j\n row.append(self._regions[index])\n rows.append(Concatenate(axis=2)(row))\n\n return Concatenate(axis=1)(rows)\n\n def concatenate_fully_connected(self):\n regions = []\n for i in range(self._n_rows):\n for j in range(self._n_cols):\n index = i * self._n_cols + j\n regions.append(self._regions[index])\n return Concatenate(axis=1)(regions)\n\n def add(self, operation):\n for (i, region) in enumerate(self._regions):\n self._regions[i] = operation(region)\n","sub_path":"layers/region_layer.py","file_name":"region_layer.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"605785335","text":"from django.shortcuts import render, redirect\nfrom 
django.http.response import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, logout, login as authlogin\nfrom django.contrib.auth.decorators import login_required\nfrom django.core import serializers\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import usuario, veiculos, modelo_carro, marca_carro, leilao\nfrom .forms import UsuarioForm, VeiculoForm\nfrom datetime import datetime\n\ndef index(request):\n if request.POST:\n form = UsuarioForm(request.POST)\n else:\n form = UsuarioForm()\n if request.user.id:\n current_user = usuario.objects.get(email=request.user)\n form = UsuarioForm()\n form_veic = VeiculoForm()\n modelo_veic = modelo_carro.objects.all()\n marca_veic = marca_carro.objects.all()\n return render(request, 'index_logado.html', {'modelos': modelo_veic, 'marcas': marca_veic,'usuario': current_user, 'form': form, 'form_veic': form_veic})\n return render(request, 'index.html', {'form': form})\n\ndef cadastro_usuario(request):\n\n if request.method == \"POST\":\n u = usuario()\n try:\n current_user = usuario.objects.get(cpf=request.POST.get('cpf'))\n except:\n current_user = \"\"\n if current_user == \"\":\n u.nome = request.POST.get('nome')\n u.cpf = request.POST.get('cpf')\n u.data_nascimento = request.POST.get('data_nascimento')\n u.telefone = request.POST.get('telefone')\n u.email = request.POST.get('email')\n u.senha = request.POST.get('senha')\n u.save()\n return index(request)\n else:\n form = UsuarioForm(request.POST)\n return render(request, 'cadastros/cad_user_error.html', {'form': form, \"usuario\": current_user})\n\n\ndef validacao(request):\n if request.user.id:\n return index(request)\n\n if request.POST:\n emailUser = request.POST.get('email')\n senhaUser = request.POST.get('senha')\n\n u = authenticate(username=emailUser, password=senhaUser)\n if u is not None:\n if u.is_active:\n authlogin(request, u)\n\n if request.POST.get('next'):\n return HttpResponseRedirect(request.POST.get('next'))\n\n return index(request)\n return login_error(request)\n\ndef login_error(request):\n if request.POST:\n form = UsuarioForm()\n email = request.POST.get('email')\n senha = request.POST.get('senha')\n mensagem_error = 'e-mail ou senha invalidos'\n return render(request, 'index.html', {'form': form, 'mensagem':mensagem_error, 'email':email, 'senha': senha})\n\n\n@login_required\ndef usuario_editar(request):\n if request.GET['us']:\n user_id = request.GET['us']\n current_user = usuario.objects.filter(id=user_id)\n json = serializers.serialize(\"json\", current_user)\n return HttpResponse(json)\n\n@login_required\ndef usuario_editar_final(request):\n if request.POST['id_usuario']:\n user_id = request.POST['id_usuario']\n current_user = usuario.objects.get(id=user_id)\n if current_user != \"\":\n current_user.nome = request.POST.get('nome')\n current_user.telefone = request.POST.get('telefone')\n current_user.data_nascimento = request.POST.get('data_nascimento')\n current_user.save()\n u = authenticate(username=current_user.email, password=current_user.senha)\n if u is not None:\n if u.is_active:\n authlogin(request, u)\n\n if request.POST.get('next'):\n return HttpResponseRedirect(request.POST.get('next'))\n\n return index(request)\n\n@login_required\ndef ajax_pesquisa(request):\n if 'marca' in request.GET:\n id = request.GET['marca']\n modelos = modelo_carro.objects.filter(marca=id)\n json = serializers.serialize(\"json\", modelos)\n return HttpResponse(json)\n\n@login_required\ndef 
cadastro_veiculo(request):\n current_user = usuario.objects.get(email=request.user)\n if request.POST:\n v = veiculos()\n try:\n current_veiculo = veiculos.objects.get(placa=request.POST.get('placa'))\n except:\n current_veiculo = \"\"\n\n if current_veiculo == \"\":\n current_modelo = modelo_carro.objects.get(id = request.POST.get('modelo'))\n v.modelo = current_modelo\n v.ano_veiculo = request.POST.get('ano_veiculo')\n v.cor = request.POST.get('cor')\n v.placa = request.POST.get('placa')\n v.combustivel = request.POST.get('combustivel')\n v.valor = request.POST.get('valor')\n v.ipva = request.POST.get('ipva')\n v.documentacao = request.POST.get('documentacao')\n v.img = request.POST.get('img')\n v.observacao = request.POST.get('observacao')\n v.usuario_id = current_user\n v.save()\n return index(request)\n\n@login_required\ndef leiloar_veiculo(request):\n modelo_veic = modelo_carro.objects.all()\n marca_veic = marca_carro.objects.all()\n current_user = usuario.objects.get(email=request.user)\n lista_veiculos = []\n list_veiculos = veiculos.objects.filter(usuario_id = current_user)\n for lv in list_veiculos:\n dict_veiculos = {}\n try:\n test_leilao = leilao.objects.get(veiculo_id=lv.id)\n dict_veiculos['id'] = test_leilao.veiculo_id.id\n dict_veiculos['modelo'] = test_leilao.veiculo_id.modelo\n dict_veiculos['usuario_id'] = test_leilao.veiculo_id.usuario_id\n dict_veiculos['ano_veiculo'] = test_leilao.veiculo_id.ano_veiculo\n dict_veiculos['cor'] = test_leilao.veiculo_id.cor\n dict_veiculos['combustivel'] = test_leilao.veiculo_id.combustivel\n dict_veiculos['placa'] = test_leilao.veiculo_id.placa\n dict_veiculos['valor'] = test_leilao.veiculo_id.valor\n dict_veiculos['documentacao'] = test_leilao.veiculo_id.documentacao\n dict_veiculos['ipva'] = test_leilao.veiculo_id.ipva\n dict_veiculos['img'] = test_leilao.veiculo_id.img\n dict_veiculos['observacao'] = test_leilao.veiculo_id.observacao\n dict_veiculos['status'] = test_leilao.status\n lista_veiculos.append(dict_veiculos)\n except:\n dict_veiculos['id'] = lv.id\n dict_veiculos['modelo'] = lv.modelo\n dict_veiculos['usuario_id'] = lv.usuario_id\n dict_veiculos['ano_veiculo'] = lv.ano_veiculo\n dict_veiculos['cor'] = lv.cor\n dict_veiculos['combustivel'] = lv.combustivel\n dict_veiculos['placa'] = lv.placa\n dict_veiculos['valor'] = lv.valor\n dict_veiculos['documentacao'] = lv.documentacao\n dict_veiculos['ipva'] = lv.ipva\n dict_veiculos['img'] = lv.img\n dict_veiculos['observacao'] = lv.observacao\n lista_veiculos.append(dict_veiculos)\n \n form_veic = VeiculoForm()\n paginator = Paginator(lista_veiculos, 2)\n page = request.GET.get('page')\n try:\n dados = paginator.page(page)\n except PageNotAnInteger:\n dados = paginator.page(1)\n except EmptyPage:\n dados = paginator.page(paginator.num_pages)\n\n data_atual = datetime.now().strftime(\"%Y-%m-%d\")\n data_final = str(datetime.now().year)+\"-\"+str(datetime.now().month + 1) + \"-\" + str(datetime.now().day)\n return render(request, 'veiculos/lista.html', {'modelos': modelo_veic, 'marcas': marca_veic, 'form_veic': form_veic, 'dados': dados, \"data_atual\": data_atual, \"data_final\":data_final})\n\n@login_required\ndef leiloando(request):\n if request.POST:\n current_user = usuario.objects.get(email=request.user)\n current_veic = veiculos.objects.get(id=request.POST.get('id_veic'))\n data_atual = datetime.now()\n l = leilao()\n l.veiculo_id = current_veic\n l.usuario_id = current_user\n l.data_inicio = data_atual\n l.data_final = request.POST.get('data_final')\n l.valor_atual = 
current_veic.valor\n l.status = 'ativo'\n l.save()\n return index(request)\n\n@login_required\ndef em_leilao(request):\n current_user = usuario.objects.get(email = request.user)\n current_leilao = leilao.objects.filter(usuario_id = current_user)\n leiloes = []\n for l in current_leilao:\n l_dict = {}\n try:\n user_buy = usuario.objects.get(id = int(l.usuario_id_comprador))\n l_dict['id_veiculo'] = l.veiculo_id.id\n l_dict['modelo'] = l.veiculo_id.modelo\n l_dict['usuario_id'] = l.veiculo_id.usuario_id\n l_dict['usuario_id_comprador'] = user_buy\n l_dict['ano_veiculo'] = l.veiculo_id.ano_veiculo\n l_dict['cor'] = l.veiculo_id.cor\n l_dict['combustivel'] = l.veiculo_id.combustivel\n l_dict['placa'] = l.veiculo_id.placa\n l_dict['valor'] = l.veiculo_id.valor\n l_dict['documentacao'] = l.veiculo_id.documentacao\n l_dict['ipva'] = l.veiculo_id.ipva\n l_dict['img'] = l.veiculo_id.img\n l_dict['observacao'] = l.veiculo_id.observacao\n l_dict['status'] = l.status\n l_dict['valor_inicial'] = l.valor_inicial\n l_dict['valor_atual'] = l.valor_atual\n l_dict['data_final'] = l.data_final\n l_dict['mensagem'] = \"proposta ativa\"\n leiloes.append(l_dict)\n \n except:\n l_dict['id_veiculo'] = l.veiculo_id.id\n l_dict['modelo'] = l.veiculo_id.modelo\n l_dict['usuario_id'] = l.veiculo_id.usuario_id\n l_dict['usuario_id_comprador'] = l.usuario_id_comprador\n l_dict['ano_veiculo'] = l.veiculo_id.ano_veiculo\n l_dict['cor'] = l.veiculo_id.cor\n l_dict['combustivel'] = l.veiculo_id.combustivel\n l_dict['placa'] = l.veiculo_id.placa\n l_dict['valor'] = l.veiculo_id.valor\n l_dict['documentacao'] = l.veiculo_id.documentacao\n l_dict['ipva'] = l.veiculo_id.ipva\n l_dict['img'] = l.veiculo_id.img\n l_dict['observacao'] = l.veiculo_id.observacao\n l_dict['status'] = l.status\n l_dict['valor_inicial'] = l.veiculo_id.valor\n l_dict['valor_atual'] = l.valor_atual\n l_dict['data_final'] = l.data_final\n l_dict['mensagem'] = \"nenhuma proposta\"\n leiloes.append(l_dict)\n\n paginator = Paginator(leiloes, 2)\n page = request.GET.get('page')\n try:\n dados = paginator.page(page)\n except PageNotAnInteger:\n dados = paginator.page(1)\n except EmptyPage:\n dados = paginator.page(paginator.num_pages)\n\n return render(request, 'veiculos/em_leilao.html', {'dados': dados})\n\n@login_required\ndef leilao_ativo(request):\n current_user = usuario.objects.get(email = request.user)\n current_leilao = leilao.objects.filter(status = \"ativo\").exclude(usuario_id = current_user)\n\n\n paginator = Paginator(current_leilao, 2)\n page = request.GET.get('page')\n try:\n dados = paginator.page(page)\n except PageNotAnInteger:\n dados = paginator.page(1)\n except EmptyPage:\n dados = paginator.page(paginator.num_pages)\n\n return render(request, 'veiculos/leilao_ativo.html', {'dados': dados})\n\n@login_required\ndef leilao_ativar(request):\n if request.POST:\n current_user = usuario.objects.get(email = request.user)\n current_leilao = leilao.objects.get(id = request.POST.get('id_leilao'))\n if request.POST.get('valor_proposto') < current_leilao.valor_atual:\n return leilao_ativo(request)\n else:\n current_leilao.valor_atual = request.POST.get('valor_proposto')\n current_leilao.usuario_id_comprador = current_user.id\n current_leilao.save()\n return lances(request)\n\n@login_required\ndef lances(request):\n current_user = usuario.objects.get(email = request.user)\n current_leilao = leilao.objects.filter(usuario_id_comprador = current_user.id)\n\n\n paginator = Paginator(current_leilao, 2)\n page = request.GET.get('page')\n try:\n dados = 
paginator.page(page)\n except PageNotAnInteger:\n dados = paginator.page(1)\n except EmptyPage:\n dados = paginator.page(paginator.num_pages)\n\n return render(request, 'veiculos/lances.html', {'dados': dados})\n\n\n@login_required\ndef sair(request):\n logout(request)\n return index(request)","sub_path":"aa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"316731448","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # PRODUCT FEEDBACK ANALYSIS \n# \n\n# #### Social Media is playing an important role in marketing of a product. This social media can also be used to know the customer's opinions on improvising the product's features, it's quality and many other things.\n# #### Sentimental Analysis can roughly give us an idea about the future demand of the product which can help us in increasing the revenue and cutting down on manufacturing costs.\n# #### In the Jupyter Notebook, we will do the sentimental analysis on a car model and for analysis purpose we will take the comments as the data from it's Facebook post.\n# #### We will use Python libraries along with Facebook Graph API to achieve our goal.\n\n# In[10]:\n\n\nfrom IPython.display import Image\nImage(\"/Users/saurabhkarambalkar/Desktop/Picture.jpg\", width=1000)\n\n\n# # Business Aspects\n#
      • Reduce the number of manufacturing units and the cost of the product\n#
      • Improve the quality of the product by taking feedback from the customers\n# \n\n# # Assumptions\n#
      • The assumption for the following analysis is that all the reviews provided by the customers are authentic and unbiased\n\n# # Limitations\n# - The API provides limited access to the data, so the models do not have much data to be trained on\n# - If all the reviews given by the customers turn out to be facts, sentiment analysis cannot be carried out on the data\n\n# ## Facebook\n\n# In[5]:\n\n\nimport pandas as pd\nimport numpy as np\nimport facebook\n\n\n# In[71]:\n\n\ngraph = facebook.GraphAPI(access_token=\"EAACcYJ4a3rQBAJhxgkoqEy6aNGmj5QdTMLsOxqXpHR2eomKHE0DeYvAf7syzc7HK7KYfYzAOSRZAqxhVuPvXguZALFa8TpWVoyFlyybn0d6asCK34cRZA6aFeC6LyVfTmYkjtbEK6H81bzDWUsBoelImOZBJXzRlhBMauZBOZAg2inW6QIqfTjY21EVQZChJZB8ZD\", version=\"2.6\")\n\n\n# In[105]:\n\n\n#### Get comments from post\npost = graph.get_object(id='10155966298429003', fields='comments')\nprint(post)\n\n\n# In[107]:\n\n\ndata = post['comments']['data']\nlen(data)\n\n\n# In[119]:\n\n\nmessages=[]\nfor i in data:\n messages.append(i['message'])\n #messages[].append(i['id'])\nmessages\n\n\n# In[240]:\n\n\nFacebook_Messages=pd.DataFrame()\nFacebook_Messages['Messages']=messages\nFacebook_Messages\n\n\n# In[218]:\n\n\nimport json\nfrom pandas.io.json import json_normalize\n\n\n# In[219]:\n\n\ndf1=pd.DataFrame(messages)\ndf1\n\n\n# In[220]:\n\n\nimport textblob as tb\nfrom textblob import TextBlob\n\n\n# In[221]:\n\n\ncomments=df1[0]\ncomments\n\n\n# In[224]:\n\n\ncm1=comments[8]\nblob=TextBlob(cm1)\nblob.sentiment\n\n\n# In[159]:\n\n\nimport matplotlib.pyplot as plot\nget_ipython().run_line_magic('matplotlib', 'inline')\npolarity=[]\nsubj=[]\nfor t in comments:\n tx=TextBlob(t)\n polarity.append(tx.sentiment.polarity)\n subj.append(tx.sentiment.subjectivity)\n\n\n# In[160]:\n\n\npoltweet= pd.DataFrame({'polarity':polarity,'subjectivity':subj}) \npoltweet.plot(title='Polarity and Subjectivity')\n\n\n# In[161]:\n\n\nlist=[]\nlist= comments\nwordstring = list\nwordstring\n\n\n# # Twitter\n\n# In[168]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[169]:\n\n\nimport twitter\nfrom twitter import Twitter\nfrom twitter import OAuth\nfrom twitter import TwitterHTTPError\nfrom twitter import TwitterStream\n\n\n# In[170]:\n\n\nck= 'hq1ikoFrNoXH32vVwH4tdYewd'\ncs= 'UpsUIWyTXNtOoKGCuQZw7dQ9LO2lzx5vxnw069g4gg9BGfWl3Z'\nat='753638455-fQJkPQDV4aDafISatU7ZjRBjf4UMb2ufYyKSI2bU' \nats='Cjym1sTksiwAKp7Zeh78ngdqZYUI29r981Dw4eFqMCJ4e'\n\n\n# In[172]:\n\n\noauth= OAuth(at,ats,ck,cs)\ntwit_api=Twitter(auth=oauth)\nt_loc= twit_api.trends.available()\nt_loc\nts= TwitterStream(auth=oauth)\n\n\n# In[173]:\n\n\niterator = ts.statuses.filter(track=\"Volkswagen\",language=\"en\")\n\n\n# In[174]:\n\n\nb=[]\nfor t in iterator:\n print(t)\n b.append(t)\n if len(b)==25:\n break\n\n\n# In[175]:\n\n\nlen(b)\n\n\n# In[176]:\n\n\nimport json\nfrom pandas.io.json import json_normalize\n\n\n# In[177]:\n\n\ndf=json_normalize(b)\ndf.head()\n\n\n# In[178]:\n\n\n# Textblob\nimport textblob as tb\nfrom textblob import TextBlob\n\n\n# In[179]:\n\n\nget_ipython().system('python -m textblob.download_corpora')\n\n\n# In[180]:\n\n\ntweettext=df['text']\n\n\n# In[189]:\n\n\ntx=tweettext[16]\nblob=TextBlob(tx)\ntx\n\n\n# In[190]:\n\n\nblob.sentiment\n\n\n# In[183]:\n\n\n# Plot Sentiments\nimport matplotlib.pyplot as plot\nget_ipython().run_line_magic('matplotlib', 'inline')\npolarity=[]\nsubj=[]\nfor t in tweettext:\n tx=TextBlob(t)\n polarity.append(tx.sentiment.polarity)\n subj.append(tx.sentiment.subjectivity)\n\n \npoltweet= 
pd.DataFrame({'polarity':polarity,'subjectivity':subj}) \npoltweet.plot(title='Polarity and Subjectivity')\n\n\n# In[191]:\n\n\nlist=[]\nlist= df['text']\nwordstring = list[0]\nwordstring\n\n\n# In[192]:\n\n\nn=1\nwhile n < 25:\n wordstring += list[n]\n n=n+1\n\n\n# In[193]:\n\n\nwordstring\nwordlist = wordstring.split()\n\n\n# In[194]:\n\n\ntweettext=df['text']\n\n\n# In[195]:\n\n\nblob=TextBlob(wordstring)\nblob\n\n\n# In[203]:\n\n\ndf_twit = pd.DataFrame()\ndf_twit['data']=tweettext\ndf_twit\n\n\n# In[211]:\n\n\nimport re\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n\n# In[213]:\n\n\ncorpus = []\nfor i in range(0,24):\n review = re.sub('[^a-zA-Z]', ' ', df_twit['data'][i])\n review = review.lower()\n review = review.split()\n ps = PorterStemmer() # Taking roots of different versions of the same word; \n # Not to have too many words in the end; \n # To regroup same versions of the words;\n review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n review = ' '.join(review)\n corpus.append(review)\ncorpus\n\n\n# In[196]:\n\n\nblob.sentiment\n\n\n# In[197]:\n\n\n# Plots\nimport matplotlib.pyplot as plot\nget_ipython().run_line_magic('matplotlib', 'inline')\npolarity=[]\nsubj=[]\nfor t in tweettext:\n tx=TextBlob(t)\n polarity.append(tx.sentiment.polarity)\n subj.append(tx.sentiment.subjectivity)\n \npoltweet= pd.DataFrame({'polarity':polarity,'subjectivity':subj}) \npoltweet.plot(title='Polarity and Subjectivity')\n\n\n# In[245]:\n\n\ncombined_data=pd.DataFrame()\ncombined_data=pd.concat([df_twit['data'],Facebook_Messages['Messages']],ignore_index=True)\ncombined_data\n\n\n# In[278]:\n\n\ncombined_data['Messages']=combined_data[0]\n\n\n# In[253]:\n\n\ncombined_data\n\n\n# In[261]:\n\n\ntweettext=combined_data\n\n\n# In[275]:\n\n\ntx=tweettext[1]\nblob=TextBlob(tx)\ntx\n\n\n# In[276]:\n\n\nblob.sentiment\n\n\n# In[268]:\n\n\n# Plots\nimport matplotlib.pyplot as plot\nget_ipython().run_line_magic('matplotlib', 'inline')\npolarity=[]\nsubj=[]\nfor t in tweettext:\n tx=TextBlob(t)\n polarity.append(tx.sentiment.polarity)\n subj.append(tx.sentiment.subjectivity)\n \npoltweet= pd.DataFrame({'polarity':polarity,'subjectivity':subj}) \npoltweet.plot(title='Polarity and Subjectivity')\n\n\n# # Conclusion and Future Scope\n\n# \n# #### We are planning on sending the feedback from the above analysis to the owners of the companies, using which they can improve their service, inturn maximizing their profits.\n","sub_path":"Facebook Analysis.py","file_name":"Facebook Analysis.py","file_ext":"py","file_size_in_byte":6977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"173095166","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nimport numpy as np\nfrom numpy import array\n\n#1. 데이터\nx = np.array(range(1,101))\n#x2 = array(range(1,101))\ny = np.array(range(101,201))\nfrom sklearn.model_selection import train_test_split \nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size = 0.4, train_size=0.6\n #, shuffle=True, train_size=0.8\n)\nx_test, x_val, y_test, y_val = train_test_split(\n x_test, y_test, train_size=0.5\n #, shuffle=True, train_size=0.8\n)\nprint(x_train)\nprint(x_val)\nprint(x_test)\nprint(x_train.shape)\nprint(x_val.shape)\nprint(x_test.shape)\n\n#2. 
모델 구성\nmodel = Sequential()\nmodel.add(Dense(5, activation='relu', input_dim=1))\nmodel.add(Dense(3))\nmodel.add(Dense(4))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n#3. 컴파일, 훈련\nmodel.compile(optimizer=\"adam\", loss='mse')\nmodel.fit(x_train, y_train, epochs=1000, \n validation_data=(x_val,y_val))\n\n#4. 평가, 예측\ny_predict = model.predict([101,102,103])\nprint('y_predict : ',y_predict)\n\n\n","sub_path":"study/keras/keras05_split2.py","file_name":"keras05_split2.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"201148756","text":"import numpy as np\nimport math\nimport collections\n\n''' CONSTANTS '''\nnum_d_squared_centers = 40\nmapper_coreset_size = 4000 - num_d_squared_centers\nk = 200\nalpha = 20 * (math.log(k, 2) + 1)\nk_means_iterations = 15\n\n\n# key: None\n# value: 2d numpy array\ndef mapper(key, value):\n centers = list()\n\n ''' D^2 sampling '''\n\n curr_center = value[np.random.randint(value.shape[0])]\n centers.append(curr_center)\n\n # initialization of minimum squared distances\n min_squared_distances = np.full(value.shape[0], np.iinfo(np.int64).max)\n\n # gets the squared distance to the current center from a specified vector\n def get_squared_dist_to_curr_center(vec):\n return np.linalg.norm(vec - curr_center)**2\n\n # generate the centers\n for _ in xrange(num_d_squared_centers - 1):\n min_squared_distances = np.minimum(np.apply_along_axis(get_squared_dist_to_curr_center, 1, value), min_squared_distances)\n next_center_idx = np.random.choice(value.shape[0], p=(min_squared_distances / np.sum(min_squared_distances)))\n centers.append(value[next_center_idx])\n curr_center = value[next_center_idx]\n\n ''' significance sampling '''\n\n # gets the squared distance to the nearest center from a given vector\n def squared_distance_to_nearest_center(vec):\n return get_distance_to_nearest_center(vec, centers)**2\n\n # sum of distances from each point to its nearest D^2 center\n distances_sum = np.sum(np.apply_along_axis(squared_distance_to_nearest_center, 1, value))\n # map from each center to a list of the distances of the points that are closest to that center\n centers_to_nearest_squared_dists = get_centers_to_nearest_point_squared_dists(value, centers)\n\n # get the probability of sampling the specified vector\n def significance_sampling_prob(vec):\n # c_phi value\n ave_nearest_dist = (1.0 / value.shape[0]) * distances_sum\n nearest_center, squared_dist_to_nearest_center = get_nearest_center_and_squared_dist_tuple(vec, centers)\n # B_i, the list of all the distances to the nearest center\n squared_dists_to_nearest_center = centers_to_nearest_squared_dists[nearest_center.tostring()]\n\n # (alpha * d(x, B)^2) / c_phi\n first_term = alpha * squared_dist_to_nearest_center / ave_nearest_dist\n # (2 * alpha summation(all x' in X: d(x', B)^2)) / (size(B_i) * c_phi)\n second_term = 2.0 * alpha * sum(squared_dists_to_nearest_center) / (len(squared_dists_to_nearest_center) * ave_nearest_dist)\n # 4 * size(X) / size(B_i)\n third_term = 4.0 * value.shape[0] / len(squared_dists_to_nearest_center)\n\n return first_term + second_term + third_term\n\n # the significance sampling values for all the vectors in value\n sig_sampling_vals = np.apply_along_axis(significance_sampling_prob, 1, value)\n sig_sampling_probs = sig_sampling_vals / np.sum(sig_sampling_vals)\n sig_sampled_indices = np.random.choice(value.shape[0], size=mapper_coreset_size, p=sig_sampling_probs)\n\n # add all the vectors 
that we sampled to the centers list\n for index in sig_sampled_indices:\n centers.append(value[index])\n\n yield 0, centers\n\n\n# key: key from mapper used to aggregate\n# values: list of all value for that key\n# Note that we do *not* output a (key, value) pair here.\ndef reducer(key, values):\n centers = list()\n center_indices = np.random.choice(len(values), k)\n\n for idx in center_indices:\n centers.append(values[idx])\n\n for i in xrange(1, k_means_iterations + 1):\n centers_to_indices = get_centers_to_nearest_point_indices(values, centers)\n new_centers = list()\n\n for center in centers_to_indices.keys():\n vec_sum = np.zeros(values.shape[1])\n for idx in centers_to_indices[center]:\n vec_sum = vec_sum + values[idx]\n\n mean_vec = vec_sum / len(centers_to_indices[center])\n new_centers.append(mean_vec)\n\n # in the case that some centers we picked were outliers\n if len(new_centers) < k:\n for j in xrange(k - len(new_centers)):\n new_centers.append(values[np.random.randint(values.shape[0])])\n\n centers = new_centers\n\n yield centers\n\n\n''' helper functions '''\n\n\n# Given a vector and a list of vectors that are the current centers, return\n# the distance to the closest center.\ndef get_distance_to_nearest_center(vec, centers):\n min_dist = np.iinfo(np.int64).max\n for center in centers:\n dist = np.linalg.norm(vec - center)\n if dist < min_dist:\n min_dist = dist\n\n return min_dist\n\n\n# Given a 2d numpy array and a list of vectors that are the current centers, return a map\n# from each center to a list of scalars that represents the distances to the points in arr2d\n# closest to that center.\ndef get_centers_to_nearest_point_squared_dists(arr2d, centers):\n ret = collections.defaultdict(list)\n for i in range(arr2d.shape[0]):\n center, dist = get_nearest_center_and_squared_dist_tuple(arr2d[i], centers)\n ret[center.tostring()].append(dist)\n\n return ret\n\n\n# Given a vector and a list of vectors that are the current centers, return the center\n# that is closest to the specified vector.\ndef vec_to_nearest_center(vec, centers):\n min_dist = np.iinfo(np.int32).max\n nearest_center = None\n for center in centers:\n dist = np.linalg.norm(vec - center)\n if dist < min_dist:\n min_dist = dist\n nearest_center = center\n\n return nearest_center\n\n\n# Given a 2d numpy array and a list of vectors that are the current centers, return a\n# map from each center to a list of indices that represents the indices of the vectors\n# arr2d closest to that center.\ndef get_centers_to_nearest_point_indices(arr2d, centers):\n ret = collections.defaultdict(list)\n for j in range(arr2d.shape[0]):\n curr_center = vec_to_nearest_center(arr2d[j], centers)\n ret[curr_center.tostring()].append(j)\n\n return ret\n\n\n# Given a vector and a list of vectors that are the current centers, return a tuple\n# of (nearest_center, squared_min_dist).\ndef get_nearest_center_and_squared_dist_tuple(vec, centers):\n min_dist = np.iinfo(np.int32).max\n ret_center = None\n for center in centers:\n dist = np.linalg.norm(vec - center)\n if dist < min_dist:\n min_dist = dist\n ret_center = center\n\n return ret_center, min_dist**2\n","sub_path":"k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"652238545","text":"'''\nGregorius Jovan Kresnadi - 13518135\nNama program : 15 Puzzle Solver\nDeskripsi program:\nProgram yang menerima sebuah puzzle matriks 16 kotak, dan \nmenyelesaikannya. 
Program menerima masukan berupa teks \nyang berisi matriks, menggunakan Algoritma Branch and Bound\nuntuk menyelesaikan puzzle tersebut.\nFinal goalnya adalah sebagai berikut\n| 1 | 2 | 3 | 4 |\n| 5 | 6 | 7 | 8 |\n| 9 | 10| 11| 12|\n| 13| 14| 15| |\n'''\nimport time\n# $ ====================== FUNCTIONS & METHODS ======================\ndef readPuzzle(menu):\n filein = \"puzzle.txt\"\n if menu == 1:\n filein = \"../test/puzzle.txt\"\n elif menu == 2:\n filein = str(input(\"File to read (without .txt): \"))\n filein = \"../test/\" + filein + \".txt\"\n try:\n f = open(filein,\"r\")\n except:\n print(\"File not found, using default file...\")\n f = open(\"../test/puzzle.txt\",\"r\")\n return f\n\ndef printPuzzle(puzzle):\n for x in puzzle:\n print(\"+----+----+----+----+\")\n print(\"|\", end=\"\")\n for y in x:\n out = \"\"\n num = y\n if (num == 16):\n out += \" \"\n elif (num < 10):\n out += (\" \"+str(num))\n else:\n out += (\" \"+str(num))\n print(out, end=\" |\")\n print()\n print(\"+----+----+----+----+\")\n\ndef Posisi(puzzle, idx):\n pos = 1\n for x in puzzle:\n for y in x:\n if idx == y:\n return pos\n else:\n pos += 1\n return pos\n\ndef Kurang(puzzle, idx):\n if idx == 1:\n return 0\n else:\n j = 1\n count = 0\n idxPos = Posisi(puzzle,idx)\n while (j < idx):\n if Posisi(puzzle,j) > idxPos:\n count += 1\n j += 1\n return count\n\ndef Koordinat(puzzle, x):\n for i in range(len(puzzle)):\n for j in range(len(puzzle[i])):\n if x == puzzle[i][j]:\n return [i,j]\n\ndef copyPuzzle(puzzle):\n temp = []\n for i in range(len(puzzle)):\n tempI = puzzle[i].copy()\n temp.append(tempI)\n return temp\n\n# $ ====================== MOVEMENT FUNCTIONS ======================\ndef moveUp(puzzle):\n koor = Koordinat(puzzle,16)\n koorNew = [koor[0]-1,koor[1]]\n temp = copyPuzzle(puzzle)\n t = temp[koorNew[0]][koorNew[1]]\n temp[koorNew[0]][koorNew[1]] = 16\n temp[koor[0]][koor[1]] = t\n return temp\n\ndef moveDown(puzzle):\n koor = Koordinat(puzzle,16)\n koorNew = [koor[0]+1,koor[1]]\n temp = copyPuzzle(puzzle)\n t = temp[koorNew[0]][koorNew[1]]\n temp[koorNew[0]][koorNew[1]] = 16\n temp[koor[0]][koor[1]] = t\n return temp\n\ndef moveLeft(puzzle):\n koor = Koordinat(puzzle,16)\n temp = copyPuzzle(puzzle)\n temp[koor[0]].remove(16)\n temp[koor[0]].insert(koor[1]-1,16)\n return temp\n\ndef moveRight(puzzle):\n koor = Koordinat(puzzle,16)\n temp = copyPuzzle(puzzle)\n temp[koor[0]].remove(16)\n temp[koor[0]].insert(koor[1]+1,16)\n return temp\n\n# $ ====================== SOLVING FUNCTIONS ======================\ndef Ci(node, depth):\n return depth + Gi(node)\n\ndef Gi(puzzle):\n count = 0\n idx = 1\n for x in puzzle:\n for y in x:\n if (y!=16 and y!=idx):\n count += 1\n idx += 1\n return count\n\ndef generateNodes(node, vis):\n new = []\n koor = Koordinat(node[0],16)\n depth = node[1] + 1\n if (koor[0] != 0) and node[3][-1] != \"d\": # MOVEUP\n up = moveUp(node[0])\n if not(str(up) in vis):\n new.append([up,depth,Ci(up,depth),node[3]+\"u\"])\n vis.append(str(up))\n if (koor[1] != 0) and node[3][-1] != \"r\": # MOVELEFT\n left = moveLeft(node[0])\n if not(str(left) in vis):\n new.append([left,depth,Ci(left,depth),node[3]+\"l\"])\n vis.append(str(left))\n if (koor[0] != 3) and node[3][-1] != \"u\": # MOVEDOWN\n down = moveDown(node[0])\n if not(str(down) in vis):\n new.append([down,depth,Ci(down,depth),node[3]+\"d\"])\n vis.append(str(down))\n if (koor[1] != 3) and node[3][-1] != \"l\": # MOVERIGHT\n right = moveRight(node[0])\n if not(str(right) in vis):\n 
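# The candidate boards are serialized with str() for the vis membership test,\n # so each distinct layout is enqueued at most once; vis is a plain list, so\n # every such test is O(n) in the number of visited boards. node[3] accumulates\n # the move string (u/d/l/r) that printPath() and traceback() replay later.\n 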
new.append([right,depth,Ci(right,depth),node[3]+\"r\"])\n vis.append(str(right))\n return new\n\ndef findLowestCost(nodes):\n costArr=[]\n for node in nodes:\n costArr.append(node[2])\n lowCost = min(costArr)\n return costArr.index(lowCost)\n \n\ndef solve(puzzle):\n vis = []\n nodeCount = 1\n if Gi(puzzle) != 0:\n depth = 0\n nodes = [[puzzle,depth,Ci(puzzle,depth),\".\"]]\n while len(nodes)!=0:\n idxMin = findLowestCost(nodes)\n evalNode = nodes.pop(idxMin)\n if (Gi(evalNode[0]) == 0):\n sol = evalNode\n break\n newQueue = generateNodes(evalNode,vis)\n if len(newQueue) == 0:\n continue\n nodeCount += len(newQueue)\n nodes.extend(newQueue)\n print(\"===== GOAL STATE REACHED =====\")\n printPuzzle(sol[0])\n return [nodeCount,sol[3][1:]]\n\n# $ ====================== PRINT RESULTS ======================\n\ndef traceback(puzzle,path):\n printPuzzle(puzzle)\n for c in path:\n print()\n print(\"V V V V V\")\n print()\n if c == \"u\":\n puzzle = moveUp(puzzle)\n printPuzzle(puzzle)\n elif c == \"d\":\n puzzle = moveDown(puzzle)\n printPuzzle(puzzle)\n elif c == \"l\":\n puzzle = moveLeft(puzzle)\n printPuzzle(puzzle)\n elif c == \"r\":\n puzzle = moveRight(puzzle)\n printPuzzle(puzzle)\n time.sleep(.2)\n\ndef printPath(path):\n for c in path:\n if c == \"u\":\n print(\"Up \", end=\"\")\n elif c == \"d\":\n print(\"Down \", end=\"\")\n elif c == \"l\":\n print(\"Left \", end=\"\")\n elif c == \"r\":\n print(\"Right \", end=\"\")\n print()\n\n# $ ========================================================\n# $ ====================== INPUT FILE ======================\nprint(\"[]=======================[]\")\nprint(\"|| 15 Puzzle Solver ||\")\nprint(\"|| - - - - - - - - - - - ||\")\nprint(\"|| File : ||\")\nprint(\"|| 1. Use Default ||\")\nprint(\"|| (puzzle.txt) ||\")\nprint(\"|| 2. 
Input file ||\")\nprint(\"[]=======================[]\")\nprint(\"||\")\ntry:\n menu = int(input(\"[]=>> \"))\nexcept:\n print(\"Reading default file...\")\n menu = 1\npuzzleFile = readPuzzle(menu)\nprint()\n# $ ====================== READ & PROCESS FILE ======================\npuzzle=[] # Menyimpan matrix\nfor line in puzzleFile.readlines():\n puzzle.append( [ int (x) for x in line.split(' ') ] )\n# $ ====================== POSISI AWAL ======================\nprint(\"===== Posisi Awal ======\")\nprintPuzzle(puzzle)\nprint()\n\n# $ ====================== KURANG(i) ======================\nprint(\"== Reachable Goal ==\")\nprint(\"+-------+-----------+\")\nprint(\"| i\\t| Kurang(i) |\")\nprint(\"+-------+-----------+\")\nsumKurang = 0\nfor idx in range(16):\n kur = Kurang(puzzle,idx+1)\n sumKurang += kur\n print(\"| \" + str(idx+1) + \"\\t| \" + str(kur) +\"\\t |\")\nprint(\"+-------+-----------+\")\nif (Posisi(puzzle, 16) in [2,4,5,7,10,12,13,15]):\n X = 1\nelse:\n X = 0\n\n# $ ====================== SIGMA KURANG(i) + X ======================\nsolvable = sumKurang + X\nprint(\"SIGMA(Kurang(i)) + X = \" + str(solvable))\nprint()\n\nif (solvable%2 == 1):\n print(\"Sayang sekali, puzzle tidak bisa diselesaikan\")\n print(\"dengan metode Branch and Bound :(\")\nelse: # $ ================== SOLVE =====================\n start_time = time.time() # $ ================== MEASURE TIME =====================\nif solvable%2 == 0:\n ans = solve(puzzle)\n print(\"=== Simpul yang dibangkitkan: \" + str(ans[0]))\n print(\"=== Path: \", end=\"\")\n printPath(ans[1])\n print(\"=== Path Length: \" + str(len(ans[1])))\n time_elapsed = time.time() - start_time\n print(\"=== Time Elapsed : \" + str(time_elapsed) + \" seconds =====\")\n print()\n printResult = input(\"=====[] Visualise Path? 
: (y/n) \")\n# $ ====================== PRINT ALL NODES ======================\n if printResult == \"y\":\n traceback(puzzle,ans[1])\n print()\n print(\"=== Simpul yang dibangkitkan: \" + str(ans[0]))\n print(\"=== Path: \", end=\"\")\n printPath(ans[1])\n print(\"=== Path Length: \" + str(len(ans[1])))\n print(\"=== Time Elapsed : \" + str(time_elapsed) + \" seconds =====\")\nelse:\n print(\"=== Simpul yang dibangkitkan: 0\")\n print(\"=== Path: \")\n print(\"=== Path Length: 0\")\n print(\"=== Time Elapsed : 0.00000000 seconds =====\")\n","sub_path":"stima3/Tucil3StrAlgo-13518135/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"267388153","text":"# -*- coding: utf-8 -*-\nfrom PySide import QtGui, QtCore\nimport pymel.core as pm\nimport miraCore\nfrom miraLibs.pipeLibs.pipeMaya import get_asset_names, assign_shader, get_current_project\nfrom miraLibs.mayaLibs import get_maya_win\nfrom miraFramework.Filter import ButtonLineEdit\nfrom miraLibs.pyLibs import join_path\nfrom miraLibs.pipeLibs.pipeDb import sql_api\n\n\nclass PopDialog(QtGui.QDialog):\n def __init__(self, data_list=[], parent=None):\n super(PopDialog, self).__init__(parent)\n # data_list --->first argument: asset_name; second argument: not exist model list\n self.data_list = data_list\n self.resize(600, 300)\n self.setWindowTitle(\"Assign shader information\")\n main_layout = QtGui.QHBoxLayout(self)\n main_layout.setContentsMargins(1, 0, 1, 0)\n self.create_table()\n main_layout.addWidget(self.info_table)\n\n def create_table(self):\n self.info_table = QtGui.QTableWidget()\n self.info_table.verticalHeader().setVisible(False)\n self.info_table.setColumnCount(2)\n self.info_table.setRowCount(len(self.data_list))\n self.info_table.setHorizontalHeaderLabels([\"Asset\", \"Not exist models\"])\n self.info_table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\n self.info_table.horizontalHeader().setStretchLastSection(True)\n self.info_table.setSelectionMode(QtGui.QAbstractItemView.NoSelection)\n self.set_table_data()\n self.info_table.resizeColumnToContents(0)\n\n def set_table_data(self):\n if not self.data_list:\n return\n for index, data in enumerate(self.data_list):\n asset_name = data[0]\n not_exist_model_list = data[1]\n asset_item = QtGui.QTableWidgetItem(asset_name)\n self.info_table.setItem(index, 0, asset_item)\n if not_exist_model_list:\n model_list_widget = QtGui.QListWidget()\n model_list_widget.addItems(not_exist_model_list)\n model_list_widget.itemDoubleClicked.connect(self.set_item_editable)\n self.info_table.setCellWidget(index, 1, model_list_widget)\n self.info_table.setRowHeight(index, 100)\n else:\n model_item = QtGui.QTableWidgetItem(u\"√\")\n model_item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.info_table.setItem(index, 1, model_item)\n\n @staticmethod\n def set_item_editable(item):\n item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)\n\n\nclass AssetTableModel(QtCore.QAbstractTableModel):\n def __init__(self, arg=[], header=[], parent=None):\n super(AssetTableModel, self).__init__(parent)\n self.__arg = arg\n self.__header = header\n\n @property\n def arg(self):\n return self.__arg\n\n @arg.setter\n def arg(self, value):\n self.__arg = value\n\n @property\n def header(self):\n return self.__header\n\n @header.setter\n def header(self, value):\n self.__header = value\n\n def rowCount(self, parent):\n return len(self.__arg)\n\n def columnCount(self, parent):\n return 
len(self.__arg[0])\n\n def data(self, index, role=QtCore.Qt.DisplayRole):\n if role == QtCore.Qt.DisplayRole:\n row = index.row()\n column = index.column()\n return self.__arg[row][column]\n\n def flags(self, index):\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n\n def setData(self, index, value, role=QtCore.Qt.DisplayRole):\n if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:\n row = index.row()\n column = index.column()\n self.__arg[row][column] = value\n self.dataChanged.emit(index, index)\n return True\n return False\n\n def headerData(self, section, orientation, role):\n if role == QtCore.Qt.DisplayRole:\n if orientation == QtCore.Qt.Horizontal:\n return self.__header[section]\n\n\nclass ComboDelegate(QtGui.QItemDelegate):\n def __init__(self, parent=None):\n super(ComboDelegate, self).__init__(parent)\n\n def createEditor(self, parent, option, index):\n if index.column() == 1:\n combo = QtGui.QComboBox(parent)\n combo.currentIndexChanged.connect(self.onCurrentIndexChanged)\n return combo\n\n def setEditorData(self, editor, index):\n editor.blockSignals(True)\n value = index.model().data(index, QtCore.Qt.DisplayRole)\n if isinstance(value, list):\n editor.addItems(value)\n editor.setCurrentIndex(editor.findText(\"default\"))\n editor.blockSignals(False)\n\n def setModelData(self, editor, model, index):\n value = editor.currentText()\n model.setData(index, value, QtCore.Qt.DisplayRole)\n\n def updateEditorGeometry(self, editor, option, index):\n editor.setGeometry(option.rect)\n\n def onCurrentIndexChanged(self, index):\n self.commitData.emit(self.sender())\n\n\nclass AssignShader(QtGui.QDialog):\n def __init__(self, parent=None):\n super(AssignShader, self).__init__(parent)\n self.setWindowFlags(QtCore.Qt.Dialog | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint)\n self.setWindowTitle(\"Assign Shader\")\n self.resize(400, 350)\n current_project = get_current_project.get_current_project()\n self.__db = sql_api.SqlApi(current_project)\n main_layout = QtGui.QVBoxLayout(self)\n main_layout.setContentsMargins(4, 4, 4, 4)\n\n self.filter_layout = QtGui.QHBoxLayout()\n self.filter_le = ButtonLineEdit()\n self.filter_layout.addStretch()\n self.filter_layout.addWidget(self.filter_le)\n self.update_btn = QtGui.QToolButton()\n icon_path = join_path.join_path2(miraCore.get_icons_dir(), \"update.png\")\n self.update_btn.setIcon(QtGui.QIcon(icon_path))\n self.update_btn.setStyleSheet(\"QToolButton{background:transparent;}\"\n \"QToolButton::hover{background:#00BFFF;border-color:#00BFFF;}\")\n self.filter_layout.addWidget(self.update_btn)\n\n self.table_view = QtGui.QTableView()\n self.table_view.verticalHeader().hide()\n self.table_view.horizontalHeader().setStretchLastSection(True)\n self.table_view.setSortingEnabled(True)\n self.table_view.setAlternatingRowColors(True)\n self.table_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n\n self.btn_layout = QtGui.QHBoxLayout()\n self.assign_shader_btn = QtGui.QPushButton(\"Assign Shader\")\n self.assign_lambert_btn = QtGui.QPushButton(\"Assign Lambert\")\n self.btn_layout.addStretch()\n self.btn_layout.addWidget(self.assign_shader_btn)\n self.btn_layout.addWidget(self.assign_lambert_btn)\n\n main_layout.addLayout(self.filter_layout)\n main_layout.addWidget(self.table_view)\n main_layout.addLayout(self.btn_layout)\n\n self.set_model()\n self.set_signals()\n\n def set_model(self):\n headers = [\"Assets\", \"Shader Version\"]\n model_data = list()\n assets = get_asset_names.get_asset_names()\n 
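# get_asset_names() collects the asset nodes referenced in the open Maya scene;\n # when the scene is empty there is nothing to list, so the early return below\n # leaves the table view unpopulated.\n 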
if not assets:\n return\n assets = [asset.name() for asset in assets]\n # {'assetName':'goushi','assetType':'asset','assetChildType':'character'}\n asset_dict = {\"char\": \"character\", \"prop\": \"prop\", \"env\": \"environment\"}\n for asset in assets:\n asset_child_type, asset_name, temp = asset.split(\":\")[-1].split(\"_\")\n arg_dict = {'assetName': asset_name, 'assetType': 'asset', 'assetChildType': asset_dict[asset_child_type]}\n shd_version = self.__db.getShadeVersion(arg_dict)\n model_data.append([asset, shd_version])\n self.model = AssetTableModel(model_data, headers)\n self.proxy_model = QtGui.QSortFilterProxyModel()\n self.proxy_model.setFilterKeyColumn(0)\n self.proxy_model.setDynamicSortFilter(True)\n self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)\n self.proxy_model.setSourceModel(self.model)\n self.filter_le.textChanged.connect(self.set_filter)\n self.table_view.setModel(self.proxy_model)\n # delegate\n self.set_item_delegate()\n self.table_view.resizeColumnToContents(0)\n\n def set_item_delegate(self):\n image_delegate = ComboDelegate(self)\n self.table_view.setItemDelegateForColumn(1, image_delegate)\n self.show_item_delegate()\n\n def show_item_delegate(self):\n for i in xrange(self.proxy_model.rowCount()):\n self.table_view.openPersistentEditor(self.proxy_model.index(i, 1))\n\n def set_filter(self, value):\n self.proxy_model.setFilterRegExp(value)\n self.show_item_delegate()\n\n def set_signals(self):\n selection = self.table_view.selectionModel()\n if selection:\n selection.selectionChanged.connect(self.select_model)\n self.table_view.clicked.connect(self.select_model)\n self.update_btn.clicked.connect(self.set_model)\n self.assign_shader_btn.clicked.connect(self.assign_shader)\n self.assign_lambert_btn.clicked.connect(self.assign_lambert)\n\n def get_selected_assets(self):\n selected_indexes = self.table_view.selectedIndexes()\n if not selected_indexes:\n return\n selected_rows = list(set([self.proxy_model.mapToSource(i).row() for i in selected_indexes]))\n selected_assets = list()\n for row in selected_rows:\n asset_full_name = pm.PyNode(self.model.index(row, 0).data())\n shader_version = self.model.index(row, 1).data()\n shader_version = shader_version if isinstance(shader_version, basestring) else \"default\"\n selected_assets.append([asset_full_name, shader_version])\n return selected_assets\n\n def select_model(self):\n pm.select(clear=1)\n selected_assets = self.get_selected_assets()\n if not selected_assets:\n return\n for asset in selected_assets:\n pm.select(asset[0], add=1)\n\n def assign_shader(self):\n selected_assets = self.get_selected_assets()\n data_list = list()\n for asset in selected_assets:\n not_exist_list = assign_shader.assign_shader(*asset)\n data_list.append([asset[0].name(), not_exist_list])\n pop_dialog = PopDialog(data_list, self)\n pop_dialog.show()\n\n def assign_lambert(self):\n selected_assets = self.get_selected_assets()\n if not selected_assets:\n return\n for asset in selected_assets:\n pm.sets(\"initialShadingGroup\", fe=asset[0])\n self.message_box(\"Assign lambert done.\")\n\n @staticmethod\n def message_box(message):\n QtGui.QMessageBox.information(None, \"Warming Tip\", message)\n\n\ndef main():\n ass = AssignShader(get_maya_win.get_maya_win(\"PySide\"))\n ass.show()\n","sub_path":"miraScripts/pipeTools/shelf/assign_shader.py","file_name":"assign_shader.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} 
+{"seq_id":"591324917","text":"# ------@gROUP 18---------------#\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport numpy as np\r\n\r\nhtml_text = requests.get('https://kpkesihatan.com/').text\r\nlinkFirst = BeautifulSoup(html_text,'html.parser') #Get page source\r\nlink_First=linkFirst.find('h2',class_='posttitle').a['href']\r\n\r\nurl=link_First #Latest link covid\r\n\r\npage=requests.get(url).text\r\nsoup=BeautifulSoup(page,'html.parser')\r\n\r\nf = soup.findAll(\"figure\",{\"class\":\"wp-block-table\"})\r\nd= soup.findAll(\"h1\",{\"class\":\"title\"}) #Get specific code\r\nd=str(d)\r\nd1= d.split('Kenyataan Akhbar KPK ',1)[1] #ICU Cases\r\ndate= d1.split(' – Situasi',1)[0]\r\nt = f[1].find(\"table\")\r\n\r\ntable_rows=t.find_all(\"tr\")[1:] #First row\r\nnum_cases={}\r\nfor row in table_rows:\r\n td_symbol=row.find_all('td')[0].text #First column\r\n td_cases=row.find_all('td')[1].text #Second column\r\n td={}\r\n sep = '(' #Remove parentheses\r\n sep1= '\\xa0'\r\n sep2= ' '\r\n td_cases = td_cases.split(sep, -1)[0]\r\n td_symbol=td_symbol.split(sep1, -1)[0]\r\n td_symbol=td_symbol.replace(\"WP KUALA LUMPUR\", \"KL\")\r\n td_symbol=td_symbol.replace(\"WP PUTRAJAYA\", \"PUTRAJAYA\")\r\n td_symbol=td_symbol.replace(\"WP LABUAN\", \"LABUAN\")\r\n td_symbol=td_symbol.split(sep2, -1)[0]\r\n num_cases[td_symbol]=td_cases\r\n num_cases= num_cases\r\n\r\ndata = {\r\n \"Article Date\": date, #Store \"date\" in dict\r\n \"Selangor\": int(num_cases['SELANGOR'].replace(',' , '')), #Assigned int and remove \" , \"\r\n \"Sabah\": int(num_cases['SABAH'].replace(',' , '')),\r\n \"Johor\": int(num_cases['JOHOR'].replace(',' , '')),\r\n \"WP Kuala Lumpur\": int(num_cases['KL'].replace(',' , '')),\r\n \"Negeri Sembilan\": int(num_cases['NEGERI'].replace(',' , '')),\r\n \"Sarawak\": int(num_cases['SARAWAK'].replace(',' , '')),\r\n \"Pulau Pinang\": int(num_cases['PULAU'].replace(',' , '')),\r\n \"Perak\": int(num_cases['PERAK'].replace(',' , '')),\r\n \"Kedah\": int(num_cases['KEDAH'].replace(',' , '')),\r\n \"Melaka\": int(num_cases['MELAKA'].replace(',' , '')),\r\n \"Kelantan\": int(num_cases['KELANTAN'].replace(',' , '')), \r\n \"Pahang\": int(num_cases['PAHANG'].replace(',' , '')),\r\n \"Terengganu\": int(num_cases['TERENGGANU'].replace(',' , '')),\r\n \"WP Labuan\": int(num_cases['LABUAN'].replace(',' , '')),\r\n \"WP Putrajaya\": int(num_cases['PUTRAJAYA'].replace(',' , '')),\r\n \"Perlis\": int(num_cases['PERLIS'].replace(',' , '')),\r\n \"Jumlah Keseluruhan\": int(num_cases['JUMLAH'].replace(',' , '')),\r\n\r\n }\r\n\r\n###------------------------------------------------Upload to firebase-------------------------------###\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import firestore\r\nimport datetime\r\nfrom datetime import datetime\r\nimport pytz\r\n\r\ntz = pytz.timezone('Asia/Singapore')\r\ntime = datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S')\r\n\r\ncred = credentials.Certificate(\"path.json\") ##Put your json file\r\nfirebase_admin.initialize_app(cred)\r\ndb=firestore.client()\r\n\r\ndb.collection(\"Current covid states cases\").document(\"Current\").set(data)\r\ndb.collection(\"Current covid states cases\").document(\"Current\").update({u'data':True})\r\n","sub_path":"web scrape/Current states cases.py","file_name":"Current states cases.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"424263028","text":"from django.contrib.auth import 
get_user_model\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom orders.models import Department, Order\nfrom multiselectfield import MultiSelectField\n\nUser = get_user_model()\n\nLEVEL = [ # define employee levels\n (0, 'Head of Department'),\n (1, 'Purchasing Manager'),\n (2, 'Financial Controller'),\n (3, 'General Manager')\n]\n\nLEVEL_TITLE = [x for x, y in LEVEL] # list of level titles only\n\nLEVEL_VIEW_ALL = [1, 2, 3, ] # list of levels to view all requests\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.SET('Retired'))\n sign_as = MultiSelectField(choices=LEVEL, default=None, null=True, blank=True)\n department = models.ForeignKey(\n Department,\n on_delete=models.SET('Retired'),\n related_name='profiles',\n blank=True, null=True,\n )\n\n def __str__(self):\n return f'{self.user} - Profile'\n\n @classmethod\n def create(profile, user, department=None):\n if department == None:\n if not Department.objects.all().exists():\n department = Department.objects.create(\n code='NA',\n name='Not assigned')\n profile = profile(user=user, department=department)\n return profile\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\n\nclass Signature(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.SET(None),\n related_name='signatures'\n )\n level = models.CharField(choices=LEVEL, max_length=5)\n order = models.ForeignKey(\n Order,\n on_delete=models.CASCADE,\n related_name='signatures'\n )\n approved = models.BooleanField(default=False, null=False)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n win_username = models.CharField(default=None, null=False, max_length=200)\n win_pcname = models.CharField(default=None, null=False, max_length=200)\n\n def __str__(self):\n order = self.order.id\n user = self.user\n level = LEVEL[int(self.level)][1]\n result = 'APPROVED' if self.approved else 'DECLINED'\n return (f'{order} - {user} as {level} - {result}')\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"424668741","text":"import imutils\nimport dlib\nimport cv2\nimport numpy as np\nimport argparse\nfrom imutils import face_utils\nimport faceBlendCommon as fbc\nimport os\nimport tensorflow as tf, sys\n\n# 3D model points.\nmodel_points = np.array([\n (0.0, 0.0, 0.0), # Nose tip\n (0.0, -330.0, -65.0), # Chin\n (-225.0, 170.0, -135.0), # Left eye left corner\n (225.0, 170.0, -135.0), # Right eye right corne\n (-150.0, -150.0, -125.0), # Left Mouth corner\n (150.0, -150.0, -125.0) # Right mouth corner\n \n ])\nleftEye = [36, 37, 38, 39, 40, 41]\nrightEye = [42, 43, 44, 45, 46, 47]\nmouth = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59 ]\nleftBrows = [17, 18, 19, 20, 21]\nrightBrows = [22, 23, 24, 25, 26]\n\n# kernal size for morphological opening \nk = 5\n\ndef applyMask(skinImage, points):\n\n tempMask = np.ones((skinImage.shape[0], skinImage.shape[1]), dtype = np.uint8)\n \n temp = []\n for p in leftEye:\n temp.append(( points[p][0], points[p][1] ))\n\n cv2.fillConvexPoly(tempMask, np.int32(temp), 0, 16, 0)\n\n temp = []\n for p in rightEye:\n temp.append(( points[p][0],points[p][1] 
))\n\n cv2.fillConvexPoly(tempMask, np.int32(temp), 0, 16, 0)\n\n temp = []\n for p in leftBrows:\n temp.append(( points[p][0],points[p][1] ))\n\n cv2.fillConvexPoly(tempMask, np.int32(temp), 0, 16, 0)\n\n temp = []\n for p in rightBrows:\n temp.append(( points[p][0],points[p][1] ))\n\n cv2.fillConvexPoly(tempMask, np.int32(temp), 0, 16, 0)\n\n temp = []\n for p in mouth:\n temp.append(( points[p][0],points[p][1] ))\n\n cv2.fillConvexPoly(tempMask, np.int32(temp), 0, 16, 0)\n\n return cv2.bitwise_and(skinImage, skinImage, mask = tempMask)\n\ndef findSkinYCB(meanimg, frame):\n\n # Specify the offset around the mean value\n CrOffset = 15\n CbOffset = 15\n YValOffset = 100\n \n # Convert to the YCrCb color space\n ycb = cv2.cvtColor(meanimg,cv2.COLOR_BGR2YCrCb)[0][0]\n frameYCB = cv2.cvtColor(frame,cv2.COLOR_BGR2YCrCb)\n\n # Find the range of pixel values to be taken as skin region\n minYCB = np.array([ycb[0] - YValOffset,ycb[1] - CrOffset, ycb[2] - CbOffset])\n maxYCB = np.array([ycb[0] + YValOffset,ycb[1] + CrOffset, ycb[2] + CbOffset])\n\n # Apply the range function to find the pixel values in the specific range\n skinRegionycb = cv2.inRange(frameYCB,minYCB,maxYCB)\n\n # Apply Gaussian blur to remove noise\n skinRegionycb = cv2.GaussianBlur(skinRegionycb, (5, 5), 0)\n\n # Get the kernel for performing morphological opening operation\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))\n skinRegionycb = cv2.morphologyEx(skinRegionycb, cv2.MORPH_OPEN, kernel, iterations = 3)\n #skinRegionycb = cv2.dilate(skinRegionycb, kernel, iterations=3)\n\n # Apply the mask to the image\n skinycb = cv2.bitwise_and(frame, frame, mask = skinRegionycb)\n return skinRegionycb,skinycb\n\ndef line_intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1]) #Typo was here\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y\ndef handconvex(bin_img,img):\n _,contours, hierarchy = cv2.findContours(bin_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n drawing = np.zeros(img.shape,np.uint8)\n max_area=0\n for i in range(len(contours)):\n cnt=contours[i]\n area = cv2.contourArea(cnt)\n if(area>max_area):\n max_area=area\n ci=i\n\n cnt=contours[ci]\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\n hull = cv2.convexHull(cnt)\n moments = cv2.moments(cnt)\n if moments['m00']!=0:\n cx = int(moments['m10']/moments['m00']) # cx = M10/M00\n cy = int(moments['m01']/moments['m00']) # cy = M01/M00\n centr=(cx,cy) \n cv2.circle(img,centr,5,[0,0,255],2) \n cv2.drawContours(drawing,[cnt],0,(0,255,0),2) \n cv2.drawContours(drawing,[hull],0,(0,0,255),2) \n \n cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)\n hull = cv2.convexHull(cnt,returnPoints = False)\n\n return drawing,x,y,w,h\ndef classifyhand(image_data):\n os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n label_lines = [line.rstrip() for line \n in tf.gfile.GFile(\"retrained_labels_massay_mobile.txt\")]\n\n # Unpersists graph from file\n with tf.gfile.FastGFile(\"retrained_graph_massay_mobile.pb\", 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='') \n with tf.Session() as sess:\n # Feed the image_data as input to the graph and get first prediction\n softmax_tensor = 
sess.graph.get_tensor_by_name('final_result:0')\n \n predictions = sess.run(softmax_tensor, \\\n {'DecodeJpeg:0': image_data})\n \n # Sort to show labels of first prediction in order of confidence\n top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\n pf=[]\n pfs=[]\n for node_id in top_k:\n human_string = label_lines[node_id]\n pf.append(human_string)\n score = predictions[0][node_id]\n pfs.append(score)\n print('%s (score = %.5f)' % (human_string, score))\n print('-------------------------------------------')\n text=str(pf[0])\n #text=str(pf[0])+'(Confidance: '+str(pfs[0])+')' \n return text\ndef distance(x,y):\n import math\n return math.sqrt((x[0]-y[0])**2+(x[1]-y[1])**2) \n\n\n\n\n \n\nif __name__ == '__main__':\n\n \n print(\"[INFO] loading facial landmark predictor...\")\n #faceDetector = dlib.get_frontal_face_detector()\n detector = dlib.get_frontal_face_detector()\n # Load landmark detector.\n #landmarkDetector = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n print(\"[INFO] camera sensor warming up...\")\n\n camera = cv2.VideoCapture(0)\n\n # keep looping over the frames in the video\nfr=1;\nim=1;\nwhile True:\n # grab the current frame\n (grabbed, frame) = camera.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n size = frame.shape\n #print(frame[479][848][0])\n cv2.circle(gray,(0,0),100,(0,0,255),-1)\n\n # Camera internals\n \n focal_length = size[1]\n center = (size[1]/2, size[0]/2)\n camera_matrix = np.array(\n [[focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0, 1]], dtype = \"double\"\n )\n\n # detect faces in the grayscale frame\n rects = detector(gray, 0)\n if len(rects) != 0:\n maxArea = 0\n maxRect = None\n # TODO: test on images with multiple faces\n for rect in rects:\n if rect.area() > maxArea:\n maxArea = rect.area()\n maxRect = [rect.left(),rect.top(),rect.right(),rect.bottom()]\n \n rect = dlib.rectangle(*maxRect)\n shape = predictor(gray, rect)\n \n landmarks = fbc.dlibLandmarksToPoints(shape)\n landmarks = np.array(landmarks)\n \n ix = landmarks[32][0]\n fx = landmarks[34][0]\n iy = landmarks[29][1]\n fy = landmarks[30][1]\n\n # Take a patch on the nose\n tempimg = frame[iy:fy,ix:fx,:]\n \n # Compute the mean image from the patch\n meanimg = np.uint8([[cv2.mean(tempimg)[:3]]])\n skinRegionycb,skinycb = findSkinYCB(meanimg, frame)\n\n\n maskedskinycb = applyMask(skinycb, landmarks)\n #cv2.putText(skinycb, \"YCrCb\", (50, 50), cv2.FONT_HERSHEY_COMPLEX, .9, (255,255,255), 1, cv2.LINE_AA)\n cv2.imshow('masked',maskedskinycb)\n cv2.imshow(\"YCrCb\",skinRegionycb)\n x1=int(landmarks[0][0])\n for (x, y) in landmarks:\n cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)\n #cv2.circle(frame, (400,200),10, (255,0,0),-1)\n \n newframe=frame[:,0:x1]\n maskedframe=maskedskinycb[:,0:x1]\n handregion=skinRegionycb[:,0:x1]\n drawing,xh,yh,wh,hh=handconvex(handregion,newframe)\n #print(\"[INFO] Extracting the Hand region...\")\n cv2.imshow('output',drawing)\n #crphand=newframe[yh:yh+hh,xh:xh+wh]\n crphand=maskedframe[yh:yh+hh,xh:xh+wh]\n #cv2.imshow('crophand',crphand)\n \n\n ## Resizeing the image ##\n r = 90.0 / crphand.shape[1]\n dim = (90, int(crphand.shape[0] * r))\n # perform the actual resizing of the image and show it\n #resized = cv2.resize(crphand, dim, interpolation = cv2.INTER_AREA)\n resized = crphand\n #cv2.imshow(\"resized\", resized)\n #cv2.imshow(\"hand\",newframe)\n opdir='/finger/op_imgs/'\n img_files=os.listdir(opdir)\n 
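# Frame-capture throttle: a cropped hand image is written only on loop passes\n # where the fr counter has reached 3, and only while opdir holds fewer than\n # 2000 files; the bare len(img_files) expression on the next line is a no-op.\n 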
len(img_files)\n if len(img_files)<2000:\n # #print(\"[INFO] Capturing the frames for classification...\")\n if fr==3:\n cv2.imwrite(opdir+\"hand-\" + str(im) + \".jpg\", resized)\n # #print(\"[INFO] classifying...\")\n # #text=classifyhand(resized)\n fr=1\n\n fr=fr+1\n\n\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n \n # Display image\n im=im+1\n cv2.imshow(\"Output\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n# cleanup the camera and close any open windows\n\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"hand_extract.py","file_name":"hand_extract.py","file_ext":"py","file_size_in_byte":9759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"241123098","text":"from django.conf import settings\n\nimport django_filters\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import status\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom web3 import Web3\n\nfrom safe_transaction_service.version import __version__\n\nfrom .filters import DefaultPagination, MultisigTransactionFilter\nfrom .models import InternalTx, MultisigTransaction, SafeContract\nfrom .serializers import (IncomingTransactionResponseSerializer,\n SafeBalanceResponseSerializer,\n SafeMultisigTransactionResponseSerializer,\n SafeMultisigTransactionSerializer)\nfrom .services import BalanceServiceProvider\n\n\nclass AboutView(APIView):\n \"\"\"\n Returns info about the project.\n \"\"\"\n renderer_classes = (JSONRenderer,)\n\n def get(self, request, format=None):\n content = {\n 'name': 'Safe Transaction Service',\n 'version': __version__,\n 'api_version': self.request.version,\n 'secure': self.request.is_secure(),\n 'settings': {\n 'ETHEREUM_NODE_URL': settings.ETHEREUM_NODE_URL,\n 'ETHEREUM_TRACING_NODE_URL': settings.ETHEREUM_TRACING_NODE_URL,\n 'ETH_INTERNAL_TXS_BLOCK_PROCESS_LIMIT ': settings.ETH_INTERNAL_TXS_BLOCK_PROCESS_LIMIT,\n 'ETH_REORG_BLOCKS': settings.ETH_REORG_BLOCKS,\n }\n }\n return Response(content)\n\n\n@swagger_auto_schema(responses={200: 'Ok',\n 404: 'Not found'})\nclass SafeMultisigTransactionDetailView(RetrieveAPIView):\n serializer_class = SafeMultisigTransactionResponseSerializer\n lookup_field = 'safe_tx_hash'\n lookup_url_kwarg = 'tx_hash'\n\n def get_queryset(self):\n return MultisigTransaction.objects.prefetch_related(\n 'confirmations'\n ).select_related(\n 'ethereum_tx'\n )\n\n\nclass SafeMultisigTransactionListView(ListAPIView):\n pagination_class = DefaultPagination\n filter_backends = (django_filters.rest_framework.DjangoFilterBackend, )\n filterset_class = MultisigTransactionFilter\n\n def get_queryset(self):\n return MultisigTransaction.objects.filter(\n safe=self.kwargs['address']\n ).prefetch_related(\n 'confirmations'\n ).select_related(\n 'ethereum_tx'\n ).order_by(\n '-nonce',\n '-created'\n )\n\n def get_serializer_context(self):\n context = super().get_serializer_context()\n # TODO I think this is not useful anymore\n # Check if the 'owners' query parameter was passed in input\n query_owners = self.request.query_params.get('owners', None)\n if query_owners:\n context['owners'] = [owner for owner in query_owners.split(',') if owner != '']\n return context\n\n def get_serializer_class(self):\n \"\"\"\n Proxy returning a serializer class according to the request's verb.\n \"\"\"\n 
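# GET requests use the read/response serializer, POST requests the creation serializer\n        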
if self.request.method == 'GET':\n return SafeMultisigTransactionResponseSerializer\n elif self.request.method == 'POST':\n return SafeMultisigTransactionSerializer\n\n @swagger_auto_schema(responses={400: 'Invalid data',\n 404: 'Not found',\n 422: 'Invalid ethereum address'})\n def get(self, request, address, format=None):\n \"\"\"\n Returns the history of a multisig tx (safe)\n \"\"\"\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data='Invalid ethereum address')\n\n response = super().get(request, address)\n if response.data['count'] == 0:\n response.status_code = status.HTTP_404_NOT_FOUND\n\n return response\n\n @swagger_auto_schema(responses={202: 'Accepted',\n 400: 'Invalid data',\n 422: 'Invalid ethereum address/User is not an owner or tx not approved/executed'})\n def post(self, request, address, format=None):\n \"\"\"\n Creates a Multisig Transaction with its confirmations and retrieves all the information related.\n \"\"\"\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data='Invalid ethereum address')\n\n request.data['safe'] = address\n serializer = self.get_serializer_class()(data=request.data)\n\n if not serializer.is_valid():\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data=serializer.errors)\n else:\n serializer.save()\n\n # Create task if transaction hash\n # data = serializer.validated_data\n # transaction_hash = data.get('transaction_hash')\n # if transaction_hash:\n # check_approve_transaction_task.delay(safe_address=address,\n # safe_tx_hash=data['contract_transaction_hash'].hex(),\n # transaction_hash=transaction_hash.hex(),\n # owner=data['sender'])\n\n return Response(status=status.HTTP_202_ACCEPTED)\n\n\nclass SafeBalanceView(APIView):\n serializer_class = SafeBalanceResponseSerializer\n\n @swagger_auto_schema(responses={200: SafeBalanceResponseSerializer(many=True),\n 404: 'Safe not found',\n 422: 'Safe address checksum not valid'})\n def get(self, request, address, format=None):\n \"\"\"\n Get status of the safe\n \"\"\"\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n else:\n try:\n SafeContract.objects.get(address=address)\n except SafeContract.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n safe_balances = BalanceServiceProvider().get_balances(address)\n serializer = self.serializer_class(data=safe_balances, many=True)\n serializer.is_valid()\n return Response(status=status.HTTP_200_OK, data=serializer.data)\n\n\nclass SafeIncomingTxListView(ListAPIView):\n serializer_class = IncomingTransactionResponseSerializer\n\n def get_queryset(self):\n return InternalTx.objects.incoming_txs_with_events(self.kwargs['address'])\n\n @swagger_auto_schema(responses={200: IncomingTransactionResponseSerializer(many=True),\n 404: 'Txs not found',\n 422: 'Safe address checksum not valid'})\n def get(self, request, address, format=None):\n \"\"\"\n Returns the history of a multisig tx (safe)\n \"\"\"\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data='Invalid ethereum address')\n\n response = super().get(request, address)\n if response.data['count'] == 0:\n response.status_code = status.HTTP_404_NOT_FOUND\n\n return 
response\n","sub_path":"safe_transaction_service/history/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"470430516","text":"import unittest\n\nimport numpy as np\nfrom pycompss.api.api import compss_wait_on\nfrom scipy.sparse import csr_matrix, issparse\nfrom sklearn.datasets import make_blobs\nfrom sklearn.preprocessing import StandardScaler as SKScaler\n\nimport dislib as ds\nfrom dislib.preprocessing import StandardScaler\n\n\nclass StandardScalerTest(unittest.TestCase):\n def test_fit_transform(self):\n \"\"\" Tests fit_transform against scikit-learn.\n \"\"\"\n n_samples = 1500\n x, y = make_blobs(n_samples=n_samples, random_state=170)\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n x = np.dot(x, transformation)\n ds_arr = ds.array(x, block_size=(300, 2))\n\n sc1 = SKScaler()\n scaled_x = sc1.fit_transform(x)\n sc2 = StandardScaler()\n ds_scaled = sc2.fit_transform(ds_arr)\n\n self.assertTrue(np.allclose(scaled_x, ds_scaled.collect()))\n self.assertTrue(np.allclose(sc1.mean_, sc2.mean_.collect()))\n self.assertTrue(np.allclose(sc1.var_, sc2.var_.collect()))\n self.assertEqual(ds_scaled._top_left_shape,\n ds_scaled._blocks[0][0].shape)\n self.assertEqual(ds_arr._reg_shape, ds_scaled._reg_shape)\n self.assertEqual(ds_arr._top_left_shape, ds_scaled._top_left_shape)\n self.assertEqual(ds_arr.shape, ds_scaled.shape)\n self.assertEqual(ds_arr._n_blocks, ds_scaled._n_blocks)\n\n def test_sparse(self):\n \"\"\" Tests fit_transforms with sparse data\"\"\"\n n_samples = 1500\n x, y = make_blobs(n_samples=n_samples, random_state=170)\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n x = np.dot(x, transformation)\n\n dense_arr = ds.array(x, block_size=(300, 2))\n sparse_arr = ds.array(csr_matrix(x), block_size=(300, 2))\n\n sc = StandardScaler()\n dense_scaled = sc.fit_transform(dense_arr)\n dense_mean = sc.mean_.collect()\n dense_var = sc.var_.collect()\n\n sparse_scaled = sc.fit_transform(sparse_arr)\n sparse_mean = sc.mean_.collect()\n sparse_var = sc.var_.collect()\n\n csr_scaled = sparse_scaled.collect()\n arr_scaled = dense_scaled.collect()\n\n self.assertTrue(issparse(csr_scaled))\n self.assertTrue(sparse_scaled._sparse)\n self.assertTrue(sc.var_._sparse)\n self.assertTrue(sc.mean_._sparse)\n self.assertTrue(issparse(sparse_mean))\n self.assertTrue(issparse(sparse_var))\n\n self.assertTrue(np.allclose(csr_scaled.toarray(), arr_scaled))\n self.assertTrue(np.allclose(sparse_mean.toarray(), dense_mean))\n self.assertTrue(np.allclose(sparse_var.toarray(), dense_var))\n\n def test_irregular(self):\n \"\"\" Test with an irregular array \"\"\"\n n_samples = 1500\n x, y = make_blobs(n_samples=n_samples, random_state=170)\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n x = np.dot(x, transformation)\n ds_arr = ds.array(x, block_size=(300, 2))\n ds_arr = ds_arr[297:602]\n x = x[297:602]\n\n sc1 = SKScaler()\n scaled_x = sc1.fit_transform(x)\n sc2 = StandardScaler()\n ds_scaled = sc2.fit_transform(ds_arr)\n\n self.assertTrue(np.allclose(scaled_x, ds_scaled.collect()))\n self.assertTrue(np.allclose(sc1.mean_, sc2.mean_.collect()))\n self.assertTrue(np.allclose(sc1.var_, sc2.var_.collect()))\n self.assertEqual(ds_scaled._top_left_shape,\n compss_wait_on(ds_scaled._blocks[0][0]).shape)\n self.assertEqual(ds_arr._reg_shape, ds_scaled._reg_shape)\n self.assertEqual(ds_arr._top_left_shape, ds_scaled._top_left_shape)\n self.assertEqual(ds_arr.shape, ds_scaled.shape)\n 
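# scaling should preserve the block-grid layout of the distributed array\n        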
self.assertEqual(ds_arr._n_blocks, ds_scaled._n_blocks)\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/test_preproc.py","file_name":"test_preproc.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"604683237","text":"class TreeNode(object):\n def __init__(self, x, left=None, right=None):\n self.val = x\n self.left = left\n self.right = right\n\n\nclass Solution(object):\n def hasPathSum(self, root, target):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: bool\n \"\"\"\n if not root:\n return False\n\n if not root.left and not root.right and root.val == target:\n return True\n\n target -= root.val\n return self.hasPathSum(root.left, target) or self.hasPathSum(root.right,\n target)\n\n\ndef main():\n tree = TreeNode(5, left=TreeNode(4, left=TreeNode(11, left=TreeNode(7),\n right=TreeNode(2))),\n right=TreeNode(8, left=TreeNode(13),\n right=TreeNode(4, right=TreeNode(1))))\n solution = Solution()\n ret = solution.hasPathSum(tree, 18)\n print(ret)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"simple/112_path_sum.py","file_name":"112_path_sum.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"221546025","text":"#!/usr/bin/env python3\n\"\"\"\nSetup configuration for llvm--emulator.\n\nNote that due to naming issues with egg/pypi, the program is known as\n'llvm-minusminus-emulator' (without the 'quotes').\n\n\"\"\"\n\n\nfrom setuptools import setup\n\npackages = ['llvm_emulator']\n\nwith open('pypi_readme.rst') as f:\n long_description = f.read()\n\nwith open('{}/__about__.py'.format(packages[0])) as f:\n about = {}\n exec(f.read(), about)\n version = about['__version__']\n\nsetup(\n name='llvm-minusminus-emulator',\n version=version,\n description='A simple hacky emulator/debugger for LLVM--',\n long_description=long_description,\n url='https://gitlab.com/cfreksen/llvm--emulator',\n author='Casper Freksen',\n author_email='cfreksen@cs.au.dk',\n license='GPLv3+',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Topic :: System :: Emulators',\n 'Topic :: Software Development :: Debuggers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Environment :: Console'\n ],\n keywords='llvm debugger',\n packages=packages,\n include_package_data=True,\n install_requires=[\n 'ply >= 3'\n ],\n python_requires='~=3.5',\n package_data={},\n data_files=[],\n scripts=['bin/llvm--emulator']\n)\n","sub_path":"pypi_install_script/llvm-minusminus-emulator-1.1.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"563048847","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/www_rbac/validators.py\n# Compiled at: 2019-09-11 03:47:35\n# Size of source mod 2**32: 2109 bytes\nfrom wtforms.validators import EqualTo\nfrom wtforms.validators import ValidationError\n\nclass 
GreaterEqualThan(EqualTo):\n    __doc__ = 'Compares the values of two fields.\\n\\n    :param fieldname:\\n        The name of the other field to compare to.\\n    :param message:\\n        Error message to raise in case of a validation error. Can be\\n        interpolated with `%(other_label)s` and `%(other_name)s` to provide a\\n        more helpful error.\\n    '\n\n    def __call__(self, form, field):\n        try:\n            other = form[self.fieldname]\n        except KeyError:\n            raise ValidationError(field.gettext(\"Invalid field name '%s'.\" % self.fieldname))\n\n        if field.data is None or other.data is None:\n            return\n        if field.data < other.data:\n            d = {'other_label':hasattr(other, 'label') and other.label.text or self.fieldname, \n                 'other_name':self.fieldname}\n            message = self.message\n            if message is None:\n                message = field.gettext('Field must be greater than or equal to %(other_label)s.' % d)\n            else:\n                message = message % d\n            raise ValidationError(message)","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/validators.cpython-36.py","file_name":"validators.cpython-36.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"123894047","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# adversarial_attack.py\n# @Author : yuanwenjin\n# @Mail : \n# @Date : 11/11/2019, 5:31:44 PM\n# @Docs :\n#\n\nimport os, sys\nimport numpy as np\nimport time\n\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'slim'))\n\nfrom scipy.misc import imread, imresize, imsave\n\nimport tensorflow as tf\nfrom nets import nets_factory\n\ntf.flags.DEFINE_string(\n    'model_name', '', 'model name for mobile network.')\n\ntf.flags.DEFINE_string(\n    'checkpoint_path', '', 'Path to checkpoint for mobile network.')\n\ntf.flags.DEFINE_string(\n    'input_dir', './test_images', 'Input directory with images.')\n\ntf.flags.DEFINE_string(\n    'output_dir', './test_images_results', 'Output directory with images.')\n\ntf.flags.DEFINE_integer(\n    'image_width', 256, 'Width of each input image.')\n\ntf.flags.DEFINE_integer(\n    'image_height', 256, 'Height of each input image.')\n\ntf.flags.DEFINE_integer(\n    'image_resize', 240, 'Resize dimension for each input image, must be smaller than image_height')\n\ntf.flags.DEFINE_integer(\n    'batch_size', 1, 'How many images to process at one time.')\n\ntf.flags.DEFINE_float(\n    'max_epsilon', 3.0, 'Maximum size of adversarial perturbation.')\n\ntf.flags.DEFINE_float(\n    'prob', 0.5, 'Probability of using diverse inputs.')\n\n# if momentum = 1, this attack becomes M-DI-2-FGSM\ntf.flags.DEFINE_float(\n    'momentum', 0.0, 'Momentum.')\n\n# if iter = 0, this attack becomes FGSM\ntf.flags.DEFINE_bool(\n    'iter', 1, 'Iter.')\n\nFLAGS = tf.flags.FLAGS\n\nmodel_name = FLAGS.model_name\nnetwork_fn = nets_factory.get_network_fn(model_name, num_classes=(1001 - 0), is_training=False)\nmethod = 'IFGSM' if FLAGS.iter else 'FGSM'\nif FLAGS.momentum > 0:\n    method = 'M' + method\nif FLAGS.prob > 0.0:\n    method = 'DI-' + method\n\ndef load_images(input_dir, output_dir, batch_shape):\n    '''\n    ### Docs: Load images from the input directory, yielding them in batches.\n    ### Args:\n        - input_dir: str, directory containing the input images\n        - output_dir: str, directory where results are saved\n        - batch_shape: list, model input size, [batch_size, height, width, 3]\n    ### Returns:\n        - filenames: list, image file names, without paths\n        - images: array, one batch of images\n    ### Examples:\n    '''\n    images = np.zeros(batch_shape)\n    filenames = []\n    idx = 0\n    batch_size = batch_shape[0]\n    for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n        temp_name = str.split(filepath, '/')\n        output_name = output_dir + '/'+ 
temp_name[-1]\n        # check if the file exists\n        if os.path.isfile(output_name) == False:\n            with tf.gfile.Open(filepath) as f:\n                image = imread(f, mode='RGB').astype(np.float) / 255.0\n            # Images for inception classifier are normalized to be in [-1, 1] interval.\n            images[idx, :, :, :] = image * 2.0 - 1.0\n            filenames.append(os.path.basename(filepath))\n            idx += 1\n            if idx == batch_size:\n                yield filenames, images\n                filenames = []\n                images = np.zeros(batch_shape)\n                idx = 0\n    if idx > 0:\n        yield filenames, images\n\ndef save_images(images, filenames, output_dir):\n    '''\n    ### Docs: Save a batch of images to the output directory.\n    ### Args:\n        - images: array, one batch of images\n        - filenames: list, the corresponding image file names\n        - output_dir: str, directory where the images are saved\n    ### Returns:\n    ### Examples:\n    '''\n\n    for i, filename in enumerate(filenames):\n        # Images for inception classifier are normalized to be in [-1, 1] interval,\n        # so rescale them back to [-1, 1].\n        with tf.gfile.Open(os.path.join(output_dir, filename[:-4]+'-'+model_name+'_'+method+'.png'), 'w') as f:\n            imsave(f, (images[i, :, :, :] + 1.0) * 0.5 * 255, format='png')\n\ndef graph(x, y, i, x_max, x_min, grad):\n    '''\n    ### Docs: One step of the attack's computation graph.\n    ### Args:\n        - x: array, one batch of images\n\n    ### Returns:\n    ### Examples:\n    '''\n    eps = 2.0 * FLAGS.max_epsilon / 255.0\n    num_classes = 1001\n    momentum = FLAGS.momentum\n\n    logits, _ = network_fn(input_diversity(x))\n    # logits, _ = network_fn(x)\n    pred = tf.argmax(logits, 1)\n\n    # here is the way to stabilize the ground-truth labels\n    first_round = tf.cast(tf.equal(i, 0), tf.int64)\n    y = first_round * pred + (1 - first_round) * y\n    \n    one_hot = tf.one_hot(y, num_classes)\n    cross_entropy = tf.losses.softmax_cross_entropy(one_hot, logits)\n\n    # compute the gradient info \n    noise = tf.gradients(cross_entropy, x)[0]\n    noise = noise / tf.reduce_mean(tf.abs(noise), [1,2,3], keep_dims=True)\n    # accumulate the gradient \n    noise = momentum * grad + noise\n\n    x = x + eps * tf.sign(noise)\n    x = tf.clip_by_value(x, x_min, x_max)\n    i = tf.add(i, 1)\n    return x, y, i, x_max, x_min, noise\n\ndef stop(x, y, i, x_max, x_min, grad):\n    '''\n    ### Docs: Loop condition - continue while fewer than num_iter steps have run.\n    ### Args:\n    ### Returns:\n    ### Examples:\n    '''\n    if FLAGS.iter:\n        num_iter = int(min(FLAGS.max_epsilon+4, 1.25*FLAGS.max_epsilon))\n    else:\n        num_iter = 1\n    return tf.less(i, num_iter)\n\ndef input_diversity(input_tensor):\n    '''\n    ### Docs: Random resize-and-pad transform used by the diverse-inputs method.\n    ### Args:\n    ### Returns:\n    ### Examples:\n    '''\n    rnd = tf.random_uniform((), FLAGS.image_resize, FLAGS.image_width, dtype=tf.int32)\n    rescaled = tf.image.resize_images(input_tensor, [rnd, rnd], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n    h_rem = FLAGS.image_width - rnd\n    w_rem = FLAGS.image_width - rnd\n    pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)\n    pad_bottom = h_rem - pad_top\n    pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)\n    pad_right = w_rem - pad_left\n    padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], constant_values=0.)\n    padded.set_shape((input_tensor.shape[0], FLAGS.image_width, FLAGS.image_width, 3))\n    return tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(FLAGS.prob), lambda: padded, lambda: input_tensor)\n\ndef main(_):\n\n    eps = 2.0 * FLAGS.max_epsilon / 255.0\n    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]\n    with tf.Graph().as_default():\n        # Prepare graph\n        x_input = tf.placeholder(tf.float32, shape=batch_shape)\n        x_max = tf.clip_by_value(x_input + eps, -1.0, 1.0)\n        x_min = tf.clip_by_value(x_input - eps, -1.0, 1.0)\n\n        y = tf.constant(np.zeros([FLAGS.batch_size]), tf.int64)\n        i = tf.constant(0)\n        grad = tf.zeros(shape=batch_shape)\n        
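# run the iterative attack: graph() is applied repeatedly while stop() holds, threading the loop state through\n        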
x_adv, _, _, _, _, _ = tf.while_loop(stop, graph, [x_input, y, i, x_max, x_min, grad])\n # Run computation\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, FLAGS.checkpoint_path)\n for filenames, images in load_images(FLAGS.input_dir, FLAGS.output_dir, batch_shape):\n adv_images = sess.run(x_adv, feed_dict={x_input: images})\n save_images(adv_images, filenames, FLAGS.output_dir)\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"adv_attack/adversarial_attack.py","file_name":"adversarial_attack.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"8193548","text":"import numpy as np\nimport time\nimport math\nimport torch\nfrom torch.nn import functional as F\n\nPAD_ID, SOS_ID, EOS_ID, UNK_ID = [0, 1, 2, 3]\n\ndef cos_np(data1,data2):\n \"\"\"numpy implementation of cosine similarity for matrix\"\"\"\n dotted = np.dot(data1,np.transpose(data2))\n norm1 = np.linalg.norm(data1,axis=1)\n norm2 = np.linalg.norm(data2,axis=1)\n matrix_vector_norms = np.multiply(norm1, norm2)\n neighbors = np.divide(dotted, matrix_vector_norms)\n return neighbors\n\ndef normalize(data):\n \"\"\"normalize matrix by rows\"\"\"\n normalized_data = data/np.linalg.norm(data,axis=1).reshape((data.shape[0], 1))\n return normalized_data\n\ndef dot_np(data1,data2):\n \"\"\"cosine similarity for normalized vectors\"\"\"\n return np.dot(data1,np.transpose(data2))\n\n#######################################################################\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%d:%d'% (m, s)\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s<%s'%(asMinutes(s), asMinutes(rs))\n\n#######################################################################\n\ndef sent2indexes(sentence, vocab, max_len=None):\n '''sentence: a string or list of string\n return: a numpy array of word indices\n '''\n def convert_sent(sent, vocab):\n return np.array([vocab.get(word, UNK_ID) for word in sent.split()])\n if type(sentence) is list:\n indexes=[convert_sent(sent, vocab) for sent in sentence]\n sent_lens = [len(idxes) for idxes in indexes]\n if max_len is None:\n max_len = max(sent_lens)\n inds = np.zeros((len(sentence), max_len), dtype=np.int)\n for i, idxes in enumerate(indexes):\n inds[i,:len(idxes)]=indexes[i][:max_len]\n return inds\n else:\n return convert_sent(sentence, vocab)\n\n########################################################################\n","sub_path":"pytorch/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"472393842","text":"import time\nfrom typing import List, Dict\nfrom urllib.parse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom scihub.scihub import SciHub, CaptchaNeedException\n\nHEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0'}\n\n\nclass JournalFetcher:\n\n def __init__(self):\n self.sess = requests.Session()\n self.sess.headers = HEADERS\n\n def _get_html(self, url: str):\n resp = self.sess.get(url)\n if resp.ok:\n print(resp.text)\n return resp.text\n else:\n print(\"Boo! 
{}\".format(resp.status_code))\n print(resp.text)\n\n def _get_soup(self, html: str) -> BeautifulSoup:\n \"\"\"\n Return html soup.\n \"\"\"\n return BeautifulSoup(html, 'html.parser')\n\n\nclass ScienceDirectJournalFetcher(JournalFetcher):\n\n def __init__(self):\n super().__init__()\n self.scihub = SciHub()\n\n def get_journal_issue_article_urls(self, url: str) -> List[Dict[str, str]]:\n html = self._get_html(url)\n\n # find all article links in the html\n soup = self._get_soup(html)\n\n journal_title = soup.select('#journal-title span')[0].string.lower().replace(' ', '-')\n\n volume_text = soup.select('.js-vol-issue')[0].string.lower().replace(' ', '-')\n\n articles_content = soup.select('.article-content')\n articles = []\n for ac in articles_content:\n article_anchor = ac.select('a.article-content-title')[0]\n article_url = urljoin(url, article_anchor.get('href'))\n\n # article_title = article_anchor.select('.js-article-title')[0]\n # article_title_text = article_title.string.lower().replace(' ', '-')\n\n article_page_range = ac.select('.js-article-page-range')[0]\n article_page_range_text = article_page_range.string.lower().replace(' ', '-')\n\n filename = f'{journal_title}-{volume_text}-{article_page_range_text}.pdf'\n\n articles.append({\n 'url': article_url,\n 'filename': filename\n })\n\n return articles\n\n def get_journal_issue(self,\n url: str,\n destination: str = None,\n return_val=False):\n articles_data = []\n # get all article links from the issue\n articles = self.get_journal_issue_article_urls(url)\n print(articles)\n articles_len = len(articles)\n process_num = 0\n\n # fetch and download all the articles via scihub\n for article in articles:\n try:\n process_num += 1\n print(f'Getting scihub article {process_num} of {articles_len} for {article}')\n scihub_article_data = self.scihub.download(article['url'],\n destination=destination,\n path=article['filename'])\n if 'err' in scihub_article_data:\n print(scihub_article_data['err'])\n else:\n articles_data.append(scihub_article_data)\n except CaptchaNeedException as e:\n print(str(e))\n except Exception as e:\n print(str(e))\n time.sleep(.25)\n\n print('Finished fetching issue')\n\n if return_val:\n return articles_data\n","sub_path":"scihub/journals.py","file_name":"journals.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"488049182","text":"# encoding=utf8\r\n\r\nimport re, pprint, os, io, numpy\r\nimport nltk\r\nfrom bs4 import BeautifulSoup\r\nfrom sklearn.metrics.cluster import *\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom sklearn.metrics.cluster import adjusted_rand_score\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import wordnet\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nfrom tikapp import TikaApp\r\nfrom Tika_reader import TikaReader\r\n\r\ndef cluster_texts(texts, clustersNumber, distance):\r\n #Load the list of texts into a TextCollection object.\r\n collection = nltk.TextCollection(texts)\r\n print(\"Created a collection of\", len(collection), \"terms.\")\r\n\r\n #get a list of unique terms\r\n unique_terms = list(set(collection))\r\n print(\"Unique terms found: \", len(unique_terms))\r\n\r\n ### And here we actually call the function and create our array of vectors.\r\n vectors = [numpy.array(TF(f,unique_terms, collection)) for f in texts]\r\n print(\"Vectors created.\")\r\n\r\n # initialize the clusterer\r\n clusterer = 
AgglomerativeClustering(n_clusters=clustersNumber,\r\n                                  linkage=\"average\", affinity=distanceFunction)\r\n    clusters = clusterer.fit_predict(vectors)\r\n\r\n    return clusters\r\n\r\n# Function to create a TF vector for one document. For each of\r\n# our unique words, we have a feature which is the tf for that word\r\n# in the current document\r\ndef TF(document, unique_terms, collection):\r\n    word_tf = []\r\n    for word in unique_terms:\r\n        word_tf.append(collection.tf_idf(word, document))\r\n    return word_tf\r\n\r\ndef extract_entity_names(t):\r\n    entity_names = []\r\n    if hasattr(t, 'label') and t.label:\r\n        if t.label() == 'NE':\r\n            entity_names.append(' '.join([child[0] for child in t]))\r\n        else:\r\n            for child in t:\r\n                entity_names.extend(extract_entity_names(child))\r\n    return entity_names\r\n\r\n# NOTE: Python 2 leftover -- 'unicode' and 'chardet' are undefined in Python 3, and this helper is never called.\r\ndef to_unicode(s):\r\n    if type(s) is unicode:\r\n        return s\r\n    elif type(s) is str:\r\n        d = chardet.detect(s)\r\n        (cs, conf) = (d['encoding'], d['confidence'])\r\n        if conf > 0.80:\r\n            try:\r\n                return s.decode( cs, errors = 'replace' )\r\n            except Exception as ex:\r\n                pass\r\n    # force and return only ascii subset\r\n    return unicode(''.join( [ i if ord(i) < 128 else ' ' for i in s ]))\r\n\r\nif __name__ == \"__main__\":\r\n    folder = \"CorpusHTMLNoticiasPractica1819\"\r\n    # Empty list to hold text documents.\r\n    texts = []\r\n\r\n    listing = sorted(os.listdir(folder))\r\n    print(listing)\r\n    for file in listing:\r\n        if file.endswith(\".html\"):\r\n            url = folder+\"/\"+file\r\n            print(url)\r\n            f = io.open(url,encoding=\"latin-1\")\r\n            raw = f.read()\r\n            f.close()\r\n            #soup = BeautifulSoup(f, 'lxml')\r\n            soup = BeautifulSoup(raw, 'lxml')\r\n            text = \"\"\r\n            for node in soup.findAll('p'):\r\n                text = text + node.text\r\n            print(\"Text: \", text)\r\n            sentences = nltk.sent_tokenize(text)\r\n            tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\r\n\r\n            ## Test 1. Lemmatization. Uncomment:\r\n            ## BEGIN TEST 1\r\n            # Select the lemmatizer.\r\n            wordnet_lemmatizer = WordNetLemmatizer()\r\n            # Get the POS-tagged tokens of the sentences.\r\n            tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\r\n            # print(tagged_sentence)\r\n            tokens = []\r\n            def wordnet_value(value):\r\n                result = ''\r\n                # Filter the words and keep only the ones we may be interested in.\r\n                # These are adjectives, verbs, nouns and adverbs.\r\n                if value.startswith('J'):\r\n                    return wordnet.ADJ\r\n                elif value.startswith('V'):\r\n                    return wordnet.VERB\r\n                elif value.startswith('N'):\r\n                    return wordnet.NOUN\r\n                elif value.startswith('R'):\r\n                    return wordnet.ADV\r\n                return result\r\n\r\n            for sentence in tagged_sentences:\r\n                for token in sentence:\r\n                    if token[0] is not None and len(token[0]) > 0:\r\n                        pos = wordnet_value(token[1])\r\n                        # Filter out the words we are not interested in.\r\n                        if pos != '':\r\n                            tokens.append(wordnet_lemmatizer.lemmatize(str(token[0]).lower(), pos=pos))\r\n            print(\"Lemmas: \", tokens)\r\n            print(\"Type of lemmas: \", type(tokens))\r\n            ## END TEST 1\r\n\r\n\r\n            ## Test 2. Remove punctuation\r\n            ## BEGIN TEST 2\r\n            punctuation = string.punctuation\r\n            tokens_wo_punctuation = []\r\n            for t in tokens:\r\n                if t not in punctuation:\r\n                    tokens_wo_punctuation.append(t)\r\n            print(\"Wo punctuation: \", tokens_wo_punctuation)\r\n            tokens = tokens_wo_punctuation\r\n            ## END TEST 2\r\n\r\n            # ## Test 3. 
Remove stopwords\r\n            # ## BEGIN TEST 3\r\n            # tokens_wo_stopwords = []\r\n            # for t in tokens:\r\n            #     if t not in stopwords.words('english'):\r\n            #         tokens_wo_stopwords.append(t)\r\n            # print(\"Wo stopwords: \", tokens_wo_stopwords)\r\n            # tokens = tokens_wo_stopwords\r\n            # ## END TEST 3\r\n\r\n            ## Test 3.a. Detect the language and remove the stopwords for that language\r\n            ## BEGIN TEST 3.a\r\n            processor = TikaReader(url)\r\n            tokens_wo_stopwords = []\r\n            if processor.detect_language() == \"en\":\r\n                for t in tokens:\r\n                    if t not in stopwords.words('english'):\r\n                        tokens_wo_stopwords.append(t)\r\n            elif processor.detect_language() == \"es\":\r\n                for t in tokens:\r\n                    if t not in stopwords.words('spanish'):\r\n                        tokens_wo_stopwords.append(t)\r\n            else:\r\n                for t in tokens:\r\n                    if t not in stopwords.words('spanish'):\r\n                        tokens_wo_stopwords.append(t)\r\n            print(\"Language: \", processor.detect_language())\r\n            print(\"Wo stopwords: \", tokens_wo_stopwords)\r\n            tokens = tokens_wo_stopwords\r\n            # END TEST 3.a\r\n\r\n            ## Test 4. NLTK default chunker - NE recognizer\r\n            ## BEGIN TEST 4\r\n            tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\r\n            print(\"Tagged: \", tagged_sentences)\r\n            #universal_tagged_sentences = [nltk.pos_tag(sentence, tagset='universal') for sentence in\r\n            #tokenized_sentences]\r\n            chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\r\n            entity_names = []\r\n            for tree in chunked_sentences:\r\n                entity_names.extend(extract_entity_names(tree))\r\n            print(\"NEs: \", entity_names)\r\n            tokens = entity_names\r\n            ## END TEST 4\r\n\r\n            text = nltk.Text(tokens)\r\n            texts.append(text)\r\n\r\n    print(\"Prepared \", len(texts), \" documents...\")\r\n    print(\"They can be accessed using texts[0] - texts[\" + str(len(texts)-1) + \"]\")\r\n\r\n    distanceFunction =\"cosine\"\r\n    #distanceFunction = \"euclidean\"\r\n    test = cluster_texts(texts,6,distanceFunction)\r\n    print(\"test: \", test)\r\n    # Gold Standard\r\n    reference =[0, 1, 2, 2, 2, 3, 2, 2, 2, 4, 0, 0, 3, 3, 4, 2, 3, 0, 4, 4, 5, 1]\r\n    print(\"reference: \", reference)\r\n\r\n    # Evaluation\r\n    print(\"rand_score: \", adjusted_rand_score(reference,test))","sub_path":"BasicNewsClustering_1.py","file_name":"BasicNewsClustering_1.py","file_ext":"py","file_size_in_byte":7832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"588577204","text":"import json\nfrom tabulate import tabulate\n\ndef load_json(filename):\n    \"Load a JSON file and return its contents.\"\n    with open(filename) as f:\n        data = json.load(f)\n    return data\n\nentries = [(\"MS COCO\", load_json('./Results/coco_results.json')),\n           (\"Flickr30K\", load_json('./Results/flickr_results.json')),\n           (\"Places\", load_json('./Results/places_results.json'))]\n\nrows = []\nfor name, data in entries:\n    # Make smaller differences visible.\n    data['words']['consciousness_permille'] = data['words']['consciousness_percent'] * 10\n    data['words']['self_reference_permille'] = data['words']['self_reference_percent'] * 10\n    data['words']['attributives_permille'] = data['words']['attributives_percent'] * 10\n    \n    row = [name,\n           \"{:,d}\".format(data['lengths']['num_descriptions']),\n           \"{:,d}\".format(data['lengths']['num_tokens']),\n           \"{:.2f}\".format(data['lengths']['avg_token_length_syll']),\n           \"{:.2f}\".format(data['lengths']['avg_token_length_char']),\n           \"{:.2f}\".format(data['lengths']['avg_desc_length_syll']),\n           \"{:.2f}\".format(data['lengths']['avg_desc_length_tok']),\n           
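# per-description averages and per-mille rates for attributives, adverbs and prepositions follow\n           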
\"{:.2f}\".format(data['words']['attributives_per_description']),\n \"{:.2f}\".format(data['words']['attributives_permille']),\n \"{:.2f}\".format(data['words']['adverbs_per_description']),\n \"{:.2f}\".format(data['words']['adverbs_permille']),\n \"{:.2f}\".format(data['words']['prepositions_per_description']),\n \"{:.2f}\".format(data['words']['prepositions_permille']),]\n rows.append(row)\n\ntable = tabulate(rows,\n headers = ['Name', '#Desc', '#Tok', 'Syll', 'Char', 'Syll', 'Tok', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM'],\n tablefmt = 'latex_booktabs')\n\nadditional_header = \"\"\"\\\\toprule\n& & & \\multicolumn{2}{c}{TokLen} &\\multicolumn{2}{c}{DescLen} & \\multicolumn{2}{c}{Attributives} & \\multicolumn{2}{c}{Adverbs} & \\multicolumn{2}{c}{Prepositions}\\\\\\\\\n\\cmidrule(lr){4-5}\\cmidrule(lr){6-7}\\cmidrule(lr){8-9}\\cmidrule(lr){10-11}\\cmidrule(lr){12-13}\"\"\"\ntable = table.replace('\\\\toprule', additional_header)\ntable = table.replace('Places','\\midrule\\nPlaces')\ntable = table.replace('0 &','0.00 &')\ntable = table.replace('1.3 &','1.30 &')\ntable = table.replace('10.5 &','10.50 &')\ntable = table.replace('12.2 &','12.20 &')\ntable = table.replace('{lllrrrrrrrrrr}','{lcccccccccccc}')\ntable = table.replace('PERM', '\\\\textperthousand')\n\n# Space savers:\n#table = table.replace('\\\\toprule','\\cmidrule[\\heavyrulewidth](lr){1-13}')\n#table = table.replace('\\midrule','\\cmidrule(lr){1-13}')\n#table = table.replace('\\\\bottomrule','\\cmidrule[\\heavyrulewidth](lr){1-13}')\nprint(table)\nprint()\nprint('Continued:')\nrows = []\nfor name, data in entries:\n # Necessary to cover both the parallel datasets (MS COCO & Flickr30K) and Places.\n try:\n # For the parallel datasets.\n msttr = data['msttr']['parallel']\n except TypeError:\n # For Places.\n msttr = data['msttr']\n \n row = [name,\n \"{:.2f}\".format(msttr),\n \"{:.2f}\".format(data['words']['consciousness_per_description']),\n \"{:.2f}\".format(data['words']['consciousness_permille']),\n \"{:.2f}\".format(data['words']['self_reference_per_description']),\n \"{:.2f}\".format(data['words']['self_reference_permille']),\n \"{:.2f}\".format(data['words']['pos_allness_per_description']),\n \"{:.2f}\".format(data['words']['pos_allness_permille']),\n \"{:.2f}\".format(data['words']['negations_per_description']),\n \"{:.2f}\".format(data['words']['negations_permille']),\n \"{:.2f}\".format(data['words']['pseudo_quantifiers_per_description']),\n \"{:.2f}\".format(data['words']['pseudo_quantifiers_permille'])\n # \"{:.2f}\".format(data['words']['numerals_per_description']), # Not significant according to DeVito\n # \"{:.2f}\".format(data['words']['numerals_permille']), # Not significant according to DeVito\n ]\n rows.append(row)\n\ntable = tabulate(rows,\n headers=['Name', 'MSTTR', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM'],\n tablefmt='latex_booktabs')\n\nadditional_header = \"\"\"\\\\toprule\n & & \\multicolumn{2}{c}{Consciousness} & \\multicolumn{2}{c}{Self-reference} & \\multicolumn{2}{c}{Allness} & \\multicolumn{2}{c}{Negations} & \\multicolumn{2}{c}{PseudoQuant} \\\\\\\\\n\\cmidrule(lr){3-4} \\cmidrule(lr){5-6} \\cmidrule(lr){7-8} \\cmidrule(lr){9-10} \\cmidrule(lr){11-12}\n\"\"\"\ntable = table.replace('Places','\\midrule\\nPlaces')\ntable = table.replace('0 &', '0.00 &')\ntable = table.replace('1.3 &', '1.30 &')\ntable = table.replace('{lrrrrrrrrrrr}','{lccccccccccc}')\ntable = table.replace('\\\\toprule', additional_header)\ntable = table.replace('PERM', 
'\\\\textperthousand')\nprint(table)\n","sub_path":"English/generate_table.py","file_name":"generate_table.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"42829698","text":"# Copyright (c) 2020 Dell Inc. or its subsidiaries.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"docs/source/programmers_guide_src/code/\nreplication-snapshot-policy-compliance.py\"\"\"\n\nimport PyU4V\n\n# Set up connection to Unisphere for PowerMax Server, details collected\n# from configuration file in working directory where script is stored.\nconn = PyU4V.U4VConn()\n\n# Create a snapshot policy\nsnapshot_policy_name = 'PyU4V_Compliance_Policy'\nconn.snapshot_policy.create_snapshot_policy(\n snapshot_policy_name=snapshot_policy_name, interval='1 Day',\n local_snapshot_policy_snapshot_count=5)\n\n# Get the snapshot policy\nsnapshot_policy_details = (\n conn.snapshot_policy.get_snapshot_policy(snapshot_policy_name))\n\n# Check that snapshot policy exists\nassert snapshot_policy_details and snapshot_policy_details.get(\n 'snapshot_policy_name')\n\n# Create storage Group with one volume and associate with snapshot\n# policy.\nstorage_group_name = 'PyU4V_compliance_SG'\nstorage_group = conn.provisioning.create_non_empty_storage_group(\n srp_id='SRP_1', storage_group_id=storage_group_name,\n service_level='Diamond', workload=None,\n num_vols=1, vol_size=1, cap_unit='GB',\n snapshot_policy_ids=[snapshot_policy_name])\n\n# Get the storage group\nstorage_group_details = conn.provisioning.get_storage_group(\n storage_group_name)\n\n# Check that storage group exists\nassert storage_group_details and storage_group_details.get('storageGroupId')\n\n# Get the compliance details\ncompliance_details = (\n conn.snapshot_policy.get_snapshot_policy_compliance_last_week(\n storage_group_name))\n\n# Check details have been return\nassert compliance_details\n\n# Disassociate from snapshot policy\nconn.snapshot_policy.modify_snapshot_policy(\n snapshot_policy_name, 'DisassociateFromStorageGroups',\n storage_group_names=[storage_group_name])\n\n# Delete the snapshot policy\nconn.snapshot_policy.delete_snapshot_policy(snapshot_policy_name)\n\n# Get volumes from the storage group\nvolume_list = (conn.provisioning.get_volumes_from_storage_group(\n storage_group_name))\n\n# Delete the storage group\nconn.provisioning.delete_storage_group(storage_group_id=storage_group_name)\n\n# Delete each volume from storage group\nfor volume in volume_list:\n conn.provisioning.delete_volume(volume)\n\n# Close the session\nconn.close_session()\n","sub_path":"examples/replication-snapshot-policy-compliance.py","file_name":"replication-snapshot-policy-compliance.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"624086343","text":"import unittest\nimport allure\nfrom tests_api.testHelper import User\n\n\nclass TestEditGender(unittest.TestCase):\n\n def 
setUp(self):\n        self.id = \"e02dfd94-a8a9-4b1a-6cfc-08d7a28d1878\"\n        self.name = \"Jesus\"\n        self.gender = 2\n        self.birthday = \"2001-06-04\"\n        self.User = User(self.id, self.name, self.gender, self.birthday)\n        self.base_gender = self.User.get_gender()\n\n    @allure.severity(allure.severity_level.NORMAL)\n    @allure.link(\n        \"http://34.65.101.58:5002/admin/users?page=1\",\n        name='Click me')\n    def test_edit_g(self):\n        with allure.step(\"Edit user gender\"):\n            self.User.edit_gender(self.gender)\n            self.assertEqual(\n                self.User.get_gender(),\n                self.gender,\n                \"Gender has not been changed to:{}\".format(self.gender))\n\n    def tearDown(self):\n        with allure.step(\"Restore user gender\"):\n            self.User.edit_gender(self.base_gender)\n            self.assertEqual(\n                self.User.get_gender(),\n                self.base_gender,\n                \"Gender has not been changed to:{}\".format(self.base_gender))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests_api/adminSide/test_edit_gender.py","file_name":"test_edit_gender.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"60706471","text":"# Metaclasses\n\n# type\nclass Person(object):\n    def hello(self, name='zhangbo'):\n        print('hello %s' % name)\n\n\np = Person()\n\nprint(type(p))\np.hello()\n\n\ndef fn(self, name='zhangbo'):\n    print('hello %s' % name)\n\n\nHello = type('Hello', (object,), dict(hello=fn))\n\nprint(type(Hello))\nh = Hello()\nh.hello()\n\n# metaclass - hard to understand at first\n\nclass ListMetaClass(type):\n    def __new__(cls, name,base,attrs):\n        attrs['add'] = lambda self,value : self.append(value)\n        return type.__new__(cls,name,base,attrs)\n\nclass MyList(list,metaclass=ListMetaClass):\n    pass\n\nl = MyList()\nl.add(1)\nprint(l)\n\n# ORM framework\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"面向对象编程/metaClass.py","file_name":"metaClass.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"454471573","text":"# adapted from github.com/epw/pyirc\nimport os, sys\nimport imp\nimport socket, select\nimport shlex\nimport time\n\nfrom collections import deque\n\nimport pyirc.Message, pyirc.Plugin\n\ndef messageHandler(*typelist):\n\tdef func(f):\n\t\tf.msgtypes = typelist\n\t\treturn f\n\treturn func\n\nclass Bot:\n\tdef changeNick(self, nick):\n\t\tself.send('NICK {nick:s}'.format(nick=nick))\n\t\tself.nick = nick\n\t\treturn self\n\n\tdef join(self, *chans):\n\t\tfor chan in chans:\n\t\t\tself.joinq.append(chan)\n\t\treturn self\n\n\tdef part(self, *chans):\n\t\tfor chan in chans:\n\t\t\tself.send('PART {:s}'.format(chan))\n\t\t\tdel self.channels[chan]\n\t\treturn self\n\n\tdef quit(self, reason=None):\n\t\trtext = '' if reason is None else ':{:s}'.format(reason)\n\t\tself._sendImmediate('QUIT {:s}'.format(rtext).strip())\n\t\treturn self\n\n\tdef send(self, msg):\n\t\tself.mbfr.append(msg)\n\t\treturn self\n\n\tdef _sendImmediate(self, msg):\n\t\tif self.debug: print('\\r<-- {:s}'.format(msg))\n\t\tself.conn.send((msg + '\\r\\n').encode('utf8'))\n\n\tdef say(self, recp, msg):\n\t\tself.send('PRIVMSG {recp:s} :{msg:s}'.format(recp=recp, msg=msg))\n\t\tfor p in self.plugins:\n\t\t\tp.handleChat(recp, self.nick, msg)\n\t\treturn self\n\n\tdef sayTo(self, recp, target, msg):\n\t\tself.say(recp, '{:s}: {:s}'.format(target, msg))\n\t\treturn self\n\n\tdef sayAll(self, msg):\n\t\tfor chan in self.getChannels():\n\t\t\tself.say(chan, msg)\n\t\treturn self\n\n\tdef getChannels(self):\n\t\treturn self.channels.keys()\n\n\tdef 
__init__(self, nick, host, port=6667, passwd=None, real=None, debug=False):\n\t\tself.mbfr = deque()\n\t\tself.joinq = deque()\n\t\tself.debug = debug\n\n\t\tself.connected = False\n\t\tself.authenticated = False\n\n\t\tself.host, self.port, self.conn = host, port, None\n\t\twhile self.conn is None:\n\t\t\ttry:\n\t\t\t\tprint('Creating connection...')\n\t\t\t\tself.conn = socket.create_connection((host, port), 0.5)\n\t\t\texcept socket.timeout as e:\n\t\t\t\tself.conn = None\n\n\t\tself.conn.setblocking(0)\n\n\t\tself.real = real if real is not None else nick\n\t\tself.passwd = passwd\n\n\t\tself.send('PASS {passwd:s}'.format(passwd=passwd)).changeNick(nick)\n\t\tself.send('USER {nick:s} {host:s} * :{real:s}'.format(nick=nick, host=host, real=real))\n\n\t\tself.channels = {}\n\t\tself.plugins = set()\n\t\tself.registerMessageHandlers()\n\n\tdef registerMessageHandlers(self):\n\t\tself.handlers = {}\n\t\tfor func in dir(self):\n\t\t\tf = getattr(self, func)\n\t\t\tif hasattr(f, 'msgtypes'):\n\t\t\t\tfor t in f.msgtypes:\n\t\t\t\t\tself.handlers[t] = f\n\n\tdef authenticate(self):\n\t\tif self.passwd is not None:\n\t\t\tparams = {'nick': self.nick, 'passwd': self.passwd}\n\t\t\tif 'quakenet' in self.host:\n\t\t\t\tself.say('Q@CServe.quakenet.org', 'AUTH {nick:s} {passwd:s}'.format(**params))\n\t\t\telif 'gamesurge' in self.host:\n\t\t\t\tself.say('AuthServ@Services.GameSurge.net', 'AUTH {nick:s} {passwd:s}'.format(**params))\n\t\t\telse:\n\t\t\t\tself.say('nickserv', 'GHOST {nick:s} {passwd:s}'.format(**params))\n\t\tself.authenticated = True\n\t\treturn self\n\n\tdef loadPlugins(self, path):\n\t\tfor root, dirs, files in os.walk(path):\n\t\t\tfor name in files:\n\t\t\t\tif name.endswith('.py') and not name.startswith('__'):\n\t\t\t\t\tpath = os.path.relpath(os.path.join(root, name))\n\t\t\t\t\tmname = path[:-3].replace('/', '.')\n\n\t\t\t\t\tmod = __import__(mname).__dict__\n\t\t\t\t\tfor v in mname.split('.')[1:]:\n\t\t\t\t\t\tmod = mod[v].__dict__\n\t\t\t\t\tfor p in mod.values():\n\t\t\t\t\t\tif type(p) == type and issubclass(p, pyirc.Plugin.Plugin):\n\t\t\t\t\t\t\tself.addPlugin(p(self))\n\t\treturn self\n\n\tdef addPlugin(self, plugin):\n\t\tprint('* Loading {:s}'.format(plugin.getPluginName()))\n\t\tself.plugins.add(plugin)\n\n\tdef listen(self):\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\t# if any messages are ready to be listened to, listen to them\n\t\t\t\trlist, wlist, xlist = select.select([self.conn], [], [], 0.1)\n\t\t\t\tfor r in rlist: self.read(r)\n\n\t\t\t\tif self.connected:\n\t\t\t\t\tif not self.authenticated:\n\t\t\t\t\t\tself.authenticate()\n\t\t\t\t\twhile len(self.joinq):\n\t\t\t\t\t\tchan = self.joinq.popleft()\n\t\t\t\t\n\t\t\t\t\t\tif chan not in self.channels.values():\n\t\t\t\t\t\t\tself.send('JOIN {chan:s}'.format(chan=chan))\n\t\t\t\t\t\t\tself.channels[chan] = Bot.Channel(chan)\n\n\t\t\t\t\tclock = time.time()\n\t\t\t\t\tfor p in self.plugins:\n\t\t\t\t\t\tif p.idledelay is not None and clock > p.idleclock + p.idledelay:\n\t\t\t\t\t\t\tp.idleclock = clock\n\t\t\t\t\t\t\tp.idle()\n\n\t\t\t\t# send queued messages\n\t\t\t\twhile len(self.mbfr):\n\t\t\t\t\tself._sendImmediate(self.mbfr.popleft())\n\n\t\t# don't make a bit fuss about exiting\n\t\texcept (KeyboardInterrupt, SystemExit) as e:\n\t\t\tself.quit(reason='Exiting')\n\n\tdef read(self, conn):\n\t\tmsg = ''\n\t\ttry:\n\t\t\twhile True: msg += conn.recv(1024).decode('utf8')\n\t\texcept Exception as e: pass\n\n\t\tfor m in msg.rstrip().split('\\n'):\n\t\t\tself.handle(pyirc.Message.Message(m))\n\t \n\tdef 
handle(self, msg):\n\t\tif self.debug: print('\\r--> {:s}'.format(msg))\n\n\t\tif msg.isPing():\n\t\t\tself._sendImmediate(msg.getPong())\n\t\telif msg.msgtype in self.handlers:\n\t\t\tbody = msg.getMessageText()\n\t\t\tchan, nick, subnet = msg.getDelivery()\n\n\t\t\t# call the appropriate message handler\n\t\t\tself.handlers[msg.msgtype](msg, body, chan, nick, subnet)\n\n\t@messageHandler('PRIVMSG')\n\tdef msg_PRIVMSG(self, msg, body, chan, nick, subnet):\n\t\tif body.startswith(\"\\u0001ACTION\") and body.endswith(\"\\u0001\"):\n\t\t\tbody = body[8:-1]\n\t\t\tfor p in self.plugins:\n\t\t\t\tp.handleAction(chan, nick, body)\n\t\telse:\n\t\t\tfor p in self.plugins:\n\t\t\t\tp.handleChat(chan, nick, body)\n\n\t\tif body.startswith(pyirc.Message.Message.commandChar):\n\t\t\tcmd, *args = shlex.split(body[1:])\n\t\t\tfor p in self.plugins:\n\t\t\t\tp.handleCommand(chan, nick, cmd, args)\n\n\t@messageHandler('JOIN')\n\tdef msg_JOIN(self, msg, body, chan, nick, subnet):\n\t\tif chan in self.channels:\n\t\t\tself.channels[chan].users[nick] = set() # TODO\n\t\tfor p in self.plugins:\n\t\t\tp.onChannelJoin(chan, nick)\n\n\t@messageHandler('PART')\n\tdef msg_PART(self, msg, body, chan, nick, subnet):\n\t\tif chan in self.channels:\n\t\t\tdel self.channels[chan].users[nick]\n\t\tfor p in self.plugins:\n\t\t\tp.onChannelPart(chan, nick)\n\n\t@messageHandler('QUIT')\n\tdef msg_QUIT(self, msg, body, chan, nick, subnet):\n\t\tfor p in self.plugins:\n\t\t\tp.onQuit(nick, reason=body)\n\n\t@messageHandler('NICK')\n\tdef msg_NICK(self, msg, body, chan, nick, subnet):\n\t\toldnick, newnick = nick, body\n\t\tprint('nick change', oldnick, newnick)\n\t\tfor p in self.plugins:\n\t\t\tp.onNickChange(oldnick, newnick)\n\t\tfor chan,cinfo in self.channels.items():\n\t\t\tif oldnick in cinfo.users:\n\t\t\t\tcinfo.users[newnick] = cinfo.users[oldnick]\n\t\t\t\tdel cinfo.users[oldnick]\n\n\t@messageHandler('KICK')\n\tdef msg_KICK(self, msg, body, chan, nick, subnet):\n\t\tfor p in self.plugins:\n\t\t\tp.onQuit(chan, msg.get(3), reason=body)\n\n\t@messageHandler('INVITE')\n\tdef msg_INVITE(self, msg, body, chan, nick, subnet):\n\t\tif self.nick == msg.get(2):\n\t\t\tfor p in self.plugins:\n\t\t\t\tp.onInvite(msg.get(3))\n\t\t\t\n\t@messageHandler('001')\n\tdef msg_connect(self, msg, body, chan, nick, subnet):\n\t\tprint('Connection established.')\n\t\tself._sendImmediate('MODE {:s} +x'.format(self.nick))\n\t\tself.connected = True\n\n\t@messageHandler('353')\n\tdef msg_names(self, msg, body, chan, nick, subnet):\n\t\tif chan not in self.channels: return\n\t\tchaninfo = self.channels[chan]\n\n\t\tfor name in body.strip().split():\n\t\t\tflags = set() # TODO\n\t\t\tname = name.lstrip('@+')\n\t\t\tchaninfo.users[name] = flags\n\n\tclass Channel:\n\t\tdef __init__(self, chan):\n\t\t\tself.name = chan\n\t\t\tself.users = dict()\n\n\t\tdef __hash__(self):\n\t\t\treturn hash(self.name)\n\t\t\n","sub_path":"pyirc/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":7174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"126363035","text":"import game_map\n\n\nclass Discs:\n\n    def __init__(self, x, y, key, color, king=None):\n        self.x = x\n        self.y = y\n        self.key = key\n        self.color = color\n        self.king = king\n        # self._is_captured = False\n\n    '''\n    def move_char(self, x, y, letter):\n        \"\"\"\n        This is a function which writes new coordinates x, y for the Character.\n        :param x: coordinate x\n        :param y: coordinate y\n        :type x: int\n        :type y: int\n        \"\"\"\n        self.x = x\n        
self.y = y\n        self.key = letter\n\n        return self._x, self._y\n    '''\n\nclass Player:\n\n    _initial_dsk = 12\n\n    def __init__(self, name):\n\n        self._name = name\n        self._color = 'red'\n        self._army_dsk = self._create_army_dsk()\n\n    def __str__(self):\n        return 'O'\n\n    def _create_army_dsk(self):\n\n        army = []\n\n        for key, value in game_map.dict_all_black_cells.items():\n\n            x, y = value\n            if x > 4:\n                dsk = Discs(x, y, key, self._color)\n                army.append(dsk)\n\n        return army\n\n    def disc_in_army(self, charkey):\n        \"\"\"\n        This is a function which checks whether a disc is in the army.\n\n        \"\"\"\n        is_in_army = False\n        army = self._army_dsk\n        for a in army:\n            if charkey == a.key:\n                is_in_army = True\n                break\n        return is_in_army\n\n    '''def disc_indx(self, charkey):\n        \"\"\"\n        \"\"\"\n        army = self._army_dsk\n        indx = army.index(charkey)\n\n        return indx\n    '''\n    def get_army_dsk(self):\n\n        return self._army_dsk\n\n    def move_desc(self, charkey, charkey_new, new_x, new_y):\n        \"\"\"\n        This is a function which writes the new coordinates of a disc.\n\n        \"\"\"\n        army = self._army_dsk\n\n        for count, item in enumerate(army):\n            # print(count, item.key)\n            if item.key == charkey:\n                indx = count\n                break\n        self._army_dsk.pop(indx)\n        if new_x == 0:\n            dsk = Discs(new_x, new_y, charkey_new, self._color, 'o')\n        else:\n            dsk = Discs(new_x, new_y, charkey_new, self._color)\n        army.append(dsk)\n        return\n\n    def show_stats(self):\n\n        print('Player stats: ')\n        print(f'Name: {self._name}')\n\n    def kill_disc(self, charkey):\n        \"\"\"\n        This is a function which removes (kills) a disc.\n\n        \"\"\"\n        army = self._army_dsk\n\n        for count, item in enumerate(army):\n            # print(count, item.key)\n            if item.key == charkey:\n                indx = count\n                break\n        self._army_dsk.pop(indx)\n\n        return\n\n\nclass Bot(Player):\n\n    def __str__(self):\n        return 'X'\n\n    def __init__(self):\n        self._name = 'Botichello'\n        self._color = 'black'\n        self._army_dsk = self._create_army_dsk()\n\n    def _create_army_dsk(self):\n\n        army = []\n\n        for key, value in game_map.dict_all_black_cells.items():\n\n            x, y = value\n            if x < 3:\n                dsk = Discs(x, y, key, self._color)\n                army.append(dsk)\n\n        return army\n\n    def get_army(self):\n\n        return self._army\n\n    def get_name(self):\n\n        return self._name\n\n    def move_desc(self, charkey, charkey_new, new_x, new_y):\n        \"\"\"\n        This is a function which writes the new coordinates of a disc.\n\n        \"\"\"\n        army = self._army_dsk\n\n        for count, item in enumerate(army):\n            # print(count, item.key)\n            if item.key == charkey:\n                indx = count\n                break\n        self._army_dsk.pop(indx)\n        if new_x == 7:\n            dsk = Discs(new_x, new_y, charkey_new, self._color, 'x')\n        else:\n            dsk = Discs(new_x, new_y, charkey_new, self._color)\n        army.append(dsk)\n        return\n\n    def kill_disc(self, charkey):\n        \"\"\"\n        This is a function which removes (kills) a disc.\n\n        \"\"\"\n        army = self._army_dsk\n\n        for count, item in enumerate(army):\n            # print(count, item.key)\n            if item.key == charkey:\n                indx = count\n                break\n        self._army_dsk.pop(indx)\n\n        return\n\n\nPLAYERS = [Bot, Player]\n\n","sub_path":"pivak_gennadii/07/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"145225488","text":"#!/usr/bin/python\n# -*- coding: iso-8859-2 -*-\n\n\nwhile True :\n    x = input(\"Enter a real number: \")\n    if(x == \"stop\") : break\n\n    try :\n        number = float(x)\n    except ValueError:\n        print(\"You entered something other than a number! 
Try again\")\n        del x\n\n    else :\n        print(number, round(pow(number, 3), 4))\n        del x\n","sub_path":"Zadania_3/3.4.py","file_name":"3.4.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"57402814","text":"import json\nimport threading\nfrom flask import Flask, jsonify, make_response, request\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\n\napp = Flask(__name__)\n\nlimiter = Limiter(\n    app,\n    key_func=get_remote_address,\n    default_limits=[\"1200 per day\", \"50 per hour\"]\n)\n\nwith open(\"data.json\", \"r\", encoding=\"utf8\") as read_data:\n    channels = json.load(read_data)\n\nlock = threading.Lock()\n\n\n@app.route(\"/\")\ndef index():\n    \"\"\" Main route of the website. \"\"\"\n    return \"Awesome YouTubers voting system website.\"\n\n\n@app.route(\"/channels/all\")\ndef list_channels():\n    \"\"\" Lists all channels in the database. \"\"\"\n\n    return jsonify(channels)\n\n\n@app.route(\"/channels/<channel>\")\ndef get_channel(channel):\n    \"\"\"\n    If no query is specified, prints the name of the\n    YouTube channel typed. When a query with the\n    name \"vote\" is given, adds or subtracts 1\n    from the specified YouTube score.\n    \"\"\"\n\n    if \"vote\" in request.args:\n        vote = str(request.args[\"vote\"])\n\n        if channel in channels:\n            # Adds/subtracts 1 from the channel.\n            if vote == \"upvote\":\n                channels[channel] += 1\n            elif vote == \"downvote\":\n                channels[channel] -= 1\n            else:\n                return \"Vote word not recognised.\"\n\n            # Write to database file.\n            with lock:\n                with open(\"data.json\", \"w\", encoding=\"utf8\") as write_data:\n                    json.dump(channels, write_data, indent=4)\n\n            return f\"You successfully {vote}d the channel {channel}.\"\n        else:\n            return \"Channel not found on the list.\"\n    else:\n        if channel in channels:\n            return \"Channel: \" + channel\n        else:\n            return \"Channel not found on the list.\"\n\n\n@app.route(\"/channels/<channel>/image.svg\")\ndef img_channel(channel):\n    \"\"\" Returns the YouTube score in an SVG image. 
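The response is served with Content-Type image/svg+xml so the badge can be embedded directly, e.g. in a README. 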
\"\"\"\n\n if channel in channels:\n svg_image = f\"\"\"\n \n \n \n \n \n {channels[channel]}\n \n \n \n \"\"\"\n\n response = make_response(svg_image)\n response.headers.set('Content-Type', 'image/svg+xml')\n return response\n else:\n return \"Channel not found on the list\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"src/voter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"381503418","text":"# coding=utf-8\n\nimport os\nimport logbook\nfrom logbook import Logger,TimedRotatingFileHandler\nfrom logbook.more import ColorizedStderrHandler,StderrHandler\n\nlog_dir = os.path.join(\"../../logging/\")\n \nclass PyPLogger(object):\n\n def __init__(self, clazz):\n logbook.set_datetime_format(\"local\")\n self.serverName = clazz.__name__[clazz.__name__.rfind('.')+1:]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n self.log_file = TimedRotatingFileHandler(\n os.path.join(log_dir, '%s.log' % self.serverName),date_format='%Y-%m-%d', bubble=True, encoding='utf-8')\n self.log_std = ColorizedStderrHandler(bubble=True)\n# self.log_std = StderrHandler(bubble=True)\n \n self.log = Logger(self.serverName)\n self.__init_logger()\n self.__setting()\n \n\n def log_type(self, record, handler):\n# log = \"[{date}]-[{level}]-[\" + self.serverName + \"] - {msg}\".format(\n log = \"[\"+self.serverName + \"]\" +\"-[{date}]-[{level}] - {msg}\".format(\n date = record.time, \n level = record.level_name, \n# filename = os.path.split(record.filename)[-1], \n# func_name = record.func_name,\n# lineno = record.lineno,\n msg = record.message \n )\n return log\n \n def __init_logger(self):\n logbook.set_datetime_format(\"local\")\n self.log.handlers = []\n self.log.handlers.append(self.log_file)\n self.log.handlers.append(self.log_std)\n \n def __setting(self): \n self.log_std.formatter = self.log_type\n self.log_file.formatter = self.log_type\n \n def info(self, *args, **kwargs):\n self.log.info(*args, **kwargs)\n \n def warn(self, *args, **kwargs):\n self.log.warn(*args, **kwargs)\n \n def error(self, *args, **kwargs):\n self.log.error(*args, **kwargs)\n\n ","sub_path":"fstp-python-3.6/src/core/log/PyPLogger.py","file_name":"PyPLogger.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"381032209","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\nimport os\nfrom datetime import timedelta\n\nfrom unipath import Path\nfrom celery.schedules import crontab\n\n\nBASE_DIR = Path(os.path.dirname(__file__)).parent.parent\n\nDEBUG = False\n\nSECRET_KEY = 'secret'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django.contrib.sites',\n 'django.contrib.sitemaps',\n\n 'cacheops',\n 'django_countries',\n 'compressor',\n 'raven.contrib.django.raven_compat',\n\n 'tracker',\n)\n\nMIDDLEWARE_CLASSES = (\n 'utils.middleware.ClientIpMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.sites.middleware.CurrentSiteMiddleware',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'tracker.context_processors.current_view',\n ],\n },\n },\n]\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTIME_FORMAT = 'H:i'\n\nROOT_URLCONF = 'swat4tracker.urls'\nWSGI_APPLICATION = 'swat4tracker.wsgi.application'\n\nALLOWED_HOSTS = []\nINTERNAL_IPS = ()\n\nSITE_ID = 1\n\nADMINS = (\n ('Sergei', 'kh.sergei@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nSERVER_EMAIL = 'django@swat4stats.com'\nDEFAULT_FROM_EMAIL = 'noreply@swat4stats.com'\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n },\n 'locmem': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'syslog': {\n 'format': 'swat4tracker.%(name)s: [%(levelname)s] %(asctime)s - %(filename)s:%(lineno)s - %(message)s'\n },\n 'simple': {\n 'format': '[%(levelname)s] %(asctime)s - %(filename)s:%(lineno)s - %(funcName)s() - %(message)s'\n },\n },\n}\n\nCACHEOPS = {\n 'tracker.*': ('just_enable', None),\n}\n\nCOMPRESS_ENABLED = True\nCOMPRESS_OUTPUT_DIR = ''\n\nCOMPRESS_CSS_FILTERS = (\n 'compressor.filters.css_default.CssAbsoluteFilter',\n 'compressor.filters.datauri.CssDataUriFilter',\n 'compressor.filters.cssmin.rCSSMinFilter',\n)\n\nCOMPRESS_PRECOMPILERS = (\n ('text/less', '{} {{infile}} {{outfile}}'.format(BASE_DIR.child('node_modules', 'less', 'bin', 'lessc'))),\n)\n\n# only data-encode small files\nCOMPRESS_DATA_URI_MAX_SIZE = 1024*5\n\nMARKDOWN_PROTECT_PREVIEW = True\n\n# celery\nBROKER_URL = 'redis://localhost/3'\nCELERY_RESULT_BACKEND = 'redis://localhost/4'\n\nCELERYBEAT_SCHEDULE = {\n # fetch new servers from various sources every 30 min\n 'update-servers': {\n 'task': 'tracker.tasks.update_server_list',\n 'schedule': timedelta(minutes=5),\n },\n # query servers for 90 seconds with an interval of 5 seconds\n 'query-servers': {\n 'task': 'tracker.tasks.query_listed_servers',\n 'schedule': timedelta(seconds=60),\n 'kwargs': {'time_delta': 60, 'interval': 5},\n },\n # update the profile popular fields (name, team, etc) every hour\n 'update-popular': {\n 'task': 'tracker.tasks.update_popular',\n 'schedule': crontab(minute='10'),\n 'kwargs': {'time_delta': timedelta(hours=2)},\n },\n # update profile ranks every 2 hours\n 'update-ranks': {\n 'task': 'tracker.tasks.update_ranks',\n 'schedule': crontab(minute='20', hour='*/2'),\n 'kwargs': {'time_delta': timedelta(hours=4)},\n },\n # update positions every 6 hours\n 'update-positions': {\n 'task': 'tracker.tasks.update_positions',\n 'schedule': crontab(minute='30', hour='*/6'),\n },\n # update past year positions on the new 
year's jan 1st 6 am\n 'update-ranks-past-year': {\n 'task': 'tracker.tasks.update_positions',\n 'schedule': crontab(minute='0', hour='6', day_of_month='1', month_of_year='1'),\n 'args': (-1,),\n },\n}\n\nCELERY_TASK_RESULT_EXPIRES = 60\nCELERY_TASK_SERIALIZER = 'pickle'\nCELERY_ACCEPT_CONTENT = ['pickle']\nCELERY_DEFAULT_QUEUE = 'default'\n# don't reserve tasks\nCELERYD_PREFETCH_MULTIPLIER = 1\nCELERYD_HIJACK_ROOT_LOGGER = False\n\nRAVEN_CONFIG = {}\n","sub_path":"swat4tracker/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"252458830","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# File name: batch_gradient_descent.py\n\nimport numpy as np\n__author__ = 'yasaka'\n# Fix the random seed\nnp.random.seed(1)\n# Create a simulated training set\nX = 2 * np.random.rand(10000, 1)\ny = 4 + 3 * X + np.random.randn(10000, 1)\nX_b = np.c_[np.ones((10000, 1)), X]\n# print(X_b)\n\nlearning_rate = 0.1\nn_iterations = 500\n# There are 10000 samples\nm = 10000\n\n# Initialize θ\ntheta = np.random.randn(2, 1)\ncount = 0\n\n\nfor iteration in range(n_iterations):\n count += 1\n gradients = 1/m * X_b.T.dot(X_b.dot(theta)-y)\n theta = theta - learning_rate * gradients\n\nprint(count)\nprint(theta)\n\n\n\n\n\n\n\n\n\n","sub_path":"my_ML/01_batch_gradient_descent.py","file_name":"01_batch_gradient_descent.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"73795347","text":"# Python function to display all the prime numbers within an interval selected by user\n\n\ndef primes(lower,upper):\n\n print(\"Prime numbers between\", lower, \"and\", upper, \"are:\")\n \n for num in range(lower, upper + 1):\n # all prime numbers are greater than 1\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n print(num)\n\n\nprint('Enter lower and upper bounds:')\nlower = int(input('LOWER> '))\nupper = int(input('UPPER> '))\n\nprimes(lower,upper)\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"460666595","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n\nfrom components.Gpio import Gpio\n\nlights = Gpio(16,'lights')\nwater = Gpio(14,'pump')\n\nlights.on()\nlights.off()\nwater.on()\nwater.off()\n\n\n","sub_path":"grower.py","file_name":"grower.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"29267718","text":"\n#!/bin/python\nimport os\nos.system(\"clear\")\nos.system(\"cls\")\n\nintro = input(\"\"\"\n\nWhat is the output of the following snippet?\n \n x = \"1\" -> STRING!\n\n if x == 1:\n one\n elif x == \"1\":\n if int(x) > 1:\n two\n elif int(x) < 1:\n three\n else:\n four\n if int(x) == 1:\n five\n else:\n six\n\n\nENTER to continue...\n\"\"\")\n\nx = \"1\"\n\nif x == 1:\n print(\"one\")\nelif x == \"1\":\n if int(x) > 1:\n print(\"two\")\n elif int(x) < 1:\n print(\"three\")\n else:\n print(\" four\")\nif int(x) == 1:\n print(\"five\")\nelse:\n print(\"six\")\n","sub_path":"IntroductionToPythonAndProgrammingBasic-Cisco-master/contents/programs/ex-5.py","file_name":"ex-5.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"103406215","text":"import
numpy as np\nimport pandas as pd\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport collections\n\n# Number of excitatory and inhibitory neurons\nN_E = 80\nN_I = 20\nn_neurons = N_E + N_I\nn_sessions = 6\ntotal_time = 5000\n\n# All the parameters from Supplementary table from the paper.\nW_EI = 0.44\nW_IE = 0.66\nW_II = 0.54\nW_EE = 0.37\nW_EI2 = 0.49\nW_IE2 = 0.65\nW_II2 = 0.53\n\nmu_EI = W_EI\nmu_IE = W_IE\nmu_II = W_II\nsigma_EI2 = W_EI2 - W_EI ** 2\nsigma_IE2 = W_IE2 - W_IE2 ** 2\nsigma_II2 = W_II2 - W_II ** 2\nsigma_EI = np.sqrt(sigma_EI2)\nsigma_IE = np.sqrt(sigma_IE2)\nsigma_II = np.sqrt(sigma_II2)\nprint(str(sigma_EI2) + '\\n' + str(sigma_IE2) + '\\n' + str(sigma_II2))\n\n# Extracting E->E connectivity from the spine imaging data\nc_EE = 0.2\npath = \"Global_Spines_info.csv\"\nspines_info = pd.read_csv(path)\nspines_info.drop('Unnamed: 0', axis=1, inplace=True)\nspines_info.head()\n\nspines_IS1 = spines_info.loc[spines_info['Starting Imaging Session'] == 1]\n# spines_IS1.head(100)\nS = spines_IS1['Volume'].mean()\ng = W_EE / S\nprint(g)\n\n\n# Connectivity matrix 8*8\n# EI\n# I*\ndef W_Construction():\n c_EE = 0.2\n c_EI = 0.4\n c_IE = 0.3\n c_II = 0.4\n W = np.zeros((n_neurons, n_neurons))\n c = np.zeros((n_neurons, n_neurons))\n\n # E->I connections\n for i in range(N_E):\n for j in range(N_E, n_neurons):\n if random.uniform(0, 1) <= c_EI:\n c[i, j] = 1\n W[i, j] = np.random.lognormal(mu_EI, sigma_EI)\n\n # I->E connections\n for i in range(N_E, n_neurons):\n for j in range(N_E):\n if random.uniform(0, 1) <= c_IE:\n c[i, j] = 1\n W[i, j] = np.random.lognormal(mu_IE, sigma_IE)\n\n # I->I connections\n for i in range(N_E, n_neurons):\n for j in range(N_E, n_neurons):\n if random.uniform(0, 1) <= c_II:\n c[i, j] = 1\n W[i, j] = np.random.lognormal(mu_II, sigma_II)\n\n # E->E connections\n for i in range(N_E):\n for j in range(N_E):\n if random.uniform(0, 1) <= c_EE:\n index = random.randint(1, 3688)\n W[i, j] = spines_info['Volume'].loc[spines_info['Global_SpineID'] == index].values[0] * g\n c[i, j] = 1\n else:\n W[i, j] = 0\n return W, c\n\n\nW = np.zeros((n_sessions, n_neurons, n_neurons))\nc = np.zeros((n_sessions, n_neurons, n_neurons))\nplt.figure(figsize=(24, 4))\nfor i in range(n_sessions):\n W[i], c[i] = W_Construction()\n plt.subplot(1, 6, i + 1)\n sns.heatmap(W[i], vmin=0, vmax=1.6, cmap='jet')\n# Recurrent input of neuron i\n\n# for i in range(n_neurons):\n# for j in range(N_E):\n# tsteps = 0\n# while (True):\n# tstep = random.randint(10, 50)\n# tsteps = tsteps + tstep\n# if tsteps < total_time:\n# h[i, tsteps] = h[i, tsteps] + c[0, i, j] * W[0, i, j]\n# else:\n# break\n# for j in range(N_E, n_neurons):\n# tsteps = 0\n# while (True):\n# tstep = random.randint(10, 50)\n# tsteps = tsteps + tstep\n# if tsteps < total_time:\n# h[i, tsteps] = h[i, tsteps] - c[0, i, j] * W[0, i, j]\n# else:\n# break\n\n\n# Potential of neuron\n\ntheta = 33\ntau_m = 10\nH_E = 77.6\nH_I = 57.8\nv_R = 24.75\nspike = 150\n# e_firing_rates_freq = {}\n# i_firing_rates_freq = {}\ne_rates = []\ni_rates = []\nh = np.zeros((n_neurons, total_time))\nr = np.zeros(n_neurons) # Recording the state of each neuron in the last timestep\nv = np.zeros((n_neurons, total_time))\nfor i in range(n_neurons):\n v[i, 0] = v_R\nt = range(total_time - 1)\nfor dt in t:\n # e_spikes = 0\n # i_spikes = 0\n # For excitatory neurons\n for i in range(N_E):\n for j in range(N_E):\n h[i, dt] = h[i, dt] + c[0, i, j] * W[0, i, j] * r[j]\n for j in range(N_E, n_neurons):\n h[i, dt] = h[i, dt] - c[0, i, j] 
* W[0, i, j] * r[j]\n if v[i, dt] == spike:\n v[i, dt + 1] = v_R\n else:\n v[i, dt + 1] = v[i, dt] - v[i, dt] / tau_m + h[i, dt] + H_E / tau_m\n if v[i, dt + 1] >= theta:\n v[i, dt + 1] = spike\n r[i] = 1\n else:\n r[i] = 0\n # e_spikes += 1\n # e_firing_rate = e_spikes / N_E\n # if e_firing_rate in e_firing_rates_freq:\n # e_firing_rates_freq[e_firing_rate] += 1\n # else:\n # e_firing_rates_freq[e_firing_rate] = 1\n\n # For inhibitory neurons\n for i in range(N_E, n_neurons):\n for j in range(N_E):\n h[i, dt] = h[i, dt] + c[0, i, j] * W[0, i, j] * r[j]\n for j in range(N_E, n_neurons):\n h[i, dt] = h[i, dt] - c[0, i, j] * W[0, i, j] * r[j]\n if v[i, dt] == spike:\n v[i, dt + 1] = v_R\n else:\n v[i, dt + 1] = v[i, dt] - v[i, dt] / tau_m + h[i, dt] + H_I / tau_m\n if v[i, dt + 1] >= theta:\n v[i, dt + 1] = spike\n r[i] = 1\n else:\n r[i] = 0\n # i_spikes += 1\n\n # i_firing_rate = i_spikes / N_I\n # if i_firing_rate in i_firing_rates_freq:\n # i_firing_rates_freq[i_firing_rate] += 1\n # else:\n # i_firing_rates_freq[i_firing_rate] = 1\n\n# Plot the spiking of an excitatory and inhibitory neuron\nplt.figure()\nplt.plot(range(total_time), v[0], 'b')\nplt.show()\nplt.figure()\nplt.plot(range(total_time), v[N_E], 'r')\nplt.show()\n\n# # Plot the firing rates for excitatory an inhibitory neurons\n# e_firing_rates_freq = collections.OrderedDict(sorted((float(x), y) for x, y in e_firing_rates_freq.items()))\n# i_firing_rates_freq = collections.OrderedDict(sorted((float(x), y) for x, y in i_firing_rates_freq.items()))\n\n# plt.xlim(0, max(e_firing_rates_freq.keys()))\n# plt.stem(e_firing_rates_freq.keys(), e_firing_rates_freq.values())\n# plt.show()\n# plt.xlim(0, max(i_firing_rates_freq.keys()))\n# plt.stem(i_firing_rates_freq.keys(), i_firing_rates_freq.values(), 'r')\n# plt.show()\n\nfor i in range(n_neurons):\n arr, count = np.unique(v[i], return_counts=True)\n frequency = dict(zip(arr, count))\n if i < N_E:\n e_rates.append(frequency[spike] / total_time * 1000)\n else:\n i_rates.append(frequency[spike] / total_time * 1000)\n\nplt.figure()\nplt.xlim(0, max(e_rates))\nplt.hist(e_rates, bins=90, density=True)\nplt.show()\nplt.figure()\nplt.xlim(0, max(i_rates))\nplt.hist(i_rates, bins=90, density=True)\nplt.show()\nplt.figure()\ne_rates_log = np.log(e_rates)\nplt.xlim(min(e_rates_log), max(e_rates_log))\nplt.hist(e_rates_log, bins=90, density=True)\nplt.show()\nplt.figure()\ni_rates_log = np.log(i_rates)\nplt.xlim(min(i_rates_log), max(i_rates_log))\nplt.hist(i_rates_log, bins=90, density=True)\nplt.show()\n\nfor i in range(n_neurons):\n arr, count = np.unique(v[i], return_counts=True)\n frequency = dict(zip(arr, count))\n if i < N_E:\n e_rates.append(frequency[spike] / total_time)\n else:\n i_rates.append(frequency[spike] / total_time)\n\nplt.xlim(0, max(e_rates))\nplt.hist(e_rates)\nplt.show()\nplt.xlim(0, max(i_rates))\nplt.hist(i_rates)\nplt.show()\n","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"571476327","text":"def append_to_str(s1, s2, length, filer=' '):\n length -= len(s1)\n s1 += filer*length\n s1 += ' | ' + str(s2)\n return s1\n\n\ndef resize_list(count):\n l = []\n for i in range(count):\n l.append('')\n return l\n\n\ndef print_table(table, line_count, colon_count):\n if type(table) is not list:\n print('ERROR 1')\n return\n if len(table) == 0:\n print('ERROR 2')\n return\n print()\n lines = resize_list(line_count)\n length1 = 0\n 
length2 = 0\n for colon in range(colon_count):\n for line in range(line_count):\n if type(table[line]) is not list:\n lines[line] = append_to_str(lines[line],'',length1,'-')\n else:\n lines[line] = append_to_str(lines[line], table[line][colon], length1)\n length2 = max(len(lines[line]), length2)\n length1 = length2\n for line in lines:\n print(line)\n","sub_path":"Table_printer.py","file_name":"Table_printer.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"378650083","text":"#!/usr/bin/env python\n\"\"\"This is a script for initializing the PUDL database locally.\"\"\"\n\nimport sys\nimport argparse\nimport pudl\nimport pudl.constants as pc\nfrom pudl.settings import SETTINGS\nimport pudl.models.glue\nimport pudl.models.eia860\nimport pudl.models.eia923\nimport pudl.models.entities\nimport pudl.models.ferc1\n\n# require modern python\nif not sys.version_info >= (3, 6):\n raise AssertionError(\n f\"PUDL requires Python 3.6 or later. {sys.version_info} found.\"\n )\n\n\ndef parse_command_line(argv):\n \"\"\"\n Parse command line arguments. See the -h option.\n\n :param argv: arguments on the command line must include caller file name.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--settings_file', dest='settings_file',\n type=str, help=\"Specify a YAML settings file.\",\n default='settings.yml')\n arguments = parser.parse_args(argv[1:])\n return arguments\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n\n args = parse_command_line(sys.argv)\n settings_init = pudl.settings.settings_init(\n settings_file=args.settings_file)\n\n pudl.init.verify_input_files(\n ferc1_years=settings_init['ferc1_years'],\n eia923_years=settings_init['eia923_years'],\n eia860_years=settings_init['eia860_years'],\n epacems_years=settings_init['epacems_years'],\n epacems_states=settings_init['epacems_states']\n )\n\n pudl.extract.ferc1.init_db(ferc1_tables=pc.ferc1_default_tables,\n refyear=settings_init['ferc1_ref_year'],\n years=settings_init['ferc1_years'],\n def_db=True,\n verbose=settings_init['verbose'],\n testing=settings_init['ferc1_testing'])\n\n pudl.init.init_db(ferc1_tables=settings_init['ferc1_tables'],\n ferc1_years=settings_init['ferc1_years'],\n eia923_tables=settings_init['eia923_tables'],\n eia923_years=settings_init['eia923_years'],\n eia860_tables=settings_init['eia860_tables'],\n eia860_years=settings_init['eia860_years'],\n epacems_years=settings_init['epacems_years'],\n epacems_states=settings_init['epacems_states'],\n verbose=settings_init['verbose'],\n debug=settings_init['debug'],\n pudl_testing=settings_init['pudl_testing'],\n ferc1_testing=settings_init['ferc1_testing'],\n csvdir=SETTINGS['csvdir'],\n keep_csv=settings_init['keep_csv'])\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"scripts/init_pudl.py","file_name":"init_pudl.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"71765872","text":"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"rt\") as f:\n long_description = f.read()\n\nwith open(\"requirements.txt\", \"rt\") as f:\n requirements = f.readlines()\n\nsetup(\n name=\"torchbiggraph\",\n version=\"1.dev\",\n description=\"A distributed system to learn embeddings of large graphs\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/PyTorch-BigGraph\",\n author=\"Facebook AI Research\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"machine-learning knowledge-base graph-embedding link-prediction\",\n packages=find_packages(exclude=[\"docs\", \"tests\"]),\n install_requires=requirements,\n entry_points={\n \"console_scripts\": [\n \"torchbiggraph_config=torchbiggraph.config:main\",\n \"torchbiggraph_eval=torchbiggraph.eval:main\",\n \"torchbiggraph_export_to_tsv=torchbiggraph.converters.export_to_tsv:main\",\n \"torchbiggraph_import_from_tsv=torchbiggraph.converters.import_from_tsv:main\",\n \"torchbiggraph_partitionserver=torchbiggraph.partitionserver:main\",\n \"torchbiggraph_train=torchbiggraph.train:main\",\n ],\n },\n project_urls={\n \"Bug Reports\": \"https://github.com/facebookresearch/PyTorch-BigGraph/issues\",\n \"Source\": \"https://github.com/facebookresearch/PyTorch-BigGraph\",\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"14759364","text":"import pandas as pd\r\nfrom pygerrit2 import GerritRestAPI, Anonymous\r\nimport urllib\r\nimport urllib.request\r\nimport json\r\nfrom threading import Lock,Thread\r\nfrom concurrent import futures\r\nimport time\r\nimport os\r\n\r\nclass MultithreadReviewCrawler : \r\n def __init__(self,rest_api,base_url,base_dir,max_thread_number,queries_per_thread,start_position,span) : \r\n\r\n self.rest_api = rest_api \r\n self.base_dir = base_dir\r\n self.max_thread_number = max_thread_number \r\n self.queries_per_thread = queries_per_thread\r\n self.span = span\r\n self.finished = False \r\n self.final_position = -1 \r\n self.base_url = base_url \r\n self.start_position = start_position \r\n self.reviews_problems = []\r\n\r\n\r\n def crawl_job(self,tid,start_position) : \r\n end_position = start_position + self.queries_per_thread\r\n print(str(tid) + \":starting from \" + str(start_position) + \" to \" + str(end_position))\r\n for current_pos in range(start_position,start_position + self.queries_per_thread,self.span) : \r\n #do the query \r\n collected_changes = self.run_attempts(10,current_pos)\r\n if collected_changes == None : \r\n if tid == self.max_thread_number-1 : \r\n #TODO : recheck final condition\r\n self.finished = True \r\n self.final_position = current_pos\r\n print(\"Crawling is done\")\r\n return \r\n 
print(\"could not solve for : \" + str(current_pos) + ' - ' +str(current_pos + self.span))\r\n self.reviews_problems.append([current_pos,current_pos + self.span])\r\n continue \r\n self.save_changes(collected_changes,current_pos)\r\n \r\n\r\n\r\n print(str(tid) + \": from \" + str(start_position) + \" to \" + str(end_position) + ' finished')\r\n \r\n def run_attempts(self,attemps_number,current_pos) : \r\n timeout = 120\r\n for attempt_count in range(attemps_number) :\r\n try : \r\n collected_changes = self.rest_api.get(self.base_url +'&n='+str(self.span)+ '&S='+ str(current_pos) ,timeout =timeout)\r\n except: \r\n print('problem occured')\r\n timeout += 120\r\n continue\r\n print(\"total success!\")\r\n if len(collected_changes) != 0 :\r\n return collected_changes\r\n else : \r\n print('another attempt!')\r\n return None \r\n\r\n def save_changes(self,collected_changes,current_pos) : \r\n with open(os.path.join(self.base_dir,str(current_pos) + '_' + str(current_pos + self.span) + '.json'),'w') as query_file : \r\n json.dump(collected_changes,query_file)\r\n\r\n def run(self) : \r\n current_start_pos = self.start_position\r\n while not(self.finished) :\r\n pool = futures.ThreadPoolExecutor(max_workers=self.max_thread_number)\r\n waiting_for = []\r\n for tid in range(self.max_thread_number) : \r\n waiting_for.append(pool.submit(self.crawl_job,tid,current_start_pos))\r\n current_start_pos += self.queries_per_thread\r\n for completed in futures.as_completed(waiting_for) : \r\n continue \r\n time.sleep(10)\r\n return self.final_position,self.reviews_problems \r\nclass CrawlerLuncher : \r\n def __init__(self,project_url,base_dir,crawl_config=None ) : \r\n\r\n self.project_url = project_url \r\n\r\n self.base_dir = base_dir \r\n self.project_exsist = False \r\n self.default_config = {'current_position' : 0,'queries_per_thread' : 500,'span' : 100,'threads_number' : 4 ,'query' : '/changes/?o=ALL_REVISIONS&o=ALL_FILES&o=ALL_COMMITS&o=MESSAGES&o=DETAILED_ACCOUNTS&o=DETAILED_LABELS'}\r\n self.config = self.default_config \r\n os.makedirs(os.path.join(base_dir,'data'),exist_ok=True)\r\n if 'config.json' in os.listdir(self.base_dir) : \r\n self.project_exsist = True \r\n self.config = json.load(open(os.path.join(base_dir,'config.json')))\r\n current_index = self.recover()\r\n self.config['current_position'] = current_index\r\n else : \r\n if crawl_config != None : \r\n for config in crawl_config : \r\n self.config[config] = crawl_config[config]\r\n with open(os.path.join(self.base_dir,'config.json'), 'w') as config_file : \r\n json.dump(self.config,config_file)\r\n \r\n auth = Anonymous()\r\n self.rest = GerritRestAPI(url=self.project_url, auth=auth)\r\n self.crawler = MultithreadReviewCrawler(rest_api=self.rest,base_dir=self.base_dir+'/data',base_url=self.config['query'],max_thread_number=self.config['threads_number'],queries_per_thread=self.config['queries_per_thread'],start_position=self.config['current_position'],span=self.config['span'])\r\n def run_crawling(self) : \r\n last_position,not_querried = self.crawler.run()\r\n self.config['unquerried reviews'] = [{'start' : x[0],'end : ' : x[1]} for x in not_querried]\r\n self.config['current_position'] = last_position\r\n with open(os.path.join(self.base_dir,'config.json'), 'w') as config_file : \r\n json.dump(self.config,config_file)\r\n def recover(self) :\r\n current_pos = 0\r\n for file_name in os.listdir(os.path.join(self.base_dir,'data')) : \r\n if '.json' in file_name : \r\n second_index = int(file_name.split('_')[1].split('.')[0])\r\n if 
current_pos < second_index : \r\n current_pos = second_index\r\n return current_pos \r\n","sub_path":"CrawlRawData.py","file_name":"CrawlRawData.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"580482978","text":"# -*- coding: utf8 -*-\nfrom flask import Flask, render_template, Blueprint\nfrom helpers import jinja_template_loader, _read_file\nfrom filters import FILTERS\nfrom globals import GLOBALS\n\napp = Flask(__name__, static_folder='../static', template_folder='..')\napp.jinja_env.filters.update(FILTERS)\napp.jinja_env.globals.update(GLOBALS)\napp.jinja_loader = jinja_template_loader\n\narticle = Blueprint('article', __name__, template_folder='templates')\n@article.route('/article//')\n@article.route('/article/')\ndef article_by_slug(slug):\n return slug\napp.register_blueprint(article)\n\ndemo_css = _read_file('demo.css')\ndemo_js = _read_file('demo.js')\n\nexample_article = {\n \"created_at\": \"2016-01-12 22:24:16+0000\",\n \"display_date\": \"2013-12-18 15:29:52+0000\",\n \"gallery\": {\n 'images': [{\n \"created_at\": \"2016-01-12 22:24:16+0000\",\n \"crops\": [],\n \"description\": \"\",\n \"meta\": [],\n \"original\": {\n \"aspect_ratio\": \"1:1\",\n \"content_type\": \"image/jpeg\",\n \"crop_type\": \"original\",\n \"filesize_bytes\": 255565,\n \"height\": 1920,\n \"orientation\": \"square\",\n \"public_url\": \"http://gq-images.s3.amazonaws.com/73/91/7f0a839d.jpg\",\n \"uid\": \"0JrRNXyoXdA\",\n \"width\": 1920\n },\n \"tags\": [],\n \"title\": \"\",\n \"uid\": \"yWOL1vbrol3\"\n },{\n \"created_at\": \"2016-01-12 22:24:16+0000\",\n \"crops\": [],\n \"description\": \"\",\n \"meta\": [],\n \"original\": {\n \"aspect_ratio\": \"1:1\",\n \"content_type\": \"image/jpeg\",\n \"crop_type\": \"original\",\n \"filesize_bytes\": 255565,\n \"height\": 1920,\n \"orientation\": \"square\",\n \"public_url\": \"http://gq-images.s3.amazonaws.com/73/91/7f0a839d.jpg\",\n \"uid\": \"0JrRNXyoXdA\",\n \"width\": 1920\n },\n \"tags\": [],\n \"title\": \"\",\n \"uid\": \"yWOL1vbrol3\"\n },{\n \"created_at\": \"2016-01-12 22:24:16+0000\",\n \"crops\": [],\n \"description\": \"\",\n \"meta\": [],\n \"original\": {\n \"aspect_ratio\": \"1:1\",\n \"content_type\": \"image/jpeg\",\n \"crop_type\": \"original\",\n \"filesize_bytes\": 255565,\n \"height\": 1920,\n \"orientation\": \"square\",\n \"public_url\": \"http://gq-images.s3.amazonaws.com/73/91/7f0a839d.jpg\",\n \"uid\": \"0JrRNXyoXdA\",\n \"width\": 1920\n },\n \"tags\": [],\n \"title\": \"\",\n \"uid\": \"yWOL1vbrol3\"\n }]\n },\n \"genre\": {\n \"ad_path\": \"/lifestyle\",\n \"ad_tag\": \"\",\n \"ad_zone\": \"\",\n \"body\": \"\",\n \"created_at\": \"2016-01-12 19:24:43+0000\",\n \"images\": {},\n \"is_sponsored\": False,\n \"meta\": [],\n \"meta_description\": \"\",\n \"meta_title\": \"\",\n \"priority\": 99999,\n \"slug\": \"lifestyle\",\n \"title\": \"Lifestyle\",\n \"type\": \"topic\",\n \"uid\": \"88jD1d2Wwex\",\n \"updated_at\": \"2016-01-14 14:08:55+0000\"\n },\n \"images\": {\n \"default\": {\n \"created_at\": \"2016-01-12 22:24:16+0000\",\n \"crops\": [],\n \"description\": \"\",\n \"meta\": [\n {\n \"key\": \"source\",\n \"value\": \"URL\"\n },\n {\n \"key\": \"source_url\",\n \"value\": \"http://cdni.condenast.co.uk/1920x1920/w_z/Watch-GQ_11Dec13_pr_bt.jpg\"\n }\n ],\n \"original\": {\n \"aspect_ratio\": \"1:1\",\n \"content_type\": \"image/jpeg\",\n \"crop_type\": \"original\",\n \"filesize_bytes\": 255565,\n \"height\": 1920,\n 
\"orientation\": \"square\",\n \"public_url\": \"http://gq-images.s3.amazonaws.com/73/91/7f0a839d.jpg\",\n \"uid\": \"0JrRNXyoXdA\",\n \"width\": 1920\n },\n \"tags\": [],\n \"title\": \"\",\n \"uid\": \"yWOL1vbrol3\"\n }\n },\n \"is_active\": True,\n \"is_legacy\": True,\n \"is_premium\": False,\n \"is_sponsored\": True,\n \"magazine\": {},\n \"meta\": [],\n \"profiles\": [\n {\n \"bio\": \"\",\n \"created_at\": \"2016-01-12 21:00:00+0000\",\n \"full_name\": \"Ken Kessler\",\n \"images\": {},\n \"job_title\": \"\",\n \"slug\": \"ken-kessler\",\n \"social_media\": [],\n \"teaser\": \"\",\n \"uid\": \"prL8BaP8zdB\",\n \"updated_at\": \"2016-01-12 21:00:00+0000\"\n }\n ],\n \"published_at\": \"2013-12-18 15:29:52+0000\",\n \"series\": {\n \"ad_path\": \"\",\n \"ad_tag\": \"\",\n \"ad_zone\": \"\",\n \"body\": \"\",\n \"created_at\": \"2016-02-19 11:17:50+0000\",\n \"images\": {},\n \"is_sponsored\": False,\n \"meta\": [],\n \"meta_description\": \"\",\n \"meta_title\": \"\",\n \"priority\": 99999,\n \"slug\": \"original-shorts\",\n \"title\": \"Original Shorts\",\n \"type\": \"series\",\n \"uid\": \"7qKO2Y12J2B\",\n \"updated_at\": \"2016-02-19 11:17:50+0000\"\n },\n \"show_date\": True,\n \"show_profiles\": True,\n \"slug\": \"rolex-air-king-watch-for-men-review\",\n \"status\": \"published\",\n \"tags\": [{\n \"ad_path\": \"\",\n \"ad_tag\": \"\",\n \"ad_zone\": \"\",\n \"article_priority\": 0,\n \"body\": \"\",\n \"created_at\": \"2016-03-23 14:20:07+0000\",\n \"images\": {\n \"logo\": {\n \"created_at\": \"2016-01-21 11:09:38+0000\",\n \"crops\": [],\n \"description\": \"\",\n \"meta\": [],\n \"original\": {\n \"aspect_ratio\": \"1:0.66\",\n \"content_type\": \"image/jpeg\",\n \"crop_type\": \"original\",\n \"filesize_bytes\": 289525,\n \"height\": 1280,\n \"orientation\": \"landscape\",\n \"public_url\": \"http://gq-images.s3.amazonaws.com/77/54/17d5add7.jpg\",\n \"uid\": \"w8k6vN6a0aP\",\n \"width\": 1920\n },\n \"tags\": [],\n \"title\": \"\",\n \"uid\": \"JXmg7rgvk2x\"\n }\n },\n \"is_sponsored\": False,\n \"meta\": [],\n \"meta_description\": \"\",\n \"meta_title\": \"\",\n \"priority\": 99999,\n \"slug\": \"converse\",\n \"title\": \"Converse\",\n \"type\": \"brand\",\n \"uid\": \"Ylvp7MBB2lj\",\n \"updated_at\": \"2016-03-23 16:45:17+0000\"\n },{\n \"ad_path\": \"/watches\",\n \"ad_tag\": \"\",\n \"ad_zone\": \"\",\n \"article_priority\": 0,\n \"body\": \"\",\n \"created_at\": \"2016-01-12 19:29:17+0000\",\n \"images\": {},\n \"is_sponsored\": False,\n \"meta\": [],\n \"meta_description\": \"\",\n \"meta_title\": \"\",\n \"priority\": 99999,\n \"slug\": \"watches\",\n \"title\": \"Watches\",\n \"type\": \"topic\",\n \"uid\": \"eZ36l3NEV55\",\n \"updated_at\": \"2016-01-14 14:07:10+0000\"\n }],\n \"teaser_long\": \"The only watch a man needs to own? Ken Kessler makes his case for the Rolex Air-King...\",\n \"teaser_short\": \"The only watch a man needs to own? Ken Kessler makes his case for the Rolex Air-King: Rolex Oyster Air-King watch for men in pictures\",\n \"title\": \"A king indeed. A king indeed. A king indeed. 
Wat\",\n \"type\": \"article\",\n \"uid\": \"WZ1vNEPVbOW\",\n \"updated_at\": \"2016-01-17 12:21:16+0000\"\n}\n\n\n@app.route('/')\ndef index():\n ctx = {\n 'article': example_article,\n 'css': demo_css,\n 'js': demo_js\n }\n return render_template('/demo/demo.html', **ctx)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"646642300","text":"import time\n\n\nfrom Data.parameters import Data\nfrom reuse_func import GetData\n\n\nclass check_markers_on_map():\n def __init__(self,driver):\n self.driver = driver\n def test_map(self):\n self.p = GetData()\n self.driver.find_element_by_xpath(Data.hyper_link).click()\n self.p.page_loading(self.driver)\n dots = self.driver.find_elements_by_class_name(Data.dots)\n self.p.page_loading(self.driver)\n count = len(dots)-1\n return count\n","sub_path":"tests/src/UDISE/check_with_map_on_schoolinfra.py","file_name":"check_with_map_on_schoolinfra.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"312198420","text":"\nimport sys,os\nimport numpy as np\nimport scipy.io as sciMat\n\nVARs = ['ADens','BDens','uMinor','uPlus','M','Tau']\n\ndoub = lambda x:np.tile(x,[2,2,2]) if len(x.shape) is 3 else \\\n\t\tnp.tile(x,[1,2,2,2])\n\nsrcName = sys.argv[1]\npmat = sciMat.loadmat(srcName)\nfor v in VARs:\n\tpmat[v] = doub(pmat[v])\n\nh,t = os.path.split(srcName)\nsciMat.savemat(os.path.join(h,'doubled_'+t),pmat,oned_as='column')\n\t\n","sub_path":"rigid/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"149794193","text":"import os\r\n\r\ndef generatorToArray(generator):\r\n\ttemp = []\r\n\tfor i in generator:\r\n\t\ttemp.append(i)\r\n\treturn temp\r\n\r\noriginal = \"breed\"\r\ndesired = \"state\"\r\n\r\nlistFiles = generatorToArray(os.walk(\".\"))\r\n\r\n#print(\"DEBUG Lista total\" + str(listFiles))\r\n\r\n#Priner Nivel\r\nfor i in listFiles[0][2]:\r\n\tif i != \"changeFilenames.py\":\r\n\t\tprint(i)\r\n\t\tos.rename(i,i.replace(original,desired))\r\n\t\t\"\"\"\r\n\t\t\r\n\t\tos.rename(i,i.replace(original.title(),desired.title()))\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\nfor i in listFiles[0][1]:\r\n\tfiles = generatorToArray(os.walk(\"./\" + i))\r\n\tfor j in files:\r\n\t\tos.rename(\"./\" + i + \"/\" + j[2][0], \"./\" + i + \"/\" + j[2][0].replace(original,desired))\r\n\t\tos.rename(\"./\" + i + \"/\" + j[2][0], \"./\" + i + \"/\" + j[2][0].replace(original.title(),desired.title()))\r\n\"\"\"","sub_path":"src/changeFilenames.py","file_name":"changeFilenames.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"144298051","text":"from sla_calculator import SLA_Calculator\n\n\ndef test_sla_time():\n sla_calc = SLA_Calculator()\n\n sla_time = sla_calc.calculate(start_time=\"2019-12-10T01:02:03Z\",\n open_hour=9,\n close_hour=17,\n country_name=\"US\",\n sla_in_hours=4)\n assert sla_time.to_iso8601_string() == '2019-12-10T13:00:00-08:00'\n","sub_path":"test_sla_time.py","file_name":"test_sla_time.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"400116709","text":"import 
wx\n\nclass bucky(wx.Frame):\n\tdef __init__(self, parent, id):\n\t\twx.Frame.__init__(self, parent, id, 'Frame aka window', size = (300,200))\n\t\tpanel = wx.Panel(self)\n\t\t\n\t\tmylist = ['beef', 'tuna', 'coconuts', 'more beef', 'cereal']\n\t\tcont = wx.ListBox(panel, -1, (20,20), (100,80), mylist, wx.LB_SINGLE)\n\t\tcont.SetSelection(3)\n\nif __name__ == \"__main__\":\n\tapp = wx.PySimpleApp()\n\tframe=bucky(parent=None, id=-1)\n\tframe.Show()\n\tapp.MainLoop()\n\n","sub_path":"wx13.py","file_name":"wx13.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"325590268","text":"'''\nThe S&P 500 index is an American stock market based on the market capitalization of 500 large companies\nthat have common stock listed on the NYSE or NASDAQ. 4,000 values, collected in a minute, for 500 stocks\nwill be used to train a model that predicts future values of the S&P 500 index based on linear regression.\n'''\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\n\ndf_training_data = pd.read_csv('training.csv')\n\n'''\nHere, the stock values are being converted from type \"np.ndarray\" to type \"list\" to type \"tuple\". This is so \nthe list is usable by the \"sm.OLS\" command, which will calculate the best coefficients of the linear regression\nmodel to minimize the error. \n'''\narray_list = []\nfor elem in list(df_training_data.columns.values)[0:-1]:\n x = df_training_data[elem].values\n array_list.append(x)\n\narray_list.append(pd.array([1.0]*4000))\ntuple(array_list)\nX_value = np.column_stack(tuple(array_list))\n#X_value = sm.add_constant(X)\nY_value = df_training_data['S&P500'].values\n\n'''\nThe linear regression model is: the square of the model index from the true index. The true index value is the Y_value \nand the model index is calculated by the a combination of the stock values and their respective coefficients, as mentioned\nbefore.\n'''\n\nmodel = sm.OLS(Y_value, X_value)\nresults = model.fit()\n\nY_value.tolist()\nparams_list = results.params.tolist()\n\nerror_list = []\nmodel_value_list = []\nfor Y_True, ii in zip(Y_value, range(4000)):\n stock_price_row = df_training_data.loc[ii].tolist()[0:-1]\n relative_error = abs( ( Y_True - (np.dot(stock_price_row, params_list[0:-1]) + params_list[-1]) )/Y_True )*100 #(Y_True-(a_1*S_1+...+a_500*s_500+a_501)/Y_True\n error_list.append(relative_error)\n model_value_list.append(np.dot(stock_price_row, params_list[0:-1]) + params_list[-1])\nprint(error_list)\nprint(Y_value)\nprint(model_value_list)\n\nfor Y, M in zip(Y_value, model_value_list):\n print(Y,M)\n\n\n# Create OLS_4.py and train the model only with first 3000 data in training.csv.\n# Then test the model performance as above with the rest 1000 data. This test is called out-of sample test.","sub_path":"Financial_Modeling/Stock_Prediction.py","file_name":"Stock_Prediction.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"312350603","text":"#!/usr/bin/env python\n#-----------------------------------------------------------------------------\n# qwiic_button_ex7.py\n#\n# Simple Example for the Qwiic Button. Daisy chain together two Qwiic Buttons \n# with different I2C addresses. This example works for a Qwiic Button with the \n# default address of 0x6F and one with an address changed to 0x5B. 
To change \n# the address of a Qwiic Button, please visit example 6.\n#------------------------------------------------------------------------\n#\n# Written by Priynka Makin @ SparkFun Electronics, January 2021\n# \n# This python library supports the SparkFun Electroncis qwiic \n# qwiic sensor/board ecosystem on a Raspberry Pi (and compatable) single\n# board computers. \n#\n# More information on qwiic is at https://www.sparkfun.com/qwiic\n#\n# Do you like this library? Help support SparkFun. Buy a board!\n#\n#==================================================================================\n# Copyright (c) 2019 SparkFun Electronics\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy \n# of this software and associated documentation files (the \"Software\"), to deal \n# in the Software without restriction, including without limitation the rights \n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n# copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all \n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \n# SOFTWARE.\n#==================================================================================\n# Example 7\n\nfrom __future__ import print_function\nimport qwiic_i2c\nimport qwiic_button\nimport time\nimport sys\n\ndef run_example():\n\n print(\"\\nSparkFun Qwiic Button Example 7\")\n my_button1 = qwiic_button.QwiicButton()\n my_button2 = qwiic_button.QwiicButton(0x5B)\n\n if my_button1.begin() == False:\n print(\"\\nThe Qwiic Button 1 isn't connected to the system. Please check your connection\", \\\n file=sys.stderr)\n return\n if my_button2.begin() == False:\n print(\"\\nThe Qwiic Button 2 isn't connected to the system. 
Please check your connection\", \\\n file=sys.stderr)\n return\n \n print(\"\\nButton's ready!\")\n\n while 1:\n\n # Check if button 1 is pressed\n if my_button1.is_button_pressed() == True:\n print(\"\\nButton 1 is pressed!\")\n \n # Check if button2 is pressed\n if my_button2.is_button_pressed() == True:\n print(\"\\nButton 2 is pressed!\")\n \n time.sleep(0.02) # Don't hammer too hard on the I2C bus\n\nif __name__ == '__main__':\n try:\n run_example()\n except (KeyboardInterrupt, SystemExit) as exErr:\n print(\"\\nEnding Example 7\")\n sys.exit(0)\n","sub_path":"examples/qwiic_button_ex7_2Buttons.py","file_name":"qwiic_button_ex7_2Buttons.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"130961497","text":"import pickle\n\nimport numpy as np\nfrom typing import Dict, Tuple\nimport pandas as pd\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder, MinMaxScaler\nfrom sklearn.metrics import accuracy_score, f1_score, roc_curve, auc\n\nfrom sklearn.model_selection import train_test_split\n\n\nDATASET_SIZE = 1000\ncol_dict = {\n 'age': (29, 77),\n 'sex': (0, 1),\n 'cp': (0, 3),\n 'trestbps': (94, 200),\n 'chol': (126, 564),\n 'fbs': (0, 1),\n 'restecg': (0, 2),\n 'thalach': (71, 202),\n 'exang': (0, 1),\n 'oldpeak': (0.0, 6.5),\n 'slope': (0, 2),\n 'ca': (0, 4),\n 'thal': (0, 3),\n 'target': (0, 1)\n }\n\nCAT_FEATURES = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'thal']\nNUM_FEATURES = ['age', 'chol', 'ca', 'trestbps', 'thalach', 'oldpeak']\n\n\ndef split_train_val(data: pd.DataFrame, val_size: float=0.2, random_state: int=42) -> Tuple[pd.DataFrame, pd.DataFrame]:\n train_data, val_data = train_test_split(\n data, test_size=val_size, random_state=random_state\n )\n return train_data, val_data\n\n\ndef build_categorical_pipeline() -> Pipeline:\n categorical_pipeline = Pipeline(\n [\n ('impute', SimpleImputer(missing_values=np.nan, strategy='most_frequent')),\n ('ohe', OneHotEncoder()),\n ]\n )\n return categorical_pipeline\n\ndef build_numerical_pipeline() -> Pipeline:\n num_pipeline = Pipeline(\n [\n ('impute', SimpleImputer(missing_values=np.nan, strategy='mean')),\n ('scale', MinMaxScaler())\n ]\n )\n return num_pipeline\n\ndef build_transformer(categorical_features: list, numerical_features: list) -> ColumnTransformer:\n transformer = ColumnTransformer(\n [\n (\n 'categorical_pipeline',\n build_categorical_pipeline(),\n categorical_features,\n ),\n (\n 'numerical_pipeline',\n build_numerical_pipeline(),\n numerical_features,\n ),\n ]\n )\n return transformer\n\n\ndef evaluate_model(model, features, target) -> Dict[str, float]:\n predictions = model.predict(features)\n pred_probas = model.predict_proba(features)[:, 1]\n fpr, tpr, thresholds = roc_curve(target, pred_probas)\n return {\n 'roc_auc': auc(fpr, tpr),\n 'accuracy': accuracy_score(target, predictions),\n 'f1_score': f1_score(target, predictions)\n }\n\n\ndef dump_pickle(file_path, model):\n with open(file_path, 'wb') as f:\n pickle.dump(model, f)\n\ndef load_pickle(file_path):\n with open(file_path, 'rb') as fin:\n transformer = pickle.load(fin)\n return transformer","sub_path":"airflow_ml_dags/dags/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} 
+{"seq_id":"487078118","text":"#imports\nfrom collections import OrderedDict\nfrom scipy.spatial import distance as dist\nimport numpy as np\n\nclass CentroidTracker:\n \n def __init__ (self):\n # list that will hold the centroids of the active objects\n self.objects = OrderedDict()\n # list that will hold how many frames the active objects have been off screen for\n self.disappeared = OrderedDict()\n # initially set the next object to be the 0th\n self.nextObjectID = 0\n # max number of frames an object can be not on screen before its index is deleted\n self.maxDisappeared = 50\n \n def register(self, centroid):\n self.objects[self.nextObjectID] = centroid\n # object has been off screen for zero frames\n self.disappeared[self.nextObjectID] = 0\n # get ready to set next unique object\n self.nextObjectID += 1\n \n def deregister(self, objectID):\n del self.objects[objectID]\n del self.disappeared[objectID]\n \n def update(self, rects):\n \n # if no bounding boxes in frame, add 1 frame to self.disappeared for each object being tracked\n if len(rects) == 0:\n for objectID in list(self.disappeared.keys()):\n self.disappeared[objectID] += 1\n \n if self.disappeared[objectID] > self.maxDisappeared:\n self.deregister(objectID)\n \n return self.objects\n \n # create an array of zeros to be filled with bounding box centroids\n inputCentroids = np.zeros((len(rects), 2), dtype = \"int\")\n \n # loop through the bounding box rects\n for(i, (ymin, xmin, ymax, xmax)) in enumerate(rects):\n # find the center of each bounding box\n cX = int((xmin + xmax) / 2.0)\n cY = int((ymin + ymax) / 2.0)\n # put the center in the inputCentroids[] list\n inputCentroids[i] = (cX, cY)\n \n # if there are currently no objects being tracked \n if len(self.objects) == 0:\n for i in range(0, len(inputCentroids)):\n self.register(inputCentroids[i])\n \n # if there are objects being tracked\n else:\n # split self.objects into the IDs and the values\n objectIDs = list(self.objects.keys())\n objectCentroids = list(self.objects.values())\n\n \n # calculate the distance between the new centroids and the previous centroids \n D = dist.cdist(np.array(objectCentroids), inputCentroids)\n \n # orderes the matix D and gives the indexes the smallest distance between each old centroid to a new centroid\n # rows would give the current centroid indes and cols would give the centroid index of the nearest centroid\n rows = D.min(axis=1).argsort()\n cols = D.argmin(axis=1)[rows]\n \n # variables to keep track of which rows and columns already examines\n usedRows = set()\n usedCols = set()\n \n # loop through all combinations of rows and columns\n for(row, col) in zip(rows, cols):\n \n # if row or column has been examined before we want to ignore it\n if row in usedRows or col in usedCols:\n continue\n \n objectID = objectIDs[row]\n self.objects[objectID] = inputCentroids[col]\n self.disappeared[objectID] = 0\n \n # add the examined row and col\n usedRows.add(row)\n usedCols.add(col)\n \n # keep track of unused rows and columns for the next if statement\n unusedRows = set(range(0, D.shape[0])).difference(usedRows)\n unusedCols = set(range(0, D.shape[1])).difference(usedCols)\n \n # handle situations when the number of new centroids does not equal the number of old centroids\n # objects have potentially disappeared\n if D.shape[0] >= D.shape[1]:\n for row in unusedRows:\n # index disappeared count for objectID that is missing\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n \n # check to see if consecutive frames that the object 
has been off screen is greater than threshold and delete if it has been\n if self.disappeared[objectID] > self.maxDisappeared:\n self.deregister(objectID)\n \n # objects have appeared\n else:\n for col in unusedCols:\n self.register(inputCentroids[col])\n\n return self.objects\n ","sub_path":"object_detection_tpu_tracking_dlib_test/centroid_tracker.py","file_name":"centroid_tracker.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"381733487","text":"# CURRENT\n# Riva Tropp\n# 7/29/2015\n# Scales up exits to match entries.\n###############################################################################\n#Scale up\n###############################################################################\nf1 = open(\"f_noon.csv\") #Be sure to have the correct file.\ntime = f1.readlines()\nsum_ent = 0.0\nsum_x = 0.0\nentries = []\nexits = []\nratio = 0.0\ntimesq = 0\ntime.pop(0) #Header\n\n#Grab original sum and ratio:\nfor thing in time:\n fields = thing.split(\",\")\n e = float(fields[6].strip('\"'))\n x = float(fields[7].strip('\\n').strip('\"')) \n sum_ent += e\n sum_x += x\n entries.append(e)\n exits.append(x)\n if \"127\" in fields[3]: #Grab the index of times square to adjust there later\n timesq = entries.index(e)\nf1.close()\n\n#diff = sum_ent - sum_x\nratio = sum_ent/sum_x\n\n#Scale up each exit by the ratio.\nfor x in xrange(0, len(exits)):\n exits[x] = exits[x] * ratio\n\nsum_ent = 0\nsum_x = 0\n\n#Round all the entries and exits, find the differences. \nfor e in xrange(0, len(entries)):\n entries[e] = int(entries[e] + .5)\n exits[e] = int(exits[e] + .5)\n sum_ent += entries[e]\n sum_x += exits[e]\n\ndiff = sum_ent - sum_x\nsum_ent = 0\nsum_x = 0\n\n#Subtract the difference from entries.\nentries[timesq] -= diff\n\n############################################################################\n#To Check:\n###########################################################################\n#for e in xrange(0, len(entries)):\n# entries[e] = int(entries[e])\n# exits[e] = int(exits[e])\n# sum_ent += entries[e]\n# sum_x += exits[e]\n\n#diff = sum_ent - sum_x\n#print sum_ent, sum_x, diff\n","sub_path":"PrePres/scaleup.py","file_name":"scaleup.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"370347087","text":"# Un tronco de cono, es una figura geométrica resultante de cortar la parte superior de un cono normal\n# El volumen de un cono se halla fácilmente multiplicando el área de la base (que para el cono es un círculo, y el área se calcula como A = πr ) por la altura. Su misión ahora es escribir un programa en python que halle el 2 volumen del tronco de cono, a partir de los datos necesarios.\nimport math\n\nr = float(input('Ingrese el radio lado del círculo superior: '))\nR = float(input('Ingrese el radio lado del círculo inferior: '))\nh = float(input('Ingrese la altura del tronco: '))\n\n\ndef cono(x, y, z):\n\n volumen = ((math.pi * z)/3) * (y ** 2 + x ** 2 + y * x)\n\n return 'El volumen del cono es de %s cm3.' 
% \"{0:,.2f}\".format(volumen)\n\n\nprint(cono(r, R, h))\n# Ingrese el radio lado del círculo superior: 5\n# Ingrese el radio lado del círculo inferior: 13\n# Ingrese la altura del tronco: 15\n# El volumen del cono es de 4,068.36 cm3.\n","sub_path":"guia-1/taller-serie-ejercicios-programación/ejercicio-3.py","file_name":"ejercicio-3.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"203730852","text":"import os\r\n\r\ntry:\r\n import sublime\r\n import sublime_plugin\r\n from sublime import status_message, error_message\r\nexcept ImportError: # running tests\r\n import sys\r\n\r\n from tests.sublime_fake import sublime\r\n from tests.sublime_fake import sublime_plugin\r\n\r\n sys.modules['sublime'] = sublime\r\n sys.modules['sublime_plugin'] = sublime_plugin\r\n\r\nif sublime.version().startswith('2'):\r\n import ctags\r\n from ctags import (FILENAME, parse_tag_lines, PATH_ORDER, SYMBOL,\r\n TagElements, TagFile)\r\n from helpers.edit import Edit\r\nelse: # safe to assume if not ST2 then ST3\r\n from CTags import ctags\r\n from CTags.ctags import (FILENAME, parse_tag_lines, PATH_ORDER, SYMBOL,\r\n TagElements, TagFile)\r\n from CTags.helpers.edit import Edit\r\n\r\n\r\ndef get_settings():\r\n \"\"\"Load settings.\r\n\r\n :returns: dictionary containing settings\r\n \"\"\"\r\n return sublime.load_settings(\"CTags.sublime-settings\")\r\n\r\n\r\ndef get_setting(key, default=None):\r\n \"\"\"Load individual setting.\r\n\r\n :param key: setting key to get value for\r\n :param default: default value to return if no value found\r\n\r\n :returns: value for ``key`` if ``key`` exists, else ``default``\r\n \"\"\"\r\n return get_settings().get(key, default)\r\n\r\nsetting = get_setting\r\n\r\n\r\nclass ctags_access_merge(sublime_plugin.TextCommand):\r\n def run(self, edit, **args):\r\n # todo: use mmap\r\n tags = []\r\n\r\n view = self.view\r\n tag_files = collect_project_tag_files(view)\r\n print(\"Found\", len(tag_files), \"tag files\")\r\n for path in tag_files:\r\n folder = os.sep.join(path.split(os.sep)[:-1])\r\n # column not important; merge creates a new file that is sorted\r\n with TagFile(path, SYMBOL) as f:\r\n for tag in f.search():\r\n # remove headers\r\n if not tag[SYMBOL].startswith(\"!\"):\r\n tag[FILENAME] = self.make_absolute(tag[FILENAME], folder)\r\n tags.append(tag)\r\n\r\n outfile = os.path.join(view.window().folders()[0], \".tagsmaster\")\r\n self.create_ctags_file(tags, outfile, SYMBOL)\r\n self.create_ctags_file(tags, outfile + \"_sorted_by_file\", FILENAME, SYMBOL)\r\n\r\n def create_ctags_file(self, tags, path, sort, sort2 = None):\r\n print(\"Creating new tags file\", path, \"containing\", len(tags), \"tags\")\r\n target_folder = os.sep.join(path.split(os.sep)[:-1])\r\n # update entry paths to be relative to the new tag file\r\n for tag in tags:\r\n tag[FILENAME] = self.make_relative_when_better(tag[FILENAME], target_folder)\r\n\r\n def get_sort_key(tag):\r\n if sort2 == None:\r\n return tag[sort]\r\n else:\r\n return (tag[sort], tag[sort2])\r\n\r\n tags = sorted(tags, key=get_sort_key)\r\n with open(path, \"w+\") as f:\r\n # write header\r\n f.write(\"!_TAG_FILE_FORMAT 2\\n\")\r\n f.write(\"!_TAG_FILE_SORTED {}\\n\".format(sort+1))\r\n # write entries\r\n f.writelines([tag.line + \"\\n\" for tag in tags])\r\n\r\n def make_absolute(self, path, root):\r\n return os.path.abspath(os.path.join(root, path))\r\n\r\n def make_relative_when_better(self, path, topath):\r\n if 
os.path.splitdrive(path)[0] == os.path.splitdrive(topath)[0]:\r\n relpath = os.path.relpath(path, topath)\r\n if len(relpath) < len(path):\r\n return relpath\r\n return path\r\n \r\ndef collect_project_tag_files(view):\r\n tag_files = []\r\n\r\n for folder in view.window().folders():\r\n for dirName, subdirList, fileList in os.walk(folder):\r\n search_path = os.path.join(folder, dirName)\r\n tag_files = tag_files + collect_tag_files_in_folder(search_path)\r\n\r\n # read all tag files in project\r\n for folder in view.window().folders():\r\n tag_files = tag_files + collect_tag_files_in_folder(folder)\r\n\r\n # read and add additional tag file paths from 'extra_tag_paths' setting\r\n try:\r\n for (selector, platform), path in setting('extra_tag_paths'):\r\n if view.match_selector(view.sel()[0].begin(), selector):\r\n if sublime.platform() == platform:\r\n tag_files = tag_files + collect_tag_files_in_folder(path)\r\n except Exception as e:\r\n print(e)\r\n\r\n return list(set(tag_files))\r\n\r\n\r\ndef collect_tag_files_in_folder(folder):\r\n search_paths = []\r\n search_paths.append(\r\n os.path.normpath(\r\n os.path.join(folder, setting('tag_file'))))\r\n for extrafile in setting('extra_tag_files'):\r\n search_paths.append(\r\n os.path.normpath(\r\n os.path.join(folder, extrafile)))\r\n return check_search_paths(search_paths)\r\n\r\n\r\ndef check_search_paths(paths):\r\n ret = []\r\n for p in paths:\r\n if p and (p not in ret) and os.path.exists(p) and os.path.isfile(p):\r\n ret.append(p)\r\n return ret\r\n\r\n","sub_path":"ctagsaccess.py","file_name":"ctagsaccess.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"79714355","text":"'''\nCreated on Apr 30, 2014\n\n@author: alex\n'''\nimport random\nimport time\n\nimport networkx as nx\n\nfrom des.delays import delays_init\nfrom des.entities.network_entity import NetworkEntity\nfrom des.event.event import Event\nfrom des.entities.link import Link\nfrom des.event_q import EventQ\nfrom des.exception import TimeLoopException, RepeatedEntityException\nfrom des.report.report import Report\nfrom des.settings_handler import SettingsHandler\nfrom des.time import SimTime\nfrom des.util import Ordered_queue, Singleton\n\nimport des.report.r_print as r_print\n\n_log_header = \"\"\"\n################################################################################\nreplica number: {}\n################################################################################\n\"\"\"\n\nclass Sim(Singleton):\n '''\n Base class for the simulation\n '''\n def __init__(self):\n self._executed_events = 0\n self._max_events = 5000\n self._replicas = 0\n\n self._sim_time = SimTime()\n Link.set_time_obj(self._sim_time)\n\n self._event_q = EventQ(self._sim_time)\n Event.set_event_q(self._event_q)\n\n self._graph = nx.DiGraph()\n NetworkEntity.set_graph(self._graph)\n\n self._report = Report(self._sim_time)\n #self._report.init()\n\n self._sett = SettingsHandler()\n\n self._random = None\n\n self._pre_hooks = list()\n self._pos_hooks = list()\n\n def init(self, settings_module=None, seed=0):\n \"\"\"\n method to initialise the simulation only to be called by des.__init__.py\n\n @param settings_module: when null, loads settings from settings.py\n But when testing is better to pass it directly\n\n @param seed: the integer used to initialize the random module\n \"\"\"\n self._random = random.Random(seed)\n\n self._sett.load_settings_file(settings_module)\n\n 
Report.load_settings(self._sett)\n self._report.init()\n\n self._sett.load_graph(self.get_graph())\n\n self._replicas = self._sett.load_sim_sett()\n self._max_events = self._sett.get_value_by_name('MAX_EVENTS', int, 5000)\n\n delays_init(self._sett)\n\n\n def run(self):\n r_print.rprint(_log_header.format(0))\n self._run()\n\n replica = 1\n while replica < self._replicas:\n self.restart()\n\n r_print.rprint(_log_header.format(replica))\n self._run()\n\n replica += 1\n\n print('Performed {} replicas'.format(replica))\n\n def _run(self):\n for hook in self._pre_hooks:\n hook()\n\n while not self.is_finished():\n self._next_event()._execute()\n\n self._executed_events += 1\n\n for hook in self._pos_hooks:\n hook()\n\n def _next_event(self):\n \"\"\"\n @rtype: Event\n \"\"\"\n next_event = self._event_q.next_event()\n if (next_event is not None):\n self._sim_time.update_time(next_event.get_execute_time())\n return next_event\n else:\n return None\n\n\n def is_finished(self):\n #TODO: implement this with a proper condition\n\n ret = self._executed_events >= self._max_events\n ret = ret or len(self._event_q) == 0\n return ret\n\n\n def add_network_entity(self, entity):\n \"\"\"\n @type entity: des.entities.network_entity\n \"\"\"\n\n if not self.get_graph().has_node(entity):\n self.get_graph().add_node(entity)\n else:\n raise RepeatedEntityException(entity.__class__.__name__, str(entity))\n\n\n def print_graph(self):\n print(\"------- Sim graph info -------\")\n\n g = self.get_graph()\n for graph_node in g.nodes():\n graph_node.print_info()\n\n print(\"------------------------------\")\n\n\n def clean(self):\n '''\n Resets the simulation to the state when it was first instantiated\n (Graph is kept)\n '''\n self._sim_time = SimTime()\n Link.set_time_obj(self._sim_time)\n\n self._executed_events = 0\n self._event_q = EventQ(self._sim_time)\n Event.set_event_q(self._event_q)\n\n self._graph = nx.DiGraph()\n NetworkEntity.set_graph(self._graph)\n\n self._report = Report(self._sim_time)\n self._report.init()\n\n def restart(self):\n '''\n Reinitializes the sim for a new replica\n '''\n self.clean()\n\n self._sett.load_graph(self.get_graph())\n\n\n################################################################################\n#### Gets/Sets/Adds\n################################################################################\n\n def add_pre_hook(self, hook):\n if not callable(hook):\n raise Exception('Tried to add a pre hook that is not Callable')\n self._pre_hooks.append(hook)\n\n def add_pos_hook(self, hook):\n if not callable(hook):\n raise Exception('Tried to add a pos hook that is not Callable')\n self._pos_hooks.append(hook)\n\n def get_amount_of_scheduled_events(self):\n \"\"\"\n Returns the number of scheduled events.\n \"\"\"\n return len(self._event_q)\n\n def get_time(self):\n return self._sim_time.get_time()\n\n def get_random(self):\n '''\n @rtype: Random\n '''\n return self._random\n\n def get_graph(self):\n \"\"\"\n @rtype: DiGraph\n \"\"\"\n return self._graph\n","sub_path":"src/des/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"350930249","text":"# Copyright (c) 2022 Iluvatar CoreX. All rights reserved.\n# Copyright Declaration: This software, including all of its code and documentation,\n# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX\n# Semiconductor Co., Ltd. 
and its affiliates (\"Iluvatar CoreX\") in accordance with the PRC Copyright\n# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar\n# CoreX. No user of this software shall have any right, ownership or interest in this software and\n# any use of this software shall be in compliance with the terms and conditions of the End User\n# License Agreement.\n\"\"\" PyTorch selectable adaptive pooling\nAdaptive pooling with the ability to select the type of pooling from:\n * 'avg' - Average pooling\n * 'max' - Max pooling\n * 'avgmax' - Sum of average and max pooling re-scaled by 0.5\n * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim\n\nBoth a functional and a nn.Module version of the pooling is provided.\n\nAuthor: Ross Wightman (rwightman)\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef pooling_factor(pool_type='avg'):\n return 2 if pool_type == 'avgmaxc' else 1\n\n\ndef adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):\n \"\"\"Selectable global pooling function with dynamic input kernel size\n \"\"\"\n if pool_type == 'avgmaxc':\n x = torch.cat([\n F.avg_pool2d(\n x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),\n F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)\n ], dim=1)\n elif pool_type == 'avgmax':\n x_avg = F.avg_pool2d(\n x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)\n x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)\n x = 0.5 * (x_avg + x_max)\n elif pool_type == 'max':\n x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)\n else:\n if pool_type != 'avg':\n print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)\n x = F.avg_pool2d(\n x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)\n return x\n\n\nclass AdaptiveAvgMaxPool2d(torch.nn.Module):\n \"\"\"Selectable global pooling layer with dynamic input kernel size\n \"\"\"\n def __init__(self, output_size=1, pool_type='avg'):\n super(AdaptiveAvgMaxPool2d, self).__init__()\n self.output_size = output_size\n self.pool_type = pool_type\n if pool_type == 'avgmaxc' or pool_type == 'avgmax':\n self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])\n elif pool_type == 'max':\n self.pool = nn.AdaptiveMaxPool2d(output_size)\n else:\n if pool_type != 'avg':\n print('Invalid pool type %s specified. Defaulting to average pooling.' 
% pool_type)\n self.pool = nn.AdaptiveAvgPool2d(output_size)\n\n def forward(self, x):\n if self.pool_type == 'avgmaxc':\n x = torch.cat([p(x) for p in self.pool], dim=1)\n elif self.pool_type == 'avgmax':\n x = 0.5 * torch.sum(torch.stack([p(x) for p in self.pool]), 0).squeeze(dim=0)\n else:\n x = self.pool(x)\n return x\n\n def factor(self):\n return pooling_factor(self.pool_type)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + 'output_size=' + str(self.output_size) \\\n + ', pool_type=' + self.pool_type + ')'\n","sub_path":"cv/classification/dpn107/pytorch/adaptive_avgmax_pool.py","file_name":"adaptive_avgmax_pool.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"514442236","text":"def instantiateComponent(svcBLE_BASComponent):\n print('svcBLE_BASComponent')\n configName = Variables.get('__CONFIGURATION_NAME')\n processor = Variables.get(\"__PROCESSOR\")\n\n print('Config Name: {} processor: {}'.format(configName, processor))\n\n # Battery level with Notify\n basNotifyEnable = svcBLE_BASComponent.createBooleanSymbol('BAS_NOTIFY_ENABLE', None)\n basNotifyEnable.setLabel('Enble Notify Property')\n basNotifyEnable.setDefaultValue(False)\n basNotifyEnable.setVisible(True) \n \n # Add ble_bas.c file\n bleBasHeaderFile = svcBLE_BASComponent.createFileSymbol(None, None)\n bleBasHeaderFile.setSourcePath('driver/ble/templates/ble_bas.c.ftl')\n bleBasHeaderFile.setOutputName('ble_bas.c')\n bleBasHeaderFile.setOverwrite(True)\n bleBasHeaderFile.setDestPath('ble/service_ble/ble_bas')\n bleBasHeaderFile.setProjectPath('config/' + configName + '/ble/service_ble/ble_bas')\n bleBasHeaderFile.setType('SOURCE')\n bleBasHeaderFile.setEnabled(True)\n bleBasHeaderFile.setMarkup(True) \n\n # Add ble_bas.h file\n bleBasHeaderFile = svcBLE_BASComponent.createFileSymbol(None, None)\n bleBasHeaderFile.setSourcePath('driver/ble/templates/ble_bas.h.ftl')\n bleBasHeaderFile.setOutputName('ble_bas.h')\n bleBasHeaderFile.setOverwrite(True)\n bleBasHeaderFile.setDestPath('ble/service_ble/ble_bas')\n bleBasHeaderFile.setProjectPath('config/' + configName + '/ble/service_ble/ble_bas')\n bleBasHeaderFile.setType('HEADER')\n bleBasHeaderFile.setEnabled(True)\n bleBasHeaderFile.setMarkup(True) \n\n\ndef finalizeComponent(BLEStackComponent):\n Log.writeInfoMessage('Finalizing: {}'.format(BLEStackComponent.getID()))\n activeComponents = Database.getActiveComponentIDs()\n requiredComponents = ['libBLEStack']\n for r in requiredComponents:\n if r not in activeComponents:\n res = Database.activateComponents([r])\n","sub_path":"H3/wireless/driver/ble/config/bas.py","file_name":"bas.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"324382891","text":"# ----------\r\n# User Instructions:\r\n# \r\n# Implement the function optimum_policy2D below.\r\n#\r\n# You are given a car in grid with initial state\r\n# init. 
Your task is to compute and return the car's \r\n# optimal path to the position specified in goal; \r\n# the costs for each motion are as defined in cost.\r\n#\r\n# There are four motion directions: up, left, down, and right.\r\n# Increasing the index in this array corresponds to making a\r\n# a left turn, and decreasing the index corresponds to making a \r\n# right turn.\r\n\r\nforward = [[-1, 0], # go up\r\n [ 0, -1], # go left\r\n [ 1, 0], # go down\r\n [ 0, 1]] # go right\r\nforward_name = ['up', 'left', 'down', 'right']\r\n\r\n# action has 3 values: right turn, no turn, left turn\r\naction = [-1, 0, 1]\r\naction_name = ['R', '#', 'L']\r\n\r\n# EXAMPLE INPUTS:\r\n# grid format:\r\n# 0 = navigable space\r\n# 1 = unnavigable space \r\ngrid = [[1, 1, 1, 0, 0, 0],\r\n [1, 1, 1, 0, 1, 0],\r\n [0, 0, 0, 0, 0, 0],\r\n [1, 1, 1, 0, 1, 1],\r\n [1, 1, 1, 0, 1, 1]]\r\n\r\ninit = [4, 3, 0] # given in the form [row,col,direction]\r\n # direction = 0: up\r\n # 1: left\r\n # 2: down\r\n # 3: right\r\n \r\ngoal = [2, 0] # given in the form [row,col]\r\n\r\ncost = [2, 1, 20] # cost has 3 values, corresponding to making \r\n # a right turn, no turn, and a left turn\r\n\r\n# EXAMPLE OUTPUT:\r\n# calling optimum_policy2D with the given parameters should return \r\n# [[' ', ' ', ' ', 'R', '#', 'R'],\r\n# [' ', ' ', ' ', '#', ' ', '#'],\r\n# ['*', '#', '#', '#', '#', 'R'],\r\n# [' ', ' ', ' ', '#', ' ', ' '],\r\n# [' ', ' ', ' ', '#', ' ', ' ']]\r\n# ----------\r\n\r\n# ----------------------------------------\r\n# modify code below\r\n# ----------------------------------------\r\n\r\ndef optimum_policy2D(grid,init,goal,cost):\r\n unknown_value = 9999\r\n unknown_action = -1\r\n values = [[[[unknown_value, unknown_action] for row in range(len(grid[0]))] for col in range(len(grid))] for act in range(len(forward))]\r\n open = [[0, init[0], init[1], init[2], unknown_action]]\r\n while len(open) > 0:\r\n # tag value\r\n current = open.pop()\r\n g, x, y, d, a = current[0], current[1], current[2], current[3], current[4]\r\n if values[d][x][y][0] > g:\r\n values[d][x][y] = [g,a]\r\n # check possible action paths\r\n for a2 in range(len(action)):\r\n d2 = (d + action[a2]) % len(forward)\r\n x2 = x + forward[d2][0]\r\n y2 = y + forward[d2][1]\r\n g2 = g + cost[a2]\r\n if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]) and grid[x2][y2] == 0 and values[d2][x2][y2][0] > g2:\r\n open.append([g2, x2, y2, d2, a2])\r\n path_available = False\r\n for d in range(len(forward)):\r\n path_available = path_available or values[d][goal[0]][goal[1]][0] != unknown_value\r\n if not path_available:\r\n return ['fail']\r\n policy2D = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))]\r\n policy2D[goal[0]][goal[1]] = '*'\r\n current = goal\r\n tick = unknown_value\r\n direction = unknown_value\r\n # find starting point\r\n for d in range(len(forward)):\r\n x, y = current[0], current[1]\r\n if values[d][x][y][0] < tick:\r\n tick = values[d][x][y][0]\r\n direction = d\r\n while current != [init[0],init[1]]:\r\n x, y = current[0], current[1]\r\n a = values[direction][x][y][1]\r\n x2 = x - forward[direction][0]\r\n y2 = y - forward[direction][1]\r\n policy2D[x2][y2] = action_name[a]\r\n current = [x2, y2]\r\n direction = (direction - action[a]) % len(forward)\r\n return policy2D\r\n\r\nfor p in optimum_policy2D(grid,init,goal,cost):\r\n 
print(p)\r\n","sub_path":"Term3/Labs/Quizes/leftturnpolicy.py","file_name":"leftturnpolicy.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"387311968","text":"from sklearn.preprocessing import LabelBinarizer\nimport numpy as np\nimport tensorflow as tf\n\nclass LinearMachine():\n def __init__(self):\n self.labelbinarizer = LabelBinarizer()\n\n def fit(self, samples, targets):\n self.labels = list(set(targets))\n y = tf.constant(self.labelbinarizer.fit_transform(targets), dtype=tf.float32, name='y')\n X = tf.concat([tf.ones([samples.shape[0],1], dtype=tf.float32), tf.constant(samples, dtype=tf.float32)], axis=1, name='X')\n Xt = tf.transpose(X)\n self.W = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(Xt, X)), Xt), y)\n with tf.Session() as sess:\n self.W.eval()\n\n def predict(self, samples, y=None):\n X = tf.concat([tf.ones([samples.shape[0],1], dtype=tf.float32), tf.constant(samples, dtype=tf.float32)], axis=1, name='X')\n prediction = tf.matmul(X,self.W)\n with tf.Session() as sess:\n pred = prediction.eval()\n if pred.ndim>1 and pred.shape[1]>1:\n return np.array([self.labels[i] for i in np.argmax(pred, axis=1)])\n else:\n return np.array([self.labels[int(i>0)] for i in pred])\n\n'''\nfrom sklearn import datasets\ndata = datasets.load_iris()\ndata = datasets.load_breast_cancer()\nclf = LinearMachine()\nclf.fit(data[\"data\"],data[\"target\"])\nresp = clf.predict(data[\"data\"])\nprint(sum(resp==data[\"target\"])/len(resp))\n#'''\n","sub_path":"LinearMachine.py","file_name":"LinearMachine.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"531304813","text":"x = int(input('Enter the First Number: '))\ny = int(input('Enter the Second Number: '))\nvariable3 = x\nx = y\ny = variable3\nprint('The Value of x after swapping: {}'.format(x))\nprint('The Value of y after swapping: {}'.format(y))\n\nx = int(input('Enter the height of Triangle in cms: '))\ny = int(input('Enter the Base of Triangle in cms: '))\nArea = 0.5*x*y\nprint('Area of the described triangle is: ', Area,'cm square.')\n\na=int(input(\"Enter the x2 Coefficient: \"))\nb=int(input(\"Enter the x Coefficient: \"))\nc=int(input(\"Enter the Constant term: \"))\nd=(b**2)-(4*a*c)\nsolution1=(-1*b+d**0.5)/(2*a)\nsolution2=(-1*b-d**0.5)/(2*a)\nprint('The two solutions are: ',solution1,'and',solution2)\n\nx = float(input('Enter the Temperature in Celsius: '))\ny = (x * 1.8) + 32\nprint('Temperature on the Fahrenheit scale is: ',y)\n\nnum = float(input(\"Enter a number: \"))\nif num >= 0:\n if num == 0:\n print(\"Zero\")\n else:\n print(\"Positive number\")\nelse:\n print(\"Negative number\")\n\nx = int(input('Enter the Year: '))\nif x % 4 == 0:\n if x % 100 == 0:\n if x % 400 == 0:\n print(x, 'is a leap year.')\n else:\n print(x, 'is not a leap year.')\n else:\n print(x, 'is a leap year.')\nelse:\n print(x, 'is not a leap year.')\n\nnum1 = float(input('Enter the First Number: '))\nnum2 = float(input('Enter the Second Number: '))\nnum3 = float(input('Enter the Third Number: '))\nif (num1 >= num2) and (num1 >= num3):\n largest = num1\nelif (num2 >= num1) and (num2 >= num3):\n largest = num2\nelse:\n largest = num3\nprint(\"The largest number is\", largest)\n\nnum = int(input('Enter the Number: '))\nsum = 0\nwhile(num>0):\n remainder = num%10\n sum = sum+remainder\n num = num//10\nprint('Sum of the digits of the given number is:', sum)\n\nnum = 
int(input('Enter the Number: '))\nprint(len(str(num)))\n\nsum1=0\nnum=int(input(\"Enter a number:\"))\ntemp=num\nwhile(num):\n i=1\n f=1\n r=num%10\n while(i<=r):\n f=f*i\n i=i+1\n sum1=sum1+f\n num=num//10\nif(sum1==temp):\n print(\"The number is a strong number.\")\nelse:\n print(\"The number is not a strong number.\")\n\ndef gcd(a, b):\n if a == 0:\n return b\n return gcd(b % a, a)\n\n\ndef lcm(a, b):\n return (a / gcd(a, b)) * b\n\n\na = int(input('Enter the First Number: '))\nb = int(input('Enter the Second Number: '))\nprint('LCM of', a, 'and', b, 'is', lcm(a, b))\n\n\nnum = int(input(\"Enter a number: \"))\n\nsum = 0\n\ntemp = num\nwhile temp > 0:\n digit = temp % 10\n sum += digit ** 3\n temp //= 10\n\nif num == sum:\n print(num,\"is an Armstrong number\")\nelse:\n print(num,\"is not an Armstrong number\")\n\nnum = int(input('Enter a Number: '))\n\nif num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n print(num, \"is not a prime number\")\n break\n else:\n print(num, \"is a prime number\")\n\nelse:\n print(num, \"is not a prime number\")\n\nnum = int(input('Enter the number: '))\nfactorial = 1\nif num < 0:\n print(\"Go study some maths, factorial does not exist for negative numbers!\")\nelif num == 0:\n print(\"The factorial of 0 is 1\")\nelse:\n for i in range(1,num + 1):\n factorial = factorial*i\n print(\"The factorial of\",num,\"is\",factorial)\n\nnterms = int(input(\"Number of terms in the sequence: \"))\nn1, n2 = 0, 1\ncount = 0\nif nterms <= 0:\n print(\"Please enter a positive integer\")\nelif nterms == 1:\n print(\"Fibonacci sequence upto\",nterms,\":\")\n print(n1)\nelse:\n print(\"The Fibonacci sequence is: \")\n while count < nterms:\n print(n1)\n nth = n1 + n2\n n1 = n2\n n2 = nth\n count += 1\n\nx = int(input('Enter the value of x: '))\nn = int(input('Enter the value of n: '))\n\n\ndef fact(x):\n prod = 1\n for i in range(1, x + 1):\n prod *= i\n return prod\n\n\nsum = 0\nfor i in range(1, n + 1):\n sum += x ** i / fact(i)\nprint(sum)\n\nn = int(input('Enter a number: '))\nfor i in range(1,n+1):\n print(i*'*')\n\nn = int(input('Enter a number: '))\nfor i in range(1, n + 1):\n print((n - i) * ' ' + i * '*' + (n - i) * ' ')\n\nn = int(input('Enter a number: '))\nstring = ''\nfor i in range (1,n+1):\n x = str(i)+''\n string+=x\n print(string)\n\nstring = 'SAMI'\nn = 0\nfor i in string:\n n+=1\n print(n*i)\n\nstring = 'QWERTY'\nres = ''\nfor i in string:\n res+=i\n print(res)\n\nstring = input('Enter a String: ')\nn = 0\nfor i in string:\n n+=1\nprint(n)\n\nvowels = 'aeiou'\nstring = input('Enter a String: ')\nres = ''\nfor i in string:\n if i in vowels:\n res+=i\nprint('The number of vowels are: ', len(res))\nprint('Vowels: ', res)\n\nstr1 = str(input(\"Enter string:\"))\nn=int(input(\"Enter number of characters to be changed to lowercase:\"))\nprint(str1[:n].lower() + str1[n:])\n\ns=input(\"Enter any String:\")\nr=\"\"\nfor x in s:\n r=x+r\nprint(\"Reversed String =\",r)\n\nS='string1'\ns='string2'\ncount=0\nfor i in S:\n if i in s:\n count+=1\nprint(\"Number of substrings=\", count)\n\nx=float(input(\"Enter number:\"))\nfx=x\nif x<1:\n fx=x*100\nelse:\n fx=x\nprint(\"Formatted number with percentage=\", fx, \"%\")\n\nx = input('Enter a String: ')\nw = \"\"\nfor i in x:\n w = i + w\nif (x == w):\n print(\"Yes, it is a palindrome.\")\nelse:\n print(\"No, it is not a 
palindrome.\")\n\n\n","sub_path":"Github.py","file_name":"Github.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"597060150","text":"from rest_framework.serializers import (\n ModelSerializer, \n RelatedField, \n CharField\n)\nfrom .models import (\n Product,\n PriceList,\n SubTypeProduct,\n TypeProduct,\n SizeProduct,\n PresentationProduct,\n FeatureProduct,\n Place,\n Shipping,\n Order,\n OrderItem,\n )\nfrom registration.serializers import (\n AddressSerializer,\n ExtraFieldsSerializer\n )\n\nclass SubTypeSerializer(ExtraFieldsSerializer):\n '''\n Serializer of Category Class.\n '''\n class Meta:\n model = SubTypeProduct\n fields = '__all__'\n\nclass TypeSerializer(ExtraFieldsSerializer):\n '''\n Serializer of Category Class.\n '''\n subtype = SubTypeSerializer(many=True, read_only=True)\n class Meta:\n model = TypeProduct\n fields = '__all__'\n\nclass SizeSerializer(ExtraFieldsSerializer):\n '''\n Serializer of Category Class.\n '''\n class Meta:\n model = SizeProduct\n fields = '__all__'\n\nclass PresentationSerializer(ExtraFieldsSerializer):\n '''\n Serializer of Category Class.\n '''\n class Meta:\n model = PresentationProduct\n fields = '__all__'\n\nclass ShippingSerializer(ExtraFieldsSerializer):\n class Meta:\n model = Shipping\n fields = '__all__'\n\nclass OwnerSerializer(ExtraFieldsSerializer):\n '''\n Serializer of Place Class.\n '''\n address = AddressSerializer()\n shipping = ShippingSerializer(many=True)\n class Meta:\n model = Place\n fields = (\n 'id',\n 'name',\n 'instagram',\n 'whatsapp',\n 'phone',\n 'address',\n 'shipping'\n )\n\nclass FeatureSerializer(ExtraFieldsSerializer):\n '''\n Serializer of Category Class.\n '''\n class Meta:\n model = FeatureProduct\n fields = '__all__'\n\nclass PriceSerializer(RelatedField):\n '''\n Change the representation for price in the ProductSerializer.\n '''\n def to_representation(self, value):\n size = value.size.id if value.size else None \n presentation = value.presentation.id if value.presentation else None\n return {\n 'size': size,\n 'presentation': presentation,\n 'price': value.price\n }\n\nclass ProductSerializer(ExtraFieldsSerializer):\n '''\n Serialize the data of product.\n '''\n # images = ProductImagesSerializer(many=True, read_only=True)\n size = SizeSerializer(many=True, read_only=True)\n presentation = PresentationSerializer(many=True, read_only=True)\n feature = FeatureSerializer(many=True, read_only=True)\n prices = PriceSerializer(many=True, read_only=True)\n types = TypeSerializer()\n\n class Meta:\n model = Product\n fields = '__all__'\n\nclass PriceListSerializer(ExtraFieldsSerializer):\n '''\n Serialize the data of price list.\n '''\n size = CharField(allow_null=True)\n presentation = CharField(allow_null=True)\n product = ProductSerializer()\n \n class Meta:\n model = PriceList\n fields = \"__all__\"\n extra_fields = [\"get_product_name\"]\n\nclass OrderItemSerializer(ModelSerializer):\n product = PriceListSerializer()\n class Meta:\n model = OrderItem\n exclude = ['order']\n\nclass OrderWhatsAppItemSerializer(ModelSerializer):\n class Meta:\n model = OrderItem\n exclude = ['order']\n\nclass OrderSerializer(ModelSerializer):\n items = OrderItemSerializer(many=True)\n delivery_address = AddressSerializer(allow_null=True)\n class Meta:\n model = Order\n fields = '__all__'\n\nclass OrderWhatsAppSerializer(ModelSerializer):\n items = OrderWhatsAppItemSerializer(many=True)\n class Meta:\n model = Order\n fields = 
'__all__'\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `OrderItem` instance, given the validated data.\n \"\"\"\n items_data = validated_data.pop(\"items\", [])\n # Create the Order\n order = Order.objects.create(**validated_data)\n # Create and add items\n for item in items_data:\n OrderItem.objects.create(order=order, **item)\n # Save order for calculated Total\n order.save()\n return order\n","sub_path":"orders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"565308460","text":"import pyaudio\nimport wave\nimport time\nimport wx\n\nStopRecording = 0\n\n# details for audio file\ndef RecordSound(WAVE_OUTPUT_FILENAME):\n\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 44100\n RECORD_SECONDS = 5\n\n #WAVE_OUTPUT_FILENAME = \"Test.wav\"\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n print(\"* recording\")\n\n frames = []\n\n while (StopRecording == 0):\n data = stream.read(CHUNK)\n frames.append(data)\n wx.Yield()\n\n #for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n #data = stream.read(CHUNK)\n #frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n print(\"* Saving file\")\n\n # Saving the recorded file\n\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()","sub_path":"RecordSound.py","file_name":"RecordSound.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"297554156","text":"import heapq\r\n\r\nli=[]\r\n\r\nedgeg={}\r\ngraph={}\r\nvis={}\r\ndef heuristic(graph,u,v):\r\n heapq.heapify(li)\r\n node=u\r\n\r\n value=edgeg[node]\r\n while node != v:\r\n vis[node]=True\r\n print(node)\r\n for unode in graph[node]:\r\n if not vis[unode[0]]:\r\n heapq.heappush(li,(edgeg[node]+unode[1],unode[0]))\r\n nodeextract=heapq.heappop(li)\r\n node=nodeextract[1]\r\n # heapq.heappush(li,(edgeg[nodeextract[0]],))\r\n print(node)\r\n\r\n\r\n\r\ndef add_edge(graph,u,v,cost):\r\n # if graph.has_key(u):\r\n if u in graph:\r\n graph[u].append((v,cost))\r\n else:\r\n graph[u]=[]\r\n vis[u]=False\r\n graph[u].append((v,cost))\r\n # if graph.has_key(v):\r\n if v in graph:\r\n graph[v].append((u,cost))\r\n else:\r\n graph[v]=[]\r\n vis[v]=False\r\n graph[v].append((u,cost))\r\n\r\n\r\ndef edgeg_update(v,g):\r\n edgeg[v]=g\r\n\r\n\r\n\r\nadd_edge(graph,'a','b',1)\r\nadd_edge(graph,'a','c',1)\r\nadd_edge(graph,'a','d',1)\r\nadd_edge(graph,'b','e',1)\r\nadd_edge(graph,'b','f',1)\r\nadd_edge(graph,'c','g',1)\r\nadd_edge(graph,'c','h',1)\r\nadd_edge(graph,'d','i',1)\r\nadd_edge(graph,'d','j',1)\r\n\r\n\r\nedgeg_update('a',38)\r\nedgeg_update('b',17)\r\nedgeg_update('c',9)\r\nedgeg_update('d',27)\r\nedgeg_update('e',5)\r\nedgeg_update('f',10)\r\nedgeg_update('g',3)\r\nedgeg_update('h',4)\r\nedgeg_update('i',15)\r\nedgeg_update('j',10)\r\n\r\nheuristic(graph,'a','e')\r\nprint('f')\r\n","sub_path":"ao_star.py","file_name":"ao_star.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"494888722","text":"import sys\nsys.path.append(\"/PythonGoogle\")\nclass 
importList:\n previous_search = None\n def json_import(self):\n import json\n with open(\"searches_list.json\", \"r+\") as l:\n search = json.loads(l.read())\n lines = l.readlines()\n if self.previous_search.lower() in search[\"previous_searches\"]:\n l.close()\n else:\n l.seek(0)\n for i in lines:\n if i != \"%s\" %(search):\n l.write(i)\n search[\"previous_searches\"].append('%s' %(self.previous_search.lower()))\n json.dump(search, l)\n l.close()","sub_path":"searchesImporter.py","file_name":"searchesImporter.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"631337466","text":"# -*- coding:utf-8 -*-\n__author__ = 'Gvammer'\nfrom PManager.models import PM_Task, PM_Timer, PM_Task_Message\nfrom PManager.viewsExt.tasks import TaskWidgetManager\nfrom PManager.viewsExt.tools import templateTools\nfrom django import forms\nimport datetime\nfrom django.utils import timezone\n\ndef widget(request, headerValues,a,b):\n class FilterForm(forms.Form):\n fromDate = forms.DateTimeField(required=False)\n toDate = forms.DateTimeField(required=False)\n aUserId = forms.MultipleChoiceField(\n widget=forms.CheckboxSelectMultiple,\n required=False,\n choices=[(user.id,' '.join([user.last_name,user.first_name]))\n for user in TaskWidgetManager.getUsersThatUserHaveAccess(request.user, headerValues['CURRENT_PROJECT'])\n ])\n\n filterForm = FilterForm(\n data=request.GET\n )\n cur_user_access_projects = [v['id'] for v in request.user.get_profile().getProjects().values('id')]\n weeksDelta = 4\n\n dateStart = datetime.datetime.now() - datetime.timedelta(weeks=weeksDelta)\n dateEnd = None\n users_id = filterForm['aUserId'].value()\n if filterForm.is_valid() and users_id:\n users_id_tmp = []\n for uId in users_id:\n users_id_tmp.append(int(uId))\n users_id = users_id_tmp\n del users_id_tmp\n\n dateStart = templateTools.dateTime.convertToDateTime(filterForm['fromDate'].value()) if filterForm['fromDate'].value() else None\n dateEnd = templateTools.dateTime.convertToDateTime(filterForm['toDate'].value()) if filterForm['toDate'].value() else None\n if dateEnd:\n dateEnd += datetime.timedelta(days=1) #include all day of end of range\n\n if dateStart and not dateEnd:\n dateEnd = datetime.datetime.now()\n else:\n users_id = []\n\n allUsers = TaskWidgetManager.getUsersThatUserHaveAccess(request.user, headerValues['CURRENT_PROJECT'])\n users = allUsers.filter(pk__in=users_id)\n\n filterProject = int(request.GET.get('project', 0))\n if filterProject:\n cur_user_access_projects = [filterProject]\n\n for user in users:\n profile = user.get_profile()\n if profile.avatar:\n profile.avatar = str(profile.avatar).replace('PManager', '')\n setattr(user,'profile',profile)\n\n if not user.email and user.username.find('@'):\n setattr(user, 'email', user.username)\n\n # query = 'SELECT SUM(`seconds`) as summ, id, user_id, task_id, dateStart, dateEnd from PManager_pm_timer' + \\\n # ' WHERE `user_id`=' + str(int(user.id)) + \\\n # ' AND `dateStart` > \\'' + str(dateStart) + '\\'' + \\\n # ((' AND `dateStart` < \\'' + str(dateEnd) + '\\'') if dateEnd else '') + \\\n # ' GROUP BY `task_id` ORDER BY `dateStart` DESC'\n # timers = PM_Timer.objects.raw(query)\n timers = PM_Timer.objects.filter(dateEnd__gt=dateStart, user=user)\n if dateEnd:\n timers = timers.filter(dateEnd__lt=dateEnd)\n\n arTaskTime = []\n allUserTime = 0\n allCommentsQty = 0\n allFilesQty = 0\n for timer in timers:\n try:\n task = timer.task\n if task.project.id not in 
cur_user_access_projects:\n continue\n\n comments = PM_Task_Message.objects.filter(task=task, author=user)\n\n if dateEnd:\n comments = comments.filter(dateCreate__lt=dateEnd)\n\n if dateStart:\n comments = comments.filter(dateCreate__gt=dateStart)\n\n if timer.seconds:\n allUserTime += int(timer.seconds)\n\n allCommentsQty += comments.count()\n arTaskTime.append({\n 'comments_qty': comments.count(),\n 'task': task,\n 'timer': timer,\n 'date': timer.dateEnd\n })\n\n except PM_Task.DoesNotExist:\n pass\n\n setattr(user, 'taskTime', arTaskTime)\n setattr(user, 'allTime', PM_Timer(seconds=allUserTime) if allUserTime else None)\n setattr(user, 'all_comments_qty', allCommentsQty)\n setattr(user, 'all_files_qty', allFilesQty)\n\n closedTaskQty = PM_Task.objects.filter(resp=user, active=True)\n commentsQty = PM_Task_Message.objects.filter(author=user)\n if dateEnd:\n closedTaskQty = closedTaskQty.filter(dateClose__lt=dateEnd)\n commentsQty = commentsQty.filter(dateCreate__lt=dateEnd)\n\n if dateStart:\n closedTaskQty = closedTaskQty.filter(dateClose__gt=dateStart)\n commentsQty = commentsQty.filter(dateCreate__gt=dateStart)\n\n closedTaskQty = closedTaskQty.count()\n commentsQty = commentsQty.count()\n\n setattr(user, 'closedTaskQty', closedTaskQty)\n setattr(user, 'commentsQty', commentsQty)\n\n return {\n 'users': users,\n 'allUsers': allUsers,\n 'filterForm': filterForm,\n 'now': templateTools.dateTime.convertToSite(timezone.make_aware(datetime.datetime.now(), timezone.get_current_timezone())),\n 'week_ago': templateTools.dateTime.convertToSite(timezone.make_aware(datetime.datetime.now(), timezone.get_current_timezone()) - datetime.timedelta(days=7)),\n 'title': u'Статистика пользователей'\n }","sub_path":"PManager/widgets/user_statistic/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"399005596","text":"\n# getting card number\ncard_number = input(\"Number: \")\n\nnumber_length = len(card_number)\n\ncard_array = list()\n\nfor digit in card_number:\n card_array.insert(0, int(digit))\n\n# calculating the sum by luhn's algorithm\ncard_sum = 0\n\nfor i in range(1, number_length + 1):\n\n if i % 2 == 0:\n mpy_digit = 2 * card_array[i - 1]\n\n if mpy_digit < 10:\n card_sum += mpy_digit\n\n else:\n card_sum += mpy_digit % 10 + 1\n\n else:\n card_sum += card_array[i - 1]\n\n\n# print the message that says which card number it is\n# or if it is not a card number\nif card_sum % 10 != 0:\n print(\"INVALID\\n\")\n\nelse:\n second_digit = card_array[-2]\n\n if second_digit > 0 and second_digit < 6:\n aux_verifier = 1\n\n else:\n aux_verifier = 0\n\n if card_array[-1] == 3 and number_length == 15 and (second_digit == 4 or second_digit == 7):\n print(\"AMEX\\n\")\n\n elif card_array[-1] == 5 and number_length == 16 and aux_verifier == 1:\n print(\"MASTERCARD\\n\")\n\n elif card_array[-1] == 4 and (number_length == 13 or number_length == 16):\n print(\"VISA\\n\")\n\n else:\n print(\"INVALID\\n\")\n\n","sub_path":"pset6/pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"482722172","text":"from google.cloud import pubsub_v1\nimport os\nimport json\n\nfrom tests.test_base import BaseUnitTest\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"./credentials/as-dev-ian-0ef537352615.json\" #mac\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = 
\"c:\\\\auth_keys\\\\as-dev-gord-1522f36e41ad.json\" #windows\ntopic_name = \"\"\nproject_id = \"\"\ndataset_id = BaseUnitTest.DATASET\ndate_shard = BaseUnitTest.DATE\n\npublisher = pubsub_v1.PublisherClient()\ntopic_path = publisher.topic_path(project_id, topic_name)\n\nSAMPLE_LOAD_DATA = {\"protoPayload\": {\n \"serviceData\": {\"jobCompletedEvent\": {\"job\": {\"jobConfiguration\": {\"load\": {\"destinationTable\": {\n \"datasetId\": dataset_id\n , \"projectId\": project_id\n , \"tableId\": \"ga_sessions_%s\" % date_shard\n }}}}}}}}\n\nprint('Publishing backfill message to topic %s for %s.%s.ga_sessions_%s' % (topic_name,project_id, dataset_id, date_shard))\npublisher.publish(topic_path, json.dumps(SAMPLE_LOAD_DATA).encode('utf-8'), origin='python-unit-test'\n , username='gcp')\n\n","sub_path":"tools/pubsub_message_publish.py","file_name":"pubsub_message_publish.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"428614952","text":"__author__ = \"navodissa\"\n\nfrom http.server import BaseHTTPRequestHandler,HTTPServer\n\nPORT_NUMBER = 8080\n\n#This class will handle any incoming request from the browser\n\nclass myHandler(BaseHTTPRequestHandler):\n\t\"\"\"Request handler that serves a simple HTML response.\"\"\"\n\t# Handler for the GET requests\n\tdef do_GET(self):\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type','text/html')\n\t\tself.end_headers()\n\t\t#Send the HTML messages\n\t\tself.wfile.write(bytes(\"Hello\", 'UTF-8'))\n\t\treturn\n\ntry:\n\t#Create a web server and define the handler to manage the incoming request\n\tserver = HTTPServer(('',PORT_NUMBER), myHandler)\n\tprint (\"Started httpserver on port\", PORT_NUMBER)\n\n\t#Wait forever for incoming http requests\n\tprint (\"Server in listening state\")\n\tserver.serve_forever()\n\nexcept KeyboardInterrupt:\n\tprint (\"^C received, shutting down the web server\")\n\tserver.socket.close()\n","sub_path":"PluralSight/httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"307956982","text":"import pymc as pm\n\ndef handin_model(data):\n N_H = len(data)\n \n # Containers\n T_mu = dict()\n T_tau = dict()\n B_mu = dict()\n B_tau = dict()\n \n # Loop of each handin\n for h in range(0, N_H):\n h_id = data[h].title\n scores = data[h].gradeings.items()\n \n N_G = len(scores)\n T_tau[h_id] = pm.Gamma('T_tau_%s' % str(h_id),10,0.1)\n T_mu[h_id] = pm.Normal('T_mu_%s' % str(h_id),0.5,1*T_tau[h_id])\n\n # Loop over each grader who have graded the handin\n for g in range(0, N_G):\n (g_id,val) = scores[g]\n \n if g_id not in B_mu and g_id not in B_tau:\n B_tau[g_id] = pm.Gamma('B_tau_%s' % str(g_id), 50, 0.1)\n B_mu[g_id] = pm.Normal('B_mu_%s' % str(g_id),0,1*B_tau[g_id])\n\n @pm.observed\n def Obs(value=val, B_mu=B_mu[g_id], B_tau=B_tau[g_id], T_mu=T_mu[h_id], T_tau=T_tau[h_id]):\n return pm.normal_like(value, B_mu+T_mu, B_tau+T_tau)\n \n collection = [pm.Container(T_mu),\n pm.Container(T_tau),\n pm.Container(B_mu),\n pm.Container(B_tau)]\n \n model = pm.Model(collection)\n map_ = pm.MAP(model)\n map_.fit()\n return model","sub_path":"Thesis/code/pymc_model.py","file_name":"pymc_model.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"94450405","text":"# https://osf.io/upav8/\n\nimport 
cProfile, pstats, io\n\n\"\"\" Modified from profile_util.py \"\"\"\ndef profile(*args, **kwargs):\n \n \"\"\"A decorator that uses cProfile to profile a function\"\"\"\n\n def inner(fnc):\n filename = kwargs['filename']\n del kwargs['filename']\n\n pr = cProfile.Profile()\n pr.enable()\n retval = fnc(*args, **kwargs)\n print(\"in profile:inner()\")\n print(f\"type(retval): {type(retval)}, len(retval): {len(retval)}\")\n \n pr.disable()\n\n print(\"--------------\")\n\n # https://stackoverflow.com/questions/2513479/redirect-prints-to-log-file\n # https://stackoverflow.com/questions/616645/how-to-duplicate-sys-stdout-to-a-log-file/2216517#2216517\n # setup redirect print to 'cprofile.log'\n import sys\n old_stdout = sys.stdout\n log_file = open(filename, \"w\")\n sys.stdout = log_file\n print(\"print writes to cprofile.log\\n\")\n # end\n\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n\n # restore sys.stdout\n sys.stdout = old_stdout\n log_file.close()\n # end\n\n return retval\n\n return inner\n","sub_path":"python/profiling/cprofile/profile_to_logfile_param.py","file_name":"profile_to_logfile_param.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"90457932","text":"import os\nimport sys\nimport time\n\ndef menu():\n print(\"Package Manger v1.0.0\")\n choice = raw_input(\"\"\"\n A: Uninstall Linux Mint Tools\n B: Uninstall Ubute Tools\n C: Return to toolr\n E: Erase toolr and all tools\n Please enter your choice: \"\"\")\n\n if choice == \"A\" or choice == \"a\":\n os.system(\"sudo bash ~/toolr/scripts/uninstall_mint.sh\")\n elif choice == \"B\" or choice == \"b\":\n os.system(\"sudo bash ~/toolr/scripts/uninstall_18.4.sh\")\n elif choice == \"C\" or choice == \"c\":\n os.system(\"python main.py\")\n elif choice == \"E\" or choice == \"e\":\n os.system(\"sudo bash ~/toolr/scripts/EraseAll.sh\")\n\n else:\n print(\"You must only select either A,B,C or E\")\n print(\"Please try again\")\n menu()\nmenu()\n","sub_path":"v1.00-beta/PackageManger.py","file_name":"PackageManger.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"648064104","text":"import numpy as np\nimport serial\nimport struct\nfrom bisect import bisect_left\nfrom collections import deque\nfrom typing import Tuple, List, NamedTuple, Sized, Iterable\nimport itertools\nfrom localizator.receiver import Receiver, SliceDeck\nfrom localizator.dft import DFT\nfrom localizator.MLE import MLE\nfrom localizator.math_tools import gcc_phat\nfrom localizator.sound_detector import SoundDetector\n\nimport matplotlib.pyplot as plt\nimport librosa\nimport librosa.display\n\n\nclass HistoryEvent(object):\n def __init__(self, start_idx: int, end_idx: int, result: List[np.ndarray]):\n self.start_idx = start_idx\n self.end_idx = end_idx\n self.result = result\n\n def decrement_indexes(self, value) -> bool:\n self.start_idx -= value\n self.end_idx -= value\n return self.start_idx < 0\n\n\nclass DebugHistory(object):\n\n def __init__(self, data_chunk: int, buffer_size: int):\n self.data_buffer = SliceDeck(maxlen=buffer_size)\n self._data_chunk = data_chunk\n self._events: List[HistoryEvent] = []\n self._time_offset = 0\n\n def extend_data(self, data: Sized):\n if len(self.data_buffer) + len(data) > self.data_buffer.maxlen:\n self._time_offset += len(data)\n\n 
self.data_buffer.extend(data)\n\n def append_event(self, overall_idx: int, s_idx, e_idx: int, result: List[np.ndarray]):\n\n if e_idx > self._data_chunk:\n overall_idx -= 1\n s_idx -= self._data_chunk\n e_idx -= self._data_chunk\n\n l_bound = overall_idx * self._data_chunk + s_idx\n u_bound = overall_idx * self._data_chunk + e_idx\n\n self._events.append(HistoryEvent(l_bound, u_bound, result))\n\n def plot(self, env_history: np.ndarray = np.array([])):\n time_axis = range(self._time_offset, self._time_offset + len(self.data_buffer))\n plt.figure(figsize=(18, 10))\n plt.plot(time_axis, np.array(self.data_buffer), 'b.-')\n plt.axhline(y=12000)\n plt.axhline(y=7000)\n\n for event in self._events:\n if event.start_idx >= self._time_offset:\n plt.axvspan(event.start_idx, event.end_idx, facecolor='#2ca02c', alpha=0.5)\n\n plt.plot(time_axis, np.array(env_history), 'r')\n plt.tight_layout(rect=[0.02, 0.03, 1, 0.95])\n plt.xlabel(\"Sample number\", fontsize=20)\n plt.ylabel(\"ADC value\", fontsize=20)\n plt.savefig(\"signal.png\")\n plt.show()\n\n\nclass SensorMatrix(object):\n\n class InvalidInput(Exception):\n pass\n\n def __init__(self,\n receiver_coords: List[Tuple[float, float, float]],\n reference_rec_id: int = 0,\n rec_buff_size: int = 4096 * 2,\n sampling_freq: int = 41666,\n data_chunk: int = 4096,\n debug: bool = False):\n\n receivers: List[Receiver] = [Receiver(rec[0], rec[1], rec[2], buffer_size=rec_buff_size)\n for rec in receiver_coords]\n debug_buff_size = 120 * data_chunk\n self._sound_detector = SoundDetector(0.9993, debug_buff_size)\n self._mle_calc = MLE(receivers, src_conditions=lambda src: 0 <= src[2] < 2.0, reference_rec_id=reference_rec_id)\n self._data_chunk = 4096\n self._dft = DFT(512, sampling_freq)\n self._rec_dft_buff = np.array([])\n self._serial_settings = {\n \"channelNr\": 4,\n \"port\": '/dev/ttyACM0',\n \"baud\": 2000000,\n \"timeout\": 1,\n \"resultSize_bytes\": 2\n }\n self._recognition_settings = {\n \"lowSpectrum\": 7000,\n \"highSpectrum\": 12000,\n \"minPart\": 0.05,\n \"noiseFloor\": 5000\n }\n\n self.debug = debug\n\n self.debug_history = DebugHistory(data_chunk, debug_buff_size)\n\n def start_cont_localization(self, input_src: str = \"serial\", filename=\"input.wav\"):\n byte_count = self._serial_settings[\"channelNr\"] * self._data_chunk * self._serial_settings[\"resultSize_bytes\"]\n Receiver.isSimulation = False\n\n if input_src == \"wav\":\n import wave\n\n with wave.open(filename, \"rb\") as wav:\n self._dft.sampling_rate = wav.getframerate()\n self._serial_settings[\"channelNr\"] = wav.getnchannels()\n length = wav.getnframes() // self._data_chunk\n\n for idx in range(0, length):\n input_bytes = wav.readframes(self._data_chunk)\n self.localize(input_bytes, idx)\n\n if self.debug:\n self.debug_history.plot(self._sound_detector.env_history)\n else:\n with serial.Serial(self._serial_settings[\"port\"],\n self._serial_settings[\"baud\"],\n timeout=self._serial_settings[\"timeout\"]) as ser:\n while ser.is_open:\n input_bytes = ser.read(byte_count)\n self.localize(input_bytes)\n\n def localize(self, raw_data: bytes, idx: int = 0):\n \"\"\"Performs the whole localization process: check for searched signal, and if it is found calculate the\n src position, returns true if it was detected and false otherwise(for statistics)\"\"\"\n byte_len = len(raw_data)\n frames: List[int] = struct.unpack(\"{}h\".format(byte_len // self._serial_settings[\"resultSize_bytes\"]),\n raw_data)\n recs = self._mle_calc.receivers\n energy = []\n\n # Split data into separate 
channels\n for ch_id in range(0, self._serial_settings[\"channelNr\"]):\n ch_data = np.array(frames[ch_id::self._serial_settings[\"channelNr\"]], dtype=np.float32)\n energy.append(sum(map(lambda x: x * x, ch_data)))\n recs[ch_id].data_buffer.extend(ch_data)\n\n # claculate energy of all channels na choose the strongest\n strongest_idx: int = np.argmax(energy)\n\n if self.debug:\n self.debug_history.extend_data(recs[strongest_idx].data_buffer[self._data_chunk::])\n\n signal_buffer = recs[strongest_idx].data_buffer\n\n self._sound_detector.detect_sound(signal_buffer, 12000, 7000, data_offset=self._data_chunk)\n\n while len(self._sound_detector.events) > 0:\n l_idx, h_idx, s_mic = self._sound_detector.events.pop()\n\n is_event = self.is_event_detected(signal_buffer[l_idx: h_idx])\n\n if is_event:\n # find TdoA\n self.calculate_tdoa(l_idx, h_idx)\n # calculate src\n res = self.estimate_src_position()\n print(\"calculation result:{}\".format(res))\n self.debug_history.append_event(idx, l_idx, h_idx, res)\n # send to server\n\n def update_receiver_pos(self, positions: List[Tuple[float, float, float]], ref_id: int = 0):\n \"\"\"Updates the spatial positions of all microphones connected to the array. If less than 4 new positions are\n provided, then only first few will be updated. If more than 4 values are provided it raises InvalidInput\n Exception\"\"\"\n\n if len(positions) > len(self._mle_calc.receivers):\n raise SensorMatrix.InvalidInput(\"Too large position array to update only 4 receiver location!\")\n\n for [idx, pos] in enumerate(positions):\n self._mle_calc.receivers[idx].position = pos\n\n self._mle_calc.ref_rec = ref_id\n\n def get_raw_data(self):\n pass\n\n def is_event_detected(self, sound_signal: Iterable) -> bool:\n \"\"\"Detects if the ping pong ball hit was registered. This is done in a simple fashion by taking into account\n only frequencies from certain range specified in recognition settings. Then the spectrum is calculated and\n smoothed by moving average and compared via normalized cross-correlation to the saved sound pattern\"\"\"\n\n sound_signal = np.array(sound_signal, np.float32)\n\n D = librosa.stft(sound_signal, n_fft=64)\n spectrogram = np.abs(D)\n spectrogram_db = librosa.amplitude_to_db(spectrogram, ref=np.max)\n frequencies = np.linspace(0, 41666, 33)\n pos_l = bisect_left(frequencies, self._recognition_settings[\"lowSpectrum\"])\n pos_h = bisect_left(frequencies, self._recognition_settings[\"highSpectrum\"])\n\n spec_slice = np.mean(spectrogram_db[pos_l:pos_h, :], axis=0)\n bounce_idx = [idx for idx, el in enumerate(spec_slice) if el >= -32.0]\n\n if len(bounce_idx) >= 3:\n return True\n\n return False\n\n def calculate_tdoa(self, s_idx: int, e_idx: int):\n \"\"\"Calculates TDoA between all receivers and reference one in the sensor matrix. 
Results are stored within\n receiver object\"\"\"\n\n # extract bounce sound and its surrounding from rec buffers\n l_bound = s_idx - self._dft.dft_size + 1\n u_bound = s_idx + self._dft.dft_size - 1\n\n if l_bound < 0:\n l_bound = 0\n\n if u_bound >= len(self._mle_calc.receivers[0].data_buffer):\n u_bound = len(self._mle_calc.receivers[0].data_buffer)\n\n bounce_data = [rec.data_buffer[l_bound: u_bound] for rec in self._mle_calc.receivers]\n\n for rec_idx in range(1, self._serial_settings[\"channelNr\"]):\n\n delay, hist = gcc_phat(bounce_data[rec_idx], bounce_data[0], self._dft, phat=True,\n delay_in_seconds=True, buffered_dft=False)\n self._mle_calc.receivers[rec_idx].tDoA = delay\n if self.debug:\n print(delay)\n plt.figure(figsize=(18, 10))\n plt.subplot(311)\n plt.plot(bounce_data[0], label=\"mic_1\")\n plt.plot(bounce_data[1], label=\"mic_2\")\n plt.legend()\n plt.subplot(312)\n plt.plot(bounce_data[0], label=\"mic_1\")\n plt.plot(bounce_data[2], label=\"mic_3\")\n plt.legend()\n plt.subplot(313)\n plt.plot(bounce_data[0], label=\"mic_1\")\n plt.plot(bounce_data[3], label=\"mic_4\")\n plt.xlabel(\"Sample number\")\n #plt.tight_layout(rect=[0.00, 0.03, 1, 0.95])\n plt.legend()\n plt.show()\n\n def estimate_src_position(self) -> List[np.ndarray]:\n r1 = self._mle_calc.calculate()\n r2 = self._mle_calc.get_other_solution()\n r1 = np.squeeze(np.asarray(r1))\n r2 = np.squeeze(np.asarray(r2))\n return [r1, r2]\n\n def simulate_wave_propagation(self, src_pos: Tuple[float, float, float]) -> List[np.ndarray]:\n \"\"\"Simulates the propagation of the sound from given src points and based on time differences, recalculates\n the position of the sound source. Returns both roots found during the process, with first one being chosen\n by the algorithm as the correct one\"\"\"\n\n Receiver.set_source_position(src_pos)\n Receiver.isSimulation = True\n for rec in self._mle_calc.receivers:\n rec.receive()\n return self.estimate_src_position()","sub_path":"localizator/sensor_matrix.py","file_name":"sensor_matrix.py","file_ext":"py","file_size_in_byte":11091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"475091206","text":"# coding=utf-8\nimport pika\nfrom pika.credentials import PlainCredentials\nimport time\nimport random\n\nauth = PlainCredentials(username='user', password='123')\np = pika.ConnectionParameters(host='localhost', port=5672, credentials=auth)\nconnection = pika.BlockingConnection(p)\nchannel = connection.channel()\n\nchannel.basic_qos()\n\nchannel.exchange_declare(exchange='log', exchange_type='topic')\nchannel.queue_declare(queue='error_log')\nchannel.queue_bind(queue='error_log', exchange='log', routing_key='log.error.#')\n\nfor i in range(1000):\n channel.basic_publish(exchange='log', routing_key=f'log.error.error_{random.randint(0,10)}',\n body=f'data{random.randint(0,10)}')\n time.sleep(0.01)\n\n\ndef consume_log(ch, method, properties, body):\n print('received data', body)\n\n\nchannel.basic_consume(consumer_callback=consume_log, queue='error_log', no_ack=True)\n# this line blocks\nchannel.start_consuming()\n\ninput('press any key to quit')\nchannel.close()\nconnection.close()\nprint('finish')\n","sub_path":"test_/test/db_test/rabbitmq_test.py","file_name":"rabbitmq_test.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"495219830","text":"def min(x,y):\n if x>y:\n return y\n else:return x\n\ndef distance(x1,y1,x2,y2):\n 
distance_ab=(x1-x2)**2+(y1-y2)**2\n distance_ab=distance_ab**0.5\n return distance_ab\n\ndef listsort(x,y):\n list_sort=sorted(x)\n y_sorted=[]\n for i in range(len(x)):\n location=x.index(list_sort[i])\n x[location]=\"NONE\"\n y_sorted.append(y[location])\n return list_sort,y_sorted\n\ndef divide(x_list,y_list):\n mid_xlist=[]\n mid_ylist=[]\n mid_ylist_sorted=[]\n mid_xlist_sorted=[]\n if len(x_list)==2:\n return distance(x_list[0],x_list[1])\n if len(x_list)==3:\n length_number1=min(distance(x_list[0],y_list[0],x_list[1],y_list[1]),distance(x_list[1],y_list[1],x_list[2],y_list[2]))\n length_number=min(length_number1,distance(x_list[0],y_list[0],x_list[2],y_list[2]))\n return length_number\n\n left_number_xlist=x_list[:(len(x_list)//2)]\n left_number_ylist=y_list[:len(x_list)//2]\n right_number_xlist=x_list[(len(x_list)//2):]\n right_number_ylist=y_list[(len(x_list)//2):]\n left_number_d=divide(left_number_xlist,left_number_ylist)\n print(\"left_number_d=\",left_number_d)\n right_number_d=divide(right_number_xlist,right_number_ylist)\n print(\"right_number_d\",right_number_d)\n\n d=min(left_number_d,right_number_d)\n l=left_number_xlist[-1]+right_number_xlist[0]\n l=l//2\n for i in range(len(x_list)):\n if x_list[i]>(l-d) and x_list[i]<(l-d):\n mid_xlist.append(x_list[i])\n mid_ylist.append(y_list[i])\n mid_ylist_sorted,mid_xlist_sorted=listsort(mid_ylist,mid_xlist)\n for i in range(len(mid_ylist_sorted)):\n for j in range(len(mid_ylist_sorted)):\n if i==j:\n pass\n if mid_ylist_sorted[j]>mid_ylist_sorted[i]-d and mid_ylist_sorted[j] Invalid CT directory address...\")\n return None\n \n elif len(file_list) > 2000:\n warnings.warn(\"Too many slices. It might cause memory issues...\")\n\n \n file_list.sort()\n\n while file_list[0][0] == '.':\n del file_list[0]\n \n slices = [dcm.read_file(ct_dir + '/' + s, force=True) for s in file_list]\n \n\n return slices\n\n\ndef read_slice(slice_dir):\n try:\n ds = dcm.read_file(slice_dir)\n return ds\n \n except:\n print(\"File reading error\")\n return None\n\n\ndef get_image(dicom_data):\n img = dicom_data.pixel_array\n min_v = img.min()\n max_v = img.max()\n\n img = (img - min_v)/(max_v - min_v + 1e-6)\n img = img * 255\n\n return img.astype(np.uint8)\n\n\ndef get_image_hu(dicom_data):\n hu = dcm.pixel_data_handlers.util.apply_modality_lut(dicom_data.pixel_array,dicom_data)\n return hu.astype(np.int16)\n\n\ndef get_windowed_image(dicom_data, wtype=None):\n \"\"\"\n Rescales a CT scan Slice image to a specific windowing\n inputs:\n dicom_dataset: a dicom DataSet\n wtype: a string or tuple, tuple consists (WindowCenter, WindowWidth) or string can be\n 'lung', 'brain', 'bone', 'liver', 'tissues', 'mediastinum'\n output:\n a numpy 2-d array of result image\n \"\"\"\n\n if wtype is None:\n wc = -600\n ww = 1500\n \n elif type(wtype) == str:\n tmp = get_window_values(wtype)\n wc = tmp[0]\n ww = tmp[1]\n \n elif type(wtype) == tuple:\n wc = wtype[0]\n ww = wtype[1]\n \n else:\n print (\"Error -> Invalid wtype argument...\")\n \n hf_img = dcm.pixel_data_handlers.util.apply_modality_lut(dicom_data.pixel_array,dicom_data)\n dicom_data.WindowCenter = wc\n dicom_data.WindowWidth = ww\n res = dcm.pixel_data_handlers.util.apply_voi_lut(hf_img, dicom_data, index=0)\n \n return res\n\n\ndef get_window_values(wtype=\"lung\"):\n hf_values = {\n \"lung\" : (-600, 1500),\n \"mediastinum\": (50, 350) ,\n \"tissues\" : (50, 400) ,\n \"liver\" : (30, 150) ,\n \"brain\" : (40, 80) ,\n \"bone\" : (400, 1800)\n }\n\n return 
hf_values[wtype]\n","sub_path":"dicom_utils/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"484089662","text":"import numpy\nimport gseries.generators\nimport gseries.linear_model as linear_model\nimport pandas\nimport statsmodels.tsa.arima_model as arima_model\nimport gseries.tsa\nimport statsmodels.tsa.ar_model\n\nEPSILON = 0.5e-8\n\ndef arp_test1(p, ar, y):\n print(\"ar({p})\".format(p=p))\n model = gseries.tsa.ARMA(y, (p, 0))\n fit = model.fit()\n ssr = numpy.sum(numpy.square(fit.resid))\n mean = numpy.mean(y)\n sst = numpy.sum(numpy.array(tuple(map(lambda o: numpy.square(o - mean), y))))\n r2 = 1 - ssr / sst\n \n print(fit.summary())\n print(\"R-squared: {r2}\".format(r2 = r2))\n print(\"Polynomial: {p}\".format(p = ar.polynomial.coef))\n print()\n \ndef fit(model, polynomial, method):\n fit = model.fit(method = method)\n print(fit.summary())\n print(\"method={method}, Polynomial: {p}\".format(p = polynomial.coef, method=method))\n print()\n\ndef arp_test2(p):\n print(\"ar2({p}), Generating series\".format(p=p))\n ar = gseries.generators.AR.create(p) \n y = ar.sample(1000) \n\n print(\"ar2({p}), Creating Matrices\".format(p=p))\n A, B = gseries.math.ar_matrices(y, p, 10)\n\n print(\"ar2({p}), Creating Model\".format(p=p))\n model = linear_model.OLS(B, A)\n\n print(\"ar2({p}), Fitting Matrices\".format(p=p))\n fit(model, ar.polynomial, \"pinv\")\n fit(model, ar.polynomial, \"qr\")\n arp_test1(p, ar, y)\n\nif __name__ == \"__main__\":\n# arp_test2(8)\n# arp_test2(10)\n# arp_test2(12)\n arp_test2(12)\n","sub_path":"tests/ar_test.py","file_name":"ar_test.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"310232258","text":"# -*- coding: utf-8 -*- \nfrom httplib2 import Http\nfrom apiclient import discovery\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport urllib\nfrom google.appengine.ext import ndb\nfrom Model.dbModel import dbModel\nfrom Common import Common\nimport googleMapLocation\nimport ConfigParser\nimport os\nCONFIGPARSER = ConfigParser.ConfigParser()\n\ndef syncExcelToDB(): \n\tCONFIGPARSER.read(os.path.join(os.path.dirname(__file__), '..', Common.ConfigFolder, 'config.ini'))\n\tspreadsheetId = CONFIGPARSER.get(\"DEFAULT\", \"excelsheetid\")\n\tsheetList = [u'日',u'一',u'二',u'三',u'四',u'五',u'六']\n\tservice = discovery.build('sheets', 'v4', developerKey=CONFIGPARSER.get(\"DEFAULT\", \"apikey\"),discoveryServiceUrl='https://sheets.googleapis.com/$discovery/rest?version=v4')\n\t\n\tresult = service.spreadsheets().values().batchGet(spreadsheetId=spreadsheetId,ranges=sheetList).execute()\n\tresponseSheet = result.get('valueRanges', [])\t\n\tweekDay = Common.getTodayWeekDay()\n\texistlocationDict =googleMapLocation.getExistLocationToDict()\n\tnewLocationDict = {}\n\tbadmintonInfoList = []\n\tcoordinate = None\n\taddress = None\n\tfor index,sheet in enumerate( responseSheet):\n\t\trows =sheet.get('values',[])\n\t\tfor rowIndex,row in enumerate( rows): \n\t\t\tif rowIndex == 1 or rowIndex ==0:\n\t\t\t\tcontinue\n\t\t\tbadmintonInfo = dbModel.badmintonInfo() \n\t\t\ttry:\n\t\t\t\tif len(row) > 8:\n\t\t\t\t\tbadmintonInfo.location = row[1]\n\t\t\t\t\tif existlocationDict is not None and badmintonInfo.location.encode('UTF-8') not in existlocationDict:\n\t\t\t\t\t\tcoordinate,address = googleMapLocation.getLocationInfo(row[3]) 
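# geocode only venues that are not already cached in existlocationDict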
\n\t\t\t\t\tif coordinate is not None:\n\t\t\t\t\t\tbadmintonInfo.address = address\n\t\t\t\t\t\tbadmintonInfo.lat = coordinate['lat']\n\t\t\t\t\t\tbadmintonInfo.lng = coordinate['lng']\n\t\t\t\t\t\tnewLocationDict = googleMapLocation.locationToDict(newLocationDict,existlocationDict,badmintonInfo.location,address,coordinate)\n\t\t\t\t\tbadmintonInfo.payInfo = Common.convertToInt(row[6])\n\t\t\t\t\tbadmintonInfo.contactName = row[7]\n\t\t\t\t\tbadmintonInfo.contactPhone = row[8]\n\t\t\t\t\tbadmintonInfo.startTime = Common.convertToDateTime(row[0].split(\"~\")[0],index -weekDay)\t\n\t\t\t\t\tbadmintonInfo.endTime = Common.convertToDateTime(row[0].split(\"~\")[1],index -weekDay)\n\t\t\t\t\tbadmintonInfo.weekDay = sheetList[index]\n\t\t\t\t\tbadmintonInfo.weekDayInt = index\n\t\t\t\t\tbadmintonInfo.source = \"excel\"\n\t\t\t\t\tbadmintonInfo.line = row[9] if len(row) > 9 else \"\"\n\t\t\t\t\tbadmintonInfo.sourceData = \",\".join(row ) \n\t\t\t\t\t#break\n\t\t\tfinally:\t\t\t\t\t\n\t\t\t\tbadmintonInfoList.append(badmintonInfo)\n\tif len(badmintonInfoList) > 0:\n\t\tndb.put_multi(badmintonInfoList)\n\t\tinsertLocationInfoList(newLocationDict)\n\ttask = dbModel.AutoTask()\n\ttask.name =\"Auto Task sync\"\n\ttask.put()\n\treturn \"OK\"\ndef removeDateFromSource(source):\n\tdelete_all = dbModel.badmintonInfo.query(dbModel.badmintonInfo.source == source).fetch(keys_only=True)\n\tif delete_all:\n\t\tndb.delete_multi(delete_all)\ndef removeLocation():\n\tdelete_all = dbModel.locationInfo.query().fetch(keys_only=True)\n\tif delete_all:\n\t\tndb.delete_multi(delete_all)\n\n\n\ndef insertLocationInfoList(locationDict):\n\tlocationList = []\n\tif locationDict is not None:\t\n\t\tfor key in locationDict:\n\t\t\tlocation = dbModel.locationInfo()\n\t\t\tlocation.address = locationDict[key]['address']\n\t\t\tlocation.lat = locationDict[key]['lat']\n\t\t\tlocation.lng = locationDict[key]['lng']\n\t\t\tlocation.name = key\n\t\t\tlocationList.append(location)\n\tif len(locationList) > 0:\t\t\n\t\tndb.put_multi(locationList)\n","sub_path":"apiHandler/googleExcelcrawler.py","file_name":"googleExcelcrawler.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"408561803","text":"#!/usr/bin/env python\n# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com\n#\n# Part of \"Nuitka\", an optimizing Python compiler that is compatible and\n# integrates with CPython, but also works on its own.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\" Tool to automatically format source code in Nuitka style.\n\n\"\"\"\n\nimport os\nimport re\nimport shutil\n\nfrom nuitka.Tracing import my_print\n\n\ndef cleanupWindowsNewlines(filename):\n \"\"\" Remove Windows new-lines from a file.\n\n Simple enough to not depend on external binary.\n \"\"\"\n\n source_code = open(filename, \"rb\").read()\n\n updated_code = source_code.replace(b\"\\r\\n\", b\"\\n\")\n updated_code = updated_code.replace(b\"\\n\\r\", b\"\\n\")\n\n if 
updated_code != source_code:\n my_print(\"Fixing Windows new lines for\", filename)\n\n with open(filename, \"wb\") as out_file:\n out_file.write(updated_code)\n\n\ndef _updateCommentNode(comment_node):\n if \"pylint:\" in str(comment_node.value):\n\n def replacer(part):\n def renamer(pylint_token):\n # pylint: disable=too-many-branches,too-many-return-statements\n if pylint_token == \"E0602\":\n return \"undefined-variable\"\n elif pylint_token in (\"E0401\", \"F0401\"):\n return \"import-error\"\n elif pylint_token == \"E1102\":\n return \"not-callable\"\n elif pylint_token == \"E1133\":\n return \" not-an-iterable\"\n elif pylint_token == \"E1128\":\n return \"assignment-from-none\"\n # Save line length for this until isort is better at long lines.\n elif pylint_token == \"useless-suppression\":\n return \"I0021\"\n # elif pylint_token == \"I0021\":\n # return \"useless-suppression\"\n elif pylint_token == \"R0911\":\n return \"too-many-return-statements\"\n elif pylint_token == \"R0201\":\n return \"no-self-use\"\n elif pylint_token == \"R0902\":\n return \"too-many-instance-attributes\"\n elif pylint_token == \"R0912\":\n return \"too-many-branches\"\n elif pylint_token == \"R0914\":\n return \"too-many-locals\"\n elif pylint_token == \"R0915\":\n return \"too-many-statements\"\n elif pylint_token == \"W0123\":\n return \"eval-used\"\n elif pylint_token == \"W0603\":\n return \"global-statement\"\n elif pylint_token == \"W0613\":\n return \"unused-argument\"\n elif pylint_token == \"W0622\":\n return \"redefined-builtin\"\n elif pylint_token == \"W0703\":\n return \"broad-except\"\n else:\n return pylint_token\n\n return part.group(1) + \",\".join(\n sorted(renamer(token) for token in part.group(2).split(\",\"))\n )\n\n new_value = re.sub(\n r\"(pylint\\: disable=)(.*)\", replacer, str(comment_node.value), flags=re.M\n )\n comment_node.value = new_value\n\n\ndef autoformat(filename, abort=False):\n from baron.parser import ( # pylint: disable=I0021,import-error,no-name-in-module\n ParsingError, # @UnresolvedImport\n )\n from redbaron import ( # pylint: disable=I0021,import-error,no-name-in-module\n RedBaron, # @UnresolvedImport\n )\n\n my_print(\"Consider\", filename, end=\": \")\n\n old_code = open(filename, \"r\").read()\n\n try:\n red = RedBaron(old_code)\n # red = RedBaron(old_code.rstrip()+'\\n')\n except ParsingError:\n if abort:\n raise\n\n my_print(\"PARSING ERROR.\")\n return 2\n\n for node in red.find_all(\"CommentNode\"):\n try:\n _updateCommentNode(node)\n except Exception:\n my_print(\"Problem with\", node)\n node.help(deep=True, with_formatting=True)\n raise\n\n new_code = red.dumps()\n\n if new_code != old_code:\n new_name = filename + \".new\"\n\n with open(new_name, \"w\") as source_code:\n source_code.write(red.dumps())\n\n if os.name == \"nt\":\n cleanupWindowsNewlines(new_name)\n\n # There is no way to safely replace a file on Windows, but lets try on Linux\n # at least.\n old_stat = os.stat(filename)\n\n try:\n os.rename(new_name, filename)\n except OSError:\n shutil.copyfile(new_name, filename)\n os.unlink(new_name)\n\n os.chmod(filename, old_stat.st_mode)\n\n my_print(\"updated.\")\n changed = 1\n else:\n my_print(\"OK.\")\n changed = 0\n\n return changed\n","sub_path":"nuitka/tools/quality/autoformat/Autoformat.py","file_name":"Autoformat.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"448037552","text":"import requests\nimport base64\nimport urllib\nimport 
time\n\nprint(\"START\")\nstart_time = time.time()\n\n# url = 'http://kslweb1.spb.ctf.su/sqli/time1/?query=qwerty&sig_query=cXdlcnR5'\nurl = 'http://kslweb1.spb.ctf.su/sqli/time1/?query=sleep%285%29&sig_query=c2xlZXAoNSk%3D'\n\nstring = \"zxcvbnm\"\nwith open('code.txt', 'r') as file:\n string = \"\".join(file.readlines())\n\ns = string.encode('UTF-8')\nencoded = base64.b64encode(s)\nencoded = encoded.decode('UTF-8')\n\n\nstring = str(string.encode('UTF-8'))[2:]\nstring = urllib.parse.quote_plus(string)\n\nlength = 129 + len(string) + len(encoded)\n\nline=\"name=erytyghunjkmhjb&email=pochta.&message=\" + string +\"&sig_name=ZXJ5dHlnaHVuamttaGpi&sig_email=cG9jaHRhLg%3D%3D&sig_message=\" + encoded\n\n\nresponse = requests.post(url, headers={\n'Host': 'kslweb1.spb.ctf.su',\n'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0',\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n'Accept-Language': 'en-US,en;q=0.5',\n'Accept-Encoding': 'gzip, deflate',\n'Content-Type': 'application/x-www-form-urlencoded',\n'Content-Length': str(length),\n'DNT': '1',\n'Connection': 'keep-alive',\n'Referer': 'http://kslweb1.spb.ctf.su/fourth/level32/?',\n'Upgrade-Insecure-Requests': '1'\n},\ndata=line\n)\n\nprint(response.content.decode(\"UTF-8\"))\nend_time = time.time()\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n# url = 'http://kslweb1.spb.ctf.su/sqli/time1/?query=sleep%285%29&sig_query=c2xlZXAoNSk%3D'\n#\n","sub_path":"python_code/forkbomb/pictures/file2.py","file_name":"file2.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"395444130","text":"from collections import defaultdict\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef read_mecab_file():\n result = []\n with open('neko.txt.mecab', 'r') as f:\n line = f.readline()\n while len(line) > 0:\n # ref. 
http://taku910.github.io/mecab/#parse\n surface, other = line.split('\\t')\n other_list = other.split(',')\n result_dict = {\n 'surface': surface,\n 'base': other_list[6],\n 'pos': other_list[0],\n 'pos1': other_list[1]\n }\n result.append(result_dict)\n\n line = f.readline()\n if 'EOS' in line:\n break\n return result\n\n\ndef main():\n result = read_mecab_file()\n bag_of_words = defaultdict(int)\n for row in result:\n bag_of_words[row['surface']] += 1\n top_list = sorted(bag_of_words.items(), key=lambda x: x[1], reverse=True)\n\n left = np.array([i for i in range(len(top_list))])\n height = np.array([v for k, v in top_list])\n\n ax = plt.gca()\n ax.spines['top'].set_color('none')\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n plt.plot(left, height)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"chapter_4/39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"435154429","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom impacket.dcerpc.v5.rpcrt import DCERPCException\nfrom impacket.dcerpc.v5 import rrp\nfrom impacket.examples.secretsdump import RemoteOperations\n\nclass CMEModule:\n\n name = 'install_elevated'\n description = \"Checks for AlwaysInstallElevated\"\n supported_protocols = ['smb']\n opsec_safe = True\n multiple_hosts = True\n\n def options(self, context, module_options):\n '''\n '''\n\n def on_admin_login(self, context, connection):\n try:\n remoteOps = RemoteOperations(connection.conn, False)\n remoteOps.enableRegistry()\n\n try:\n ans_machine = rrp.hOpenLocalMachine(remoteOps._RemoteOperations__rrp)\n regHandle = ans_machine['phKey']\n ans_machine = rrp.hBaseRegOpenKey(remoteOps._RemoteOperations__rrp, regHandle, 'SOFTWARE\\\\Policies\\\\Microsoft\\\\Windows\\\\Installer')\n keyHandle = ans_machine['phkResult']\n dataType, aie_machine_value = rrp.hBaseRegQueryValue(remoteOps._RemoteOperations__rrp, keyHandle, 'AlwaysInstallElevated')\n rrp.hBaseRegCloseKey(remoteOps._RemoteOperations__rrp, keyHandle)\n\n if aie_machine_value == 0:\n context.log.highlight('AlwaysInstallElevated Status: 0 (Disabled)')\n return\n\n except rrp.DCERPCSessionError:\n context.log.highlight('AlwaysInstallElevated Status: 0 (Disabled)')\n return\n\n\n try:\n ans_user = rrp.hOpenCurrentUser(remoteOps._RemoteOperations__rrp)\n regHandle = ans_user['phKey']\n ans_user = rrp.hBaseRegOpenKey(remoteOps._RemoteOperations__rrp, regHandle, 'SOFTWARE\\\\Policies\\\\Microsoft\\\\Windows\\\\Installer')\n keyHandle = ans_user['phkResult']\n dataType, aie_user_value = rrp.hBaseRegQueryValue(remoteOps._RemoteOperations__rrp, keyHandle, 'AlwaysInstallElevated')\n rrp.hBaseRegCloseKey(remoteOps._RemoteOperations__rrp, keyHandle)\n\n except rrp.DCERPCSessionError:\n context.log.highlight('AlwaysInstallElevated Status: 1 (Enabled: Computer Only)')\n return\n\n if aie_user_value == 0:\n context.log.highlight('AlwaysInstallElevated Status: 1 (Enabled: Computer Only)')\n else:\n context.log.highlight('AlwaysInstallElevated Status: 1 (Enabled)')\n finally:\n remoteOps.finish()\n","sub_path":"cme/modules/install_elevated.py","file_name":"install_elevated.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"627997487","text":"# time can be optimized to O(n), space O(n)\n\n\nimport collections\n\n\nclass Solution(object):\n def strongPasswordChecker(self, s):\n \"\"\"\n :type 
s: str\n :rtype: int\n \"\"\"\n LEN_LOWER_LIMIT = 6\n LEN_UPPER_LIMIT = 20\n length = len(s)\n has_lower, has_upper, has_digit = False, False, False\n repeats = []\n i = 0\n while i < len(s):\n c = s[i]\n if c.islower():\n has_lower = True\n elif c.isupper():\n has_upper = True\n elif c.isdigit():\n has_digit = True\n start = i\n while i < len(s) and s[i] == s[start]:\n i += 1\n if i - start >= 3: # don't forget this condition\n repeats.append(i - start)\n cnt_missing_type = 3 - (int(has_lower) + int(has_upper) + int(has_digit))\n\n # remove all duplicates\n repeats.sort(key=lambda x: x % 3) # edge case: \"AAAAAABBBBBB123456789a\", can be optimized by bucket sort\n repeats = collections.deque(repeats)\n num_changes = 0\n while repeats:\n x = repeats.popleft()\n if length < LEN_LOWER_LIMIT:\n # insert\n new_insert = x // 3\n num_changes += new_insert\n length += new_insert\n cnt_missing_type -= new_insert\n elif length > LEN_UPPER_LIMIT:\n # delete\n if x % 3 == 0:\n new_del = 1\n elif x % 3 == 1:\n new_del = min(2, length - LEN_UPPER_LIMIT)\n else:\n new_del = min(x - 2, length - LEN_UPPER_LIMIT)\n num_changes += new_del\n length -= new_del\n x -= new_del\n if x > 2:\n repeats.append(x)\n else:\n # replace\n new_replace = x // 3\n num_changes += new_replace\n cnt_missing_type -= new_replace\n\n # deal with missing types\n cnt_missing_type = max(cnt_missing_type, 0)\n if length < LEN_LOWER_LIMIT:\n # insert\n length += cnt_missing_type\n num_changes += cnt_missing_type\n else:\n # replace\n num_changes += cnt_missing_type\n\n # deal with length\n if length < LEN_LOWER_LIMIT:\n # insert\n num_changes += LEN_LOWER_LIMIT - length\n elif length > LEN_UPPER_LIMIT:\n # delete\n num_changes += length - LEN_UPPER_LIMIT\n\n return num_changes\n\n\n\"\"\"\nA password is considered strong if below conditions are all met:\n\nIt has at least 6 characters and at most 20 characters.\nIt must contain at least one lowercase letter, at least one uppercase letter, and at least one digit.\nIt must NOT contain three repeating characters in a row (\"...aaa...\" is weak, \nbut \"...aa...a...\" is strong, assuming other conditions are met).\nWrite a function strongPasswordChecker(s), that takes a string s as input, \nand return the MINIMUM change required to make s a strong password. If s is already strong, return 0.\n\nInsertion, deletion or replace of any one character are all considered as one change.\n\nAccepted\n\"\"\"\n","sub_path":"0420. Strong Password Checker.py","file_name":"0420. 
Strong Password Checker.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"356970141","text":"#!/usr/bin/env python3\n\nimport re\nimport imaplib\nimport getpass\nfrom email import message_from_bytes\nfrom quopri import decodestring\nfrom os.path import isfile\nfrom datetime import datetime\n\n## Collect credentials and parameters\nhostname = input('Hostname: ')\nfolder = input('IMAP-Dir: ')\nusername = input('Username: ')\npassword = getpass.getpass()\nprint('iTunes Account: 1) DE 2) US')\nlanguage = int(input('Language: '))\n\n# Select patterns based on the selected store language\nif language == 1:\n search_patterns = [\n '(SUBJECT \"Ihr Beleg\" FROM \"do_not_reply@itunes.com\" )',\n '(SUBJECT \"Ihre Rechnung von Apple\" FROM \"no_reply@email.apple.com\")'\n ]\n date_pattern = r'datum\\:\\s*([0-9\\.]*)'\n date_format = '%d.%m.%Y'\n date_format_alt = '%d.%m.%y'\n date_format_out = '%Y-%m-%d'\n total_pattern = r'gesamt\\:\\s*([\\-0-9\\,]*)'\n currency = 'EUR'\n\nelif language == 2:\n raise Exception('US account emails are not yet supported. Help neeed from US citizens!')\n\n search_patterns = [\n '(SUBJECT \"Your Receipt\" FROM \"do_not_reply@itunes.com\" )',\n '(SUBJECT \"Your invoice from Apple\" FROM \"no_reply@email.apple.com\")'\n ]\n date_pattern = r'date\\:\\s*([\\w/0-9\\.]*)'\n date_format = '%d.%m.%Y'\n date_format_alt = '%d.%m.%y'\n date_format_out = '%Y-%m-%d'\n total_pattern = r'total\\:\\s*([\\-0-9\\,]*)'\n currency = 'USD'\n\nelse:\n raise Exception('Currently only German (1) account emails are supported')\n\n## Le jeu commence ...\ntotal = 0\nearliest_date = datetime.today()\n\nimap = imaplib.IMAP4_SSL(host=hostname)\nimap.login(username, password)\nimap.select(folder)\n\nfor patterns in search_patterns:\n\n result, data = imap.search(None, patterns)\n if result != 'OK':\n raise Exception('Search was not successful.', result)\n\n for message_id in data[0].split():\n\n result, msg_raw = imap.fetch(message_id, '(RFC822)')\n if result != 'OK':\n raise Exception('Fetch was not successful.', result)\n\n msg = message_from_bytes(msg_raw[0][1])\n body = \"\"\n\n if msg.is_multipart():\n for part in msg.walk():\n ctype = part.get_content_type()\n cdispo = str(part.get('Content-Disposition'))\n\n if ctype == 'text/plain' and 'attachment' not in cdispo:\n charset = part.get_content_charset()\n body = part.get_payload()\n break\n else:\n charset = msg.get_content_charset()\n body = msg.get_payload()\n\n ds = decodestring(body).decode(charset)\n\n res = re.search(date_pattern, ds, flags=re.IGNORECASE)\n try:\n date_of_invoice = datetime.strptime(res.group(1), date_format)\n except ValueError:\n date_of_invoice = datetime.strptime(res.group(1), date_format_alt)\n\n res = re.search(total_pattern, ds, flags=re.IGNORECASE)\n total += float(res.group(1).replace(',', '.'))\n earliest_date = min(earliest_date, date_of_invoice)\n\n ## Write all invoices in plaintext to current folder\n filename = date_of_invoice.strftime(date_format_out)+'__'+message_id.decode('utf-8')+'.txt'\n\n if isfile(filename):\n continue\n\n with open(filename, 'w') as f:\n print(ds, file=f)\n\nimap.close()\nimap.logout()\n\nprint('Since {}, You\\'ve spent a total amount of {} {:.2f}'.format(earliest_date.strftime(date_format_out), currency, 
total))","sub_path":"itunes.py","file_name":"itunes.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"421093476","text":"from openerp.osv import osv,fields\n\nclass res_partner(osv.osv):\n _name = \"res.partner\"\n _inherit = \"res.partner\"\n\n _columns = {\n 'royalty_receiver': fields.boolean('Receives royalties'),\n 'royalty_product_ids': fields.many2many('product.product', string='Products with royalties'),\n 'fiscal_id': fields.char('Fiscal ID', size=30),\n 'contact': fields.char('Contact', size=50),\n 'email_contact': fields.char('Email contact'),\n 'phone_contact': fields.char('Phone contact', size=30),\n 'logistic_info': fields.char('Logistic INFO'),\n 'incoterm': fields.char('Incoterm', size=20),\n 'docs_required': fields.char('Docs required'),\n 'special_instructions': fields.text('Special Instructions'),\n }\nres_partner()\n","sub_path":"elote_royalties/model/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"188975609","text":"import torch\nclass PruneConfig(object):\n def __init__(self):\n self.n_points_per_layer = 1\n self.prunable_layer_types = [torch.nn.modules.conv.Conv2d, torch.nn.modules.linear.Linear]\n self.calib_batch = 10\n self.device = 'cuda'\n\n\nclass LassoPruneConfig(PruneConfig):\n def __init__(self, model, ckpt, train_dataloader, val_dataloader=None):\n super(LassoPruneConfig, self).__init__()\n self.model = model\n self.ckpt = ckpt\n self.train_dataloader = train_dataloader\n self.val_dataloader = val_dataloader\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"278138837","text":"# -*- coding: utf-8 -*-\n#FABIEN GENTY\n#2017/10\n#PROJET LONG VISUALISATEUR DE PROTEINES\n\n#loading librairy\n\nimport pandas as pd\nimport math as m\nimport numpy as np\n\ndef set_CSV(path):\n \"\"\"\n Opening the CSV file and cleaning the file with Na\n \"\"\"\n data_csv = pd.read_csv(path,\n sep = '\\t',\n header=0,\n na_filter = True,\n index_col=False,\n na_values =\" NaN\")\n del data_csv[\"Unnamed: 0\"]\n return data_csv\n\nif __name__ == '__main__':\n main()\n #donne = set_CSV(\"data_table/data_down.csv\")\n #print(donne)\n","sub_path":"env/app/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"363727900","text":"#!python\n# set PYSPARK_DRIVER_PYTHON=python\n# set PYSPARK_DRIVER_PYTHON_OPTS=\n# spark-submit --master local[7] --deploy-mode client BiLevelPerfTest.py\nimport gc\nimport scipy.stats, numpy\nimport time\nfrom LinearRegression import linear_regression\nfrom pyspark.sql import SparkSession\n\nspark = None\nsc = None\nlog = None\n\ndef createSparkContext():\n global spark\n spark = SparkSession \\\n .builder \\\n .appName(\"BiLevelPerfTest\") \\\n .config(\"spark.sql.shuffle.partitions\", 7) \\\n .config(\"spark.ui.enabled\", \"false\") \\\n .config(\"spark.rdd.compress\", \"false\") \\\n .config(\"spark.driver.memory\", \"2g\") \\\n .config(\"spark.executor.memory\", \"3g\") \\\n .config(\"spark.executor.memoryOverhead\", \"1g\") \\\n .config(\"spark.sql.execution.arrow.enabled\", \"true\") \\\n .getOrCreate()\n return spark\ndef setupSparkContext(in_spark):\n 
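# publish the session, context, and log4j logger module-wide so the benchmark helpers can use them\n 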
global spark, sc, log\n spark = in_spark\n sc = spark.sparkContext\n log4jLogger = sc._jvm.org.apache.log4j\n log = log4jLogger.LogManager.getLogger(__name__)\n log.info(\"script initialized\")\n return sc, log\n\nimport math\nimport random\nimport collections\nimport pyspark.sql.functions as func\nimport pyspark.sql.types as DataTypes\nfrom pyspark.sql.window import Window\nfrom pyspark.sql import Row\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\nimport pandas as pd\nimport numpy as np\nfrom numba import vectorize, jit, njit, prange, cuda\nfrom numba import float64 as numba_float64\n\nDataPoint = collections.namedtuple(\"DataPoint\", \n [\"id\", \"grp\", \"subgrp\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"])\nDataPointSchema = DataTypes.StructType([\n DataTypes.StructField('id',DataTypes.LongType(),False),\n DataTypes.StructField('grp',DataTypes.LongType(),False),\n DataTypes.StructField('subgrp',DataTypes.LongType(),False),\n DataTypes.StructField('A',DataTypes.LongType(),False),\n DataTypes.StructField('B',DataTypes.LongType(),False),\n DataTypes.StructField('C',DataTypes.DoubleType(),False),\n DataTypes.StructField('D',DataTypes.DoubleType(),False),\n DataTypes.StructField('E',DataTypes.DoubleType(),False),\n DataTypes.StructField('F',DataTypes.DoubleType(),False)])\ndef generateData(numGrp1=3, numGrp2=3, repetition=1000):\n return [\n DataPoint(\n id=i, \n grp=(i // numGrp2) % numGrp1,\n subgrp=i % numGrp2,\n A=random.randint(1, repetition),\n B=random.randint(1, repetition),\n C=random.uniform(1, 10),\n D=random.uniform(1, 10),\n E=random.normalvariate(0, 10),\n F=random.normalvariate(1, 10))\n for i in range(0, numGrp1 * numGrp2 * repetition)]\npyData_3_3_10 = generateData(3,3,10)\npyData_3_3_100 = generateData(3,3,100)\npyData_3_3_1k = generateData(3,3,1000)\npyData_3_3_10k = generateData(3,3,10000)\npyData_3_3_100k = generateData(3,3,100000)\npyData_3_30_10k = generateData(3,30,10000)\npyData_3_300_1k = generateData(3,300,1000)\npyData_3_3k_100 = generateData(3,3000,100)\n\nCondMethod = collections.namedtuple(\"CondMethod\", \n [\"name\", \"interface\", \"delegate\"])\nimplementation_list = []\ndef count_iter(iterator):\n count = 0\n for obj in iterator:\n count += 1\n return count\n\n#region join aggregation\ndef bi_sql_join(pyData):\n dfData = spark.createDataFrame(pyData)\n spark.catalog.dropTempView(\"exampledata\")\n dfData.createTempView(\"exampledata\")\n spark.sql('''\n SELECT \n level1.grp, \n LAST(level1.mean_of_C) mean_of_C, \n LAST(level1.max_of_D) max_of_D, \n AVG(level2.var_of_E) avg_var_of_E,\n AVG(level2.var_of_E2) avg_var_of_E2\n FROM\n (SELECT\n grp, AVG(C) mean_of_C, MAX(D) max_of_D\n FROM\n exampledata\n GROUP BY grp) AS level1\n LEFT JOIN\n (SELECT \n grp,\n subgrp,\n VARIANCE(E) var_of_E,\n (SUM(E * E) - \n SUM(E)*AVG(E))/(COUNT(E)-1) var_of_E2\n FROM\n exampledata\n GROUP BY grp , subgrp\n ) AS level2 \n ON level1.grp = level2.grp\n GROUP BY level1.grp\n ORDER BY level1.grp\n ''')\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_sql_join', \n interface='sql', \n delegate=bi_sql_join))\n\ndef bi_fluent_join(pyData):\n df = spark.createDataFrame(pyData)\n level1 = df \\\n .groupBy(df.grp) \\\n .agg(\n func.mean(df.C).alias(\"mean_of_C\"),\n func.max(df.D).alias(\"max_of_D\"))\n level2 = df \\\n .groupBy(df.grp, df.subgrp) \\\n .agg(\n func.variance(df.E).alias(\"var_of_E\"),\n ((func.sum(df.E * df.E)-\n func.sum(df.E) * func.avg(df.E))\n /(func.count(df.E)-1)).alias(\"var_of_E2\")\n )\n level3 = level2 \\\n .join(level1, 
\"grp\") \\\n .groupBy(level1.grp) \\\n .agg(\n func.last(level1.mean_of_C).alias(\"mean_of_C\"),\n func.last(level1.max_of_D).alias(\"max_of_D\"),\n func.avg(level2.var_of_E).alias(\"avg_var_of_E\"),\n func.avg(level2.var_of_E2).alias(\"avg_var_of_E2\")\n ) \\\n .orderBy(level1.grp)\n # .collect()\n return level3, None\n\nimplementation_list.append(CondMethod(\n name='bi_fluent_join', \n interface='fluent', \n delegate=bi_fluent_join))\n\n#endregion\n\n#region pandas\ndef bi_pandas(pyData):\n groupby_columns = ['grp']\n agg_columns = ['mean_of_C','max_of_D', 'avg_var_of_E', 'avg_var_of_E2']\n df = spark.createDataFrame(pyData)\n postAggSchema = DataTypes.StructType(\n [x for x in DataPointSchema.fields if x.name in groupby_columns] + \n [DataTypes.StructField(name, DataTypes.DoubleType(), False) for name in agg_columns])\n #\n @pandas_udf(postAggSchema, PandasUDFType.GROUPED_MAP)\n def inner_agg_method(dfPartition):\n group_key = dfPartition['grp'].iloc[0]\n C = dfPartition['C']\n D = dfPartition['D']\n E = dfPartition['E']\n subgroupedE = dfPartition.groupby('subgrp')['E']\n return pd.DataFrame([[\n group_key, \n C.mean(),\n D.max(),\n subgroupedE.var().mean(),\n subgroupedE \\\n .agg(lambda E: \\\n ((E * E).sum() - \n E.sum()**2/E.count())/(E.count()-1)) \\\n .mean(),\n ]], columns=groupby_columns + agg_columns)\n #\n aggregates = df.groupby(df.grp).apply(inner_agg_method)\n return aggregates, None\n\nimplementation_list.append(CondMethod(\n name='bi_pandas', \n interface='pandas', \n delegate=lambda pyData: bi_pandas(pyData)))\n\ndef bi_pandas_numba(pyData):\n groupby_columns = ['grp']\n agg_columns = ['mean_of_C','max_of_D', 'avg_var_of_E', 'avg_var_of_E2']\n df = spark.createDataFrame(pyData)\n postAggSchema = DataTypes.StructType(\n [x for x in DataPointSchema.fields if x.name in groupby_columns] + \n [DataTypes.StructField(name, DataTypes.DoubleType(), False) for name in agg_columns])\n #\n @jit(numba_float64(numba_float64[:]), nopython=True)\n def my_numba_mean(C):\n return np.mean(C)\n #\n @jit(numba_float64(numba_float64[:]), nopython=True)\n def my_numba_max(C):\n return np.max(C)\n #\n @jit(numba_float64(numba_float64[:]), nopython=True)\n def my_numba_var(C):\n return np.var(C)\n #\n @jit(numba_float64(numba_float64[:]), parallel=True, nopython=True)\n def my_looplift_var(E):\n n = len(E)\n accE2 = 0.\n for i in prange(n):\n accE2 += E[i] ** 2\n accE = 0.\n for i in prange(n):\n accE += E[i]\n return (accE2 - accE**2/n)/(n-1)\n #\n @pandas_udf(postAggSchema, PandasUDFType.GROUPED_MAP)\n def inner_agg_method(dfPartition):\n group_key = dfPartition['grp'].iloc[0]\n C = np.array(dfPartition['C'])\n D = np.array(dfPartition['D'])\n subgroupedE = dfPartition.groupby('subgrp')['E']\n return pd.DataFrame([[\n group_key, \n my_numba_mean(C),\n my_numba_max(D),\n subgroupedE.apply(lambda x: my_numba_var(np.array(x))).mean(),\n subgroupedE.apply(lambda x: my_looplift_var(np.array(x))).mean(),\n ]], columns=groupby_columns + agg_columns)\n #\n aggregates = df.groupby(df.grp).apply(inner_agg_method)\n return aggregates, None\n\nimplementation_list.append(CondMethod(\n name='bi_pandas_numba', \n interface='pandas', \n delegate=lambda pyData: bi_pandas_numba(pyData)))\n\n#endregion\n\n#region bi nested\ndef bi_sql_nested(pyData):\n dfData = spark.createDataFrame(pyData)\n spark.catalog.dropTempView(\"exampledata\")\n dfData.createTempView(\"exampledata\")\n spark.sql('''\n SELECT \n grp,\n SUM(sub_sum_of_C) / SUM(sub_count) as mean_of_C,\n MAX(sub_max_of_D) as max_of_D,\n AVG(sub_var_of_E) 
as avg_var_of_E,\n AVG(\n (\n sub_sum_of_E_squared - \n sub_sum_of_E * sub_sum_of_E / sub_count\n ) / (sub_count - 1)\n ) as avg_var_of_E2\n FROM\n (SELECT \n grp, subgrp, \n count(C) as sub_count, \n sum(C) as sub_sum_of_C, \n max(D) as sub_max_of_D, \n variance(E) as sub_var_of_E,\n sum(E * E) as sub_sum_of_E_squared, \n sum(E) as sub_sum_of_E\n FROM\n exampledata\n GROUP BY grp, subgrp) level2\n GROUP BY grp\n ORDER BY grp\n ''')\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_sql_nested', \n interface='sql', \n delegate=bi_sql_nested))\n\ndef bi_fluent_nested(pyData):\n df = spark.createDataFrame(pyData)\n df = df.groupBy(df.grp, df.subgrp)\\\n .agg(func.mean(df.C).alias(\"sub_mean_of_C\"), \n func.count(df.C).alias(\"sub_count\"), \n func.sum(df.C).alias(\"sub_sum_of_C\"), \n func.max(df.D).alias(\"sub_max_of_D\"), \n func.variance(df.E).alias(\"sub_var_of_E\"), \n func.sum(df.E * df.E).alias(\"sub_sum_of_E_squared\"), \n func.sum(df.E).alias(\"sub_sum_of_E\"))\n df = df.groupBy(df.grp) \\\n .agg(\n (\n func.sum(df.sub_mean_of_C * df.sub_count)\n / func.sum(df.sub_count)\n ).alias(\"mean_of_C\"), \n func.max(df.sub_max_of_D).alias(\"max_of_D\"), \n func.avg(df.sub_var_of_E).alias(\"cond_var_of_E1\"),\n func.avg(\n (df.sub_sum_of_E_squared -\n df.sub_sum_of_E * df.sub_sum_of_E \n / df.sub_count)).alias(\"cond_var_of_E2\"))\n df.select('grp', 'mean_of_C', 'max_of_D', \n 'cond_var_of_E1', 'cond_var_of_E2')\\\n .orderBy(df.grp)\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_fluent_nested', \n interface='fluent', \n delegate=bi_fluent_nested))\n\n#endregion\n\n#region bi Window\ndef bi_fluent_window(pyData):\n df = spark.createDataFrame(pyData)\n window = Window \\\n .partitionBy(df.grp, df.subgrp) \\\n .orderBy(df.id)\n df = df \\\n .orderBy(df.grp, df.subgrp, df.id)\\\n .withColumn(\"sub_var_of_E\", \n func.variance(df.E)\\\n .over(window))\n df = df \\\n .groupBy(df.grp, df.subgrp)\\\n .agg(func.sum(df.C).alias(\"sub_sum_of_C\"), \n func.count(df.C).alias(\"sub_count\"), \n func.max(df.D).alias(\"sub_max_of_D\"),\n func.last(df.sub_var_of_E).alias(\"sub_var_of_E1\"), \n func.variance(df.E).alias(\"sub_var_of_E2\"))\n df \\\n .groupBy(df.grp)\\\n .agg(\n (func.sum(df.sub_sum_of_C)/\n func.sum(df.sub_count)).alias(\"mean_of_C\"), \n func.max(df.sub_max_of_D).alias(\"max_of_D\"),\n func.avg(df.sub_var_of_E1).alias(\"avg_var_of_E1\"), \n func.avg(df.sub_var_of_E2).alias(\"avg_var_of_E2\"))\\\n .orderBy(df.grp)\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_fluent_window', \n interface='fluent', \n delegate=bi_fluent_window))\n\n#endregion\n\n#region bi rdd grpMap\ndef bi_rdd_grpmap(pyData):\n class MutableRunningTotal:\n def __init__(self, grp):\n self.grp = grp\n self.running_sub_sum_of_E_squared = 0\n self.running_sub_sum_of_E = 0\n self.running_sub_count = 0\n\n rddData = sc.parallelize(pyData)\n def processData1(grp, iterator):\n import math, statistics\n running_sum_of_C = 0\n running_grp_count = 0\n running_max_of_D = None\n running_subs_of_E = {}\n \n for item in iterator:\n running_sum_of_C += item.C\n running_grp_count += 1\n running_max_of_D = item.D \\\n if running_max_of_D is None or \\\n running_max_of_D < item.D \\\n else running_max_of_D\n if item.subgrp not in running_subs_of_E:\n running_subs_of_E[item.subgrp] = MutableRunningTotal(grp)\n running_sub = running_subs_of_E[item.subgrp]\n running_sub.running_sub_sum_of_E_squared += \\\n item.E * item.E\n running_sub.running_sub_sum_of_E += item.E\n 
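# the running count completes the (sum, sum of squares, count) triple used in the variance formula below\n 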
running_sub.running_sub_count += 1\n mean_of_C = running_sum_of_C / running_grp_count \\\n if running_grp_count > 0 else math.nan\n ar = [math.nan if \n x.running_sub_count < 2 else\n (\n x.running_sub_sum_of_E_squared\n - x.running_sub_sum_of_E * \\\n x.running_sub_sum_of_E / \\\n x.running_sub_count\n ) / (x.running_sub_count - 1)\n for x in running_subs_of_E.values()]\n avg_var_of_E = statistics.mean(ar)\n return (grp, \n Row(grp=grp, \n mean_of_C=mean_of_C, \n max_of_D=running_max_of_D, \n avg_var_of_E=avg_var_of_E))\n\n rddResult = rddData\\\n .groupBy(lambda x: (x.grp))\\\n .map(lambda pair: processData1(pair[0], pair[1]))\\\n .repartition(1)\\\n .sortByKey().values()\n spark.createDataFrame(rddResult)\\\n .select('grp', 'mean_of_C', 'max_of_D', 'avg_var_of_E')\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_rdd_grpmap', \n interface='rdd', \n delegate=bi_rdd_grpmap))\n\n#endregion\n\n#region bi rdd reduce1\ndef bi_rdd_reduce1(pyData):\n rddData = sc.parallelize(pyData)\n SubTotal1 = collections.namedtuple(\"SubTotal1\", \n [\"running_sum_of_C\", \"running_max_of_D\", \n \"subgrp_running_totals\"])\n SubTotal2 = collections.namedtuple(\"SubTotal2\", \n [\"running_sum_of_E_squared\", \n \"running_sum_of_E\", \"running_count\"])\n\n def mergeValue(pre, v):\n subgrp_running_totals = pre.subgrp_running_totals.copy()\n if v.subgrp not in subgrp_running_totals:\n subgrp_running_totals[v.subgrp] = \\\n SubTotal2(\n running_sum_of_E_squared=0,\n running_sum_of_E=0,\n running_count=0\n )\n subsub = subgrp_running_totals[v.subgrp]\n subgrp_running_totals[v.subgrp] = SubTotal2(\n subsub.running_sum_of_E_squared + v.E * v.E,\n subsub.running_sum_of_E + v.E,\n subsub.running_count + 1)\n return SubTotal1(\n running_sum_of_C=pre.running_sum_of_C + v.C,\n running_max_of_D=pre.running_max_of_D \\\n if pre.running_max_of_D is not None and \\\n pre.running_max_of_D > v.D \\\n else v.D, \n subgrp_running_totals=subgrp_running_totals)\n def createCombiner(v):\n return mergeValue(SubTotal1(\n running_sum_of_C=0, \n running_max_of_D=None, \n subgrp_running_totals={}), v)\n def mergeCombiners(lsub, rsub):\n subgrp_running_totals = {}\n all_subgrp = set(lsub.subgrp_running_totals.keys()|\n rsub.subgrp_running_totals.keys())\n for subgrp in all_subgrp:\n l = []\n if subgrp in lsub.subgrp_running_totals:\n l.append(lsub.subgrp_running_totals[subgrp])\n if subgrp in rsub.subgrp_running_totals:\n l.append(rsub.subgrp_running_totals[subgrp])\n if len(l) == 1:\n result = l[0]\n else:\n result = SubTotal2(\n running_sum_of_E_squared = \n sum(x.running_sum_of_E_squared for x in l),\n running_sum_of_E = \n sum(x.running_sum_of_E for x in l),\n running_count = \n sum(x.running_count for x in l))\n subgrp_running_totals[subgrp] = result\n return SubTotal1(\n running_sum_of_C=\n lsub.running_sum_of_C + rsub.running_sum_of_C, \n running_max_of_D=lsub.running_max_of_D \\\n if lsub.running_max_of_D is not None and \\\n lsub.running_max_of_D > rsub.running_max_of_D \\\n else rsub.running_max_of_D, \n subgrp_running_totals=subgrp_running_totals)\n def finalAnalytics(grp, level1):\n import statistics\n running_grp_count = 0\n list_of_var_of_E = []\n for sub in level1.subgrp_running_totals.values():\n count = sub.running_count\n running_grp_count += count\n var_of_E = math.nan \\\n if count < 2 else \\\n (\n sub.running_sum_of_E_squared - \n sub.running_sum_of_E * \n sub.running_sum_of_E / count\n ) / (count - 1)\n list_of_var_of_E.append(var_of_E)\n \n return Row(\n grp=grp,\n mean_of_C= math.nan\n if 
running_grp_count < 1 else\n level1.running_sum_of_C/running_grp_count,\n max_of_D=level1.running_max_of_D, \n avg_var_of_E = statistics.mean(list_of_var_of_E))\n \n rddResult = rddData \\\n .map(lambda x: (x.grp, x))\\\n .combineByKey(createCombiner,\n mergeValue,\n mergeCombiners)\\\n .sortByKey()\\\n .map(lambda x: finalAnalytics(x[0],x[1]))\n spark.createDataFrame(rddResult)\\\n .select('grp', 'mean_of_C', 'max_of_D', 'avg_var_of_E')\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_rdd_reduce1', \n interface='rdd', \n delegate=bi_rdd_reduce1))\n\n#endregion\n\n#region bi rdd reduce2\ndef bi_rdd_reduce2(pyData):\n rddData = sc.parallelize(pyData)\n SubTotal = collections.namedtuple(\"SubTotal\", \n [\"running_sum_of_C\", \"running_count\", \"running_max_of_D\", \n \"running_sum_of_E_squared\", \"running_sum_of_E\"])\n\n def mergeValue(pre, v):\n return SubTotal(\n running_sum_of_C=pre.running_sum_of_C + v.C,\n running_count=pre.running_count + 1, \n running_max_of_D=pre.running_max_of_D \\\n if pre.running_max_of_D is not None and \\\n pre.running_max_of_D > v.D \\\n else v.D, \n running_sum_of_E_squared=\n pre.running_sum_of_E_squared +\n v.E * v.E, \n running_sum_of_E=\n pre.running_sum_of_E + v.E)\n def createCombiner(v):\n return mergeValue(SubTotal(\n running_sum_of_C=0, \n running_count=0,\n running_max_of_D=None, \n running_sum_of_E_squared=0, \n running_sum_of_E=0), v)\n def mergeCombiners(lsub, rsub):\n return SubTotal(\n running_sum_of_C=\n lsub.running_sum_of_C + rsub.running_sum_of_C, \n running_count=\n lsub.running_count + rsub.running_count, \n running_max_of_D=lsub.running_max_of_D \\\n if lsub.running_max_of_D is not None and \\\n lsub.running_max_of_D > rsub.running_max_of_D \\\n else rsub.running_max_of_D, \n running_sum_of_E_squared=\n lsub.running_sum_of_E_squared + \n rsub.running_sum_of_E_squared, \n running_sum_of_E = \n lsub.running_sum_of_E + rsub.running_sum_of_E)\n def finalAnalytics(grp, iterator):\n running_sum_of_C = 0\n running_grp_count = 0\n running_max_of_D = None\n running_subs_of_E = {}\n running_sum_of_var_of_E = 0\n running_count_of_subgrp = 0\n \n for sub in iterator:\n count = sub.running_count\n running_sum_of_C += sub.running_sum_of_C\n running_grp_count += count\n running_max_of_D = sub.running_max_of_D \\\n if running_max_of_D is None or \\\n running_max_of_D < sub.running_max_of_D \\\n else running_max_of_D\n var_of_E = math.nan \\\n if count < 2 else \\\n (\n sub.running_sum_of_E_squared - \n sub.running_sum_of_E * \n sub.running_sum_of_E / count\n ) / (count - 1)\n running_sum_of_var_of_E += var_of_E\n running_count_of_subgrp += 1\n \n return Row(\n grp=grp,\n mean_of_C= math.nan\n if running_grp_count < 1 else\n running_sum_of_C/running_grp_count,\n max_of_D=running_max_of_D, \n avg_var_of_E = math.nan\n if running_count_of_subgrp < 1 else\n running_sum_of_var_of_E / \n running_count_of_subgrp)\n \n rddResult = rddData \\\n .map(lambda x: ((x.grp, x.subgrp), x))\\\n .combineByKey(createCombiner,\n mergeValue,\n mergeCombiners)\\\n .map(lambda x: (x[0][0],x[1]))\\\n .groupByKey(numPartitions=1)\\\n .map(lambda x: (x[0],finalAnalytics(x[0],x[1])))\\\n .sortByKey().values()\n spark.createDataFrame(rddResult)\\\n .select('grp', 'mean_of_C', 'max_of_D', 'avg_var_of_E')\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_rdd_reduce2', \n interface='rdd', \n delegate=bi_rdd_reduce2))\n\n#endregion\n\n#region bi rdd mapPartitions\ndef bi_rdd_mappart(pyData):\n rddData = sc.parallelize(pyData)\n SubTotal1 = 
collections.namedtuple(\"SubTotal1\", \n [\"running_sum_of_C\", \"running_max_of_D\", \n \"subgrp_totals\"])\n SubTotal2 = collections.namedtuple(\"SubTotal2\", \n [\"running_sum_of_E_squared\", \"running_sum_of_E\", \"running_count\"])\n class MutableGrpTotal:\n def __init__(self):\n self.running_sum_of_C = 0\n self.running_max_of_D=None\n self.running_subgrp_totals={}\n class MutableSubGrpTotal:\n def __init__(self):\n self.running_count=0\n self.running_sum_of_E_squared=0\n self.running_sum_of_E=0\n\n def partitionTriage(iterator):\n running_grp_totals = {}\n for v in iterator:\n k1 = v.grp\n if k1 not in running_grp_totals:\n running_grp_totals[k1]=MutableGrpTotal()\n r1 = running_grp_totals[k1]\n r1.running_sum_of_C += v.C\n r1.running_max_of_D = \\\n r1.running_max_of_D \\\n if r1.running_max_of_D is not None and \\\n r1.running_max_of_D > v.D \\\n else v.D\n k2 = v.subgrp\n if k2 not in r1.running_subgrp_totals:\n r1.running_subgrp_totals[k2]=MutableSubGrpTotal()\n r2 = r1.running_subgrp_totals[k2]\n r2.running_sum_of_E_squared += v.E * v.E\n r2.running_sum_of_E += v.E\n r2.running_count += 1\n for k in running_grp_totals:\n r1 = running_grp_totals[k]\n yield (\n k, \n SubTotal1(\n running_sum_of_C=r1.running_sum_of_C, \n running_max_of_D=r1.running_max_of_D, \n subgrp_totals={k2:\n SubTotal2(\n running_sum_of_E_squared=r2.running_sum_of_E_squared, \n running_sum_of_E=r2.running_sum_of_E, \n running_count=r2.running_count)\n for k2,r2 in r1.running_subgrp_totals.items()}))\n\n def mergeCombiners3(grp, iterable):\n import statistics\n lsub = MutableGrpTotal()\n for rsub1 in iterable:\n lsub.running_sum_of_C += rsub1.running_sum_of_C\n lsub.running_max_of_D=lsub.running_max_of_D \\\n if lsub.running_max_of_D is not None and \\\n lsub.running_max_of_D > rsub1.running_max_of_D \\\n else rsub1.running_max_of_D\n for subgrp,rsub2 in rsub1.subgrp_totals.items():\n k2 = subgrp\n if k2 not in lsub.running_subgrp_totals:\n lsub.running_subgrp_totals[k2]=MutableSubGrpTotal()\n lsub2 = lsub.running_subgrp_totals[k2]\n lsub2.running_sum_of_E_squared += \\\n rsub2.running_sum_of_E_squared\n lsub2.running_sum_of_E += rsub2.running_sum_of_E\n lsub2.running_count += rsub2.running_count\n running_count = 0\n vars_of_E = []\n for subgrp, lsub2 in lsub.running_subgrp_totals.items():\n var_of_E = \\\n (\n lsub2.running_sum_of_E_squared -\n lsub2.running_sum_of_E * lsub2.running_sum_of_E /\n lsub2.running_count\n ) / (lsub2.running_count-1)\n vars_of_E.append(var_of_E)\n running_count += lsub2.running_count\n return Row(\n grp=grp,\n mean_of_C=lsub.running_sum_of_C / running_count, \n max_of_D=lsub.running_max_of_D, \n avg_var_of_E=statistics.mean(vars_of_E))\n\n rddResult = rddData \\\n .mapPartitions(partitionTriage) \\\n .groupByKey() \\\n .map(lambda kv: (kv[0], mergeCombiners3(kv[0], kv[1]))) \\\n .sortByKey().values()\n spark.createDataFrame(rddResult)\\\n .select('grp', 'mean_of_C', 'max_of_D', 'avg_var_of_E')\\\n .collect()\n\nimplementation_list.append(CondMethod(\n name='bi_rdd_mappart', \n interface='rdd', \n delegate=bi_rdd_mappart))\n\n#endregion\n\nRunResult = collections.namedtuple(\"RunResult\", [\"dataSize\", \"relCard\", \"elapsedTime\", \"recordCount\"])\ndef DoTesting():\n NumRunsPer = 23 # 100\n # datasets = [(1,pyData_3_3_10k), (10,pyData_3_30_1k), (100,pyData_3_300_100), (1000,pyData_3_3k_10)]\n # datasets = [(1,pyData_3_3_10k)]\n datasets = [(1,pyData_3_3_100k), (10,pyData_3_30_10k), (100,pyData_3_300_1k), (1000,pyData_3_3k_100)]\n\n cond_run_itinerary = []\n for cond_method in 
implementation_list:\n if cond_method.name not in ['bi_pandas', 'bi_pandas_numba']:\n continue\n for datatuple in datasets:\n cond_run_itinerary.extend((cond_method, datatuple) for i in range(0, NumRunsPer))\n random.shuffle(cond_run_itinerary)\n with open('Results/bi_runs_4.csv', 'at') as f:\n for index, (cond_method, (relCard, data)) in enumerate(cond_run_itinerary):\n log.info(\"Working on %d of %d\"%(index, len(cond_run_itinerary)))\n startedTime = time.time()\n df, rdd = cond_method.delegate(data)\n if df is not None:\n rdd = df.rdd\n recordCount = count_iter(rdd.toLocalIterator())\n finishedTime = time.time()\n result = RunResult(\n dataSize=len(data),\n relCard=relCard,\n elapsedTime=finishedTime-startedTime,\n recordCount=recordCount)\n f.write(\"%s,%s,%d,%d,%f,%d\\n\"%(cond_method.name, cond_method.interface, result.dataSize, result.relCard, result.elapsedTime, result.recordCount))\n gc.collect()\n time.sleep(10)\n\ndef DoPostProcess_Unknown_skipped():\n cond_runs = {}\n if False:\n with open('Results/bi_runs_4.csv', 'at') as f:\n for index, (cond_method, (relCard, data)) in enumerate(cond_run_itinerary):\n log.info(\"Working on %d of %d\"%(index, len(cond_run_itinerary)))\n startedTime = time.time()\n cond_method.delegate(data)\n finishedTime = time.time()\n if cond_method.name not in cond_runs:\n cond_runs[cond_method.name] = []\n result = RunResult(\n dataSize=len(data),\n relCard=relCard,\n elapsedTime=finishedTime-startedTime)\n cond_runs[cond_method.name].append(result)\n f.write(\"%s,%s,%d,%d,%f\\n\"%(cond_method.name, cond_method.interface, result.dataSize, result.relCard, result.elapsedTime))\n gc.collect()\n time.sleep(10)\n else:\n with open('Results/bi_runs_4_cleaned.csv', 'r', encoding='utf-8-sig') as f, \\\n open('Results/temp.csv', 'w') as fout:\n for textline in f:\n fields = tuple(textline.rstrip().split(','))\n cond_method_name, cond_method_interface, result_dataSize, result_relCard, result_elapsedTime = fields\n result = RunResult(\n dataSize=int(result_dataSize),\n relCard=int(result_relCard),\n elapsedTime=float(result_elapsedTime))\n if cond_method_name not in cond_runs:\n cond_runs[cond_method_name] = []\n cond_runs[cond_method_name].append(result)\n fout.write(\"%s,%s,%d,%d,%f\\n\"%(cond_method_name, cond_method_interface, result.dataSize, result.relCard, result.elapsedTime))\n\ndef DoAnalysis():\n cond_runs = {}\n with open('Results/bi_runs_4.csv', 'r', encoding='utf-8-sig') as f, \\\n open('Results/temp.csv', 'w') as fout:\n for textline in f:\n if textline.startswith('#'):\n print(\"Excluding line: \"+textline)\n continue\n if textline.find(',') < 0:\n print(\"Excluding line: \"+textline)\n continue\n fields = textline.rstrip().split(',')\n if len(fields) < 6:\n fields.append('3')\n cond_method_name, cond_method_interface, result_dataSize, result_relCard, result_elapsedTime, result_recordCount = fields\n if result_recordCount != '3':\n print(\"Excluding line: \"+textline)\n continue\n result = RunResult(\n dataSize=int(result_dataSize),\n relCard=int(result_relCard),\n elapsedTime=float(result_elapsedTime),\n recordCount=int(result_recordCount))\n if cond_method_name not in cond_runs:\n cond_runs[cond_method_name] = []\n cond_runs[cond_method_name].append(result)\n fout.write(\"%s,%s,%d,%d,%f,%d\\n\"%(cond_method_name, cond_method_interface, result.dataSize, result.relCard, result.elapsedTime, result.recordCount))\n CondResult = collections.namedtuple(\"CondResult\", \n [\"name\", \"interface\", \"run_count\",\n \"b0\", \"b0_low\", \"b0_high\",\n \"b1\", 
\"b1_low\", \"b1_high\",\n \"s2\", \"s2_low\", \"s2_high\"])\n summary_status = ''\n regression_status = ''\n if True:\n cond_results = []\n confidence = 0.95\n summary_status += \"%s,%s,%s,%s,%s,%s,%s,%s\\n\"% (\n 'Method', 'Interface',\n 'NumRuns', 'relCard', 'Elapsed Time', 'stdev', 'rl', 'rh'\n )\n regression_status += '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n'%(\n 'Method', 'Interface',\n 'b0_low', 'b0', 'b0_high',\n 'b1_low', 'b1', 'b1_high',\n 's2_low', 's2', 's2_high')\n # f.write((\"%s,%s,%s,\"+\"%s,%s,%s,\"+\"%s,%s,%s,\"+\"%s,%s,%s\\n\")%(\n # 'RawMethod', 'interface', 'run_count',\n # 'b0', 'b0 lo', 'b0 hi',\n # 'b1M', 'b1M lo', 'b1M hi',\n # 's2', 's2 lo', 's2 hi'))\n # f.write((\"%s,%s,%s,\"+\"%s,%s,%s,\"+\"%s,%s\\n\")% (\n # 'RawMethod', 'interface', 'run_count',\n # 'relCard', 'mean', 'stdev', \n # 'rl', 'rh'\n # ))\n for name in cond_runs:\n print(\"Looking to analyze %s\"%name)\n cond_method = [x for x in implementation_list if x.name == name][0]\n times = cond_runs[name]\n size_values = set(x.relCard for x in times)\n for relCard in size_values:\n ar = [x.elapsedTime for x in times if x.relCard == relCard]\n numRuns = len(ar)\n mean = numpy.mean(ar)\n stdev = numpy.std(ar, ddof=1)\n rl, rh = scipy.stats.norm.interval(confidence, loc=mean, scale=stdev/math.sqrt(len(ar)))\n # f.write((\"%s,%s,\"+\"%d,%d,\"+\"%f,%f,%f,%f\\n\")%(\n # name, cond_method.interface, \n # numRuns, relCard, \n # mean, stdev, rl, rh\n # ))\n summary_status += \"%s,%s,%d,%d,%f,%f,%f,%f\\n\"% (\n name, cond_method.interface,\n numRuns, relCard, mean, stdev, rl, rh\n )\n x_values = [float(x.relCard) for x in times]\n y_values = [float(x.elapsedTime) for x in times]\n (b0, (b0_low, b0_high)), (b1, (b1_low,b1_high)), (s2, (s2_low,s2_high)) = \\\n linear_regression(x_values, y_values, confidence)\n result = CondResult(\n name=cond_method.name,\n interface=cond_method.interface,\n run_count=len(times),\n b0=b0,\n b0_low=b0_low,\n b0_high=b0_high,\n b1=b1,\n b1_low=b1_low,\n b1_high=b1_high,\n s2=s2,\n s2_low=s2_low,\n s2_high=s2_high\n )\n cond_results.append(result)\n # f.write((\"%s,%s,%d,\"+\"%f,%f,%f,\"+\"%f,%f,%f,\"+\"%f,%f,%f\\n\")%(\n # cond_method.name, cond_method.interface, result.run_count,\n # result.b0, result.b0_low, result.b0_high,\n # result.b1*1e+6, result.b1_low*1e+6, result.b1_high*1e+6,\n # result.s2, result.s2_low, result.s2_high))\n regression_status += '%s,%s,%f,%f,%f,%f,%f,%f,%f,%f,%f\\n'%(\n cond_method.name, cond_method.interface,\n result.b0_low, result.b0, result.b0_high,\n result.b1_low, result.b1, result.b1_high,\n result.s2_low, result.s2, result.s2_high)\n with open('Results/bi_results_5.csv', 'w') as f:\n f.write(summary_status)\n f.write(\"\\n\")\n f.write(regression_status)\n f.write(\"\\n\")\n\n# with \n# for result in cond_results:\n# line = \"%s,%s,%f,%f,%f,%f\" % (result.name, result.interface, result.avg, result.stderr, result.rangelow, result.rangehigh)\n# print(line)\nif __name__ == \"__main__\":\n spark = createSparkContext()\n sc, log = setupSparkContext(spark)\n # DoTesting()\n # DoPostProcess_Unknown_skipped()\n DoAnalysis()\n","sub_path":"Python/BiLevelPerfTest.py","file_name":"BiLevelPerfTest.py","file_ext":"py","file_size_in_byte":35285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"113496898","text":"import re\nfrom logger import logger\n\n\ndef parse_file(file):\n\tlogger.debug(\"Parsing file {}\".format(file.name))\n\n\t# variables\n\tgrid = [0]\n\tgrid_size = None\n\n\ti = 0\n\tfor line_nbr, line in 
enumerate(file.readlines(), start=1):\n\t\tline = re.sub(r\"\\s*#.*|\\n\", \"\", line)\n\n\t\tif line:\n\t\t\tsplit = re.split(\"\\s+\", line)\n\n\t\t\tif grid_size is None:\n\t\t\t\tif len(split) != 1:\n\t\t\t\t\traise Exception(\"First element should be the grid size.\")\n\t\t\t\tgrid_size = int(split[0])\n\t\t\t\tif grid_size < 0:\n\t\t\t\t\traise Exception(\"Size cannot have a negative value.\")\n\n\t\t\t\tgrid *= grid_size ** 2\n\n\t\t\telse:\n\t\t\t\tif i > grid_size:\n\t\t\t\t\traise Exception(\"Amount of rows exceeds the specified size ({})\".format(grid_size))\n\t\t\t\tif len(split) != grid_size:\n\t\t\t\t\traise Exception(\"Amount of columns doesnt match with the specified size ({})\".format(grid_size))\n\n\t\t\t\tfor j, elem in enumerate(split):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tvalue = int(elem)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise Exception(\"Tile value should be an int.\")\n\n\t\t\t\t\tif 0 <= value < grid_size ** 2:\n\t\t\t\t\t\tif grid[value]:\n\t\t\t\t\t\t\traise Exception(\"Tile with the value {} already assigned.\".format(value))\n\t\t\t\t\t\tgrid[value] = (j, i)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"Tile value cannot be {}; should be greater than 0 and less than {}\"\n\t\t\t\t\t\t\t.format(value, grid_size ** 2))\n\n\t\t\t\ti += 1\n\n\tif grid_size is None:\n\t\traise Exception(\"Size of the grid didn't found.\")\n\telif i != grid_size:\n\t\traise Exception(\"Amount of rows doesnt match with the specified size ({})\".format(grid_size))\n\n\treturn grid_size, grid\n","sub_path":"n_puzzle/file_parser.py","file_name":"file_parser.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"302525062","text":"import os, shutil, glob\n\nsrc_fldr = r\"/Users/beuo/Downloads/\"\ndst_fldr = \"/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/\"; ## Edit this\n\ntry:\n os.makedirs(dst_fldr); ## it creates the destination folder\nexcept:\n print(\"Folder already exist or some error\")\n\nfor txt_file in glob.glob(src_fldr+\"\\\\*.xlsx\"):\n shutil.copy2(txt_file, dst_fldr);\nshutil.move(src_fldr,'')","sub_path":".history/move_20191127155721.py","file_name":"move_20191127155721.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"534332235","text":"from __future__ import (absolute_import, division, print_function, unicode_literals)\nfrom .supervised_learner import SupervisedLearner\n\nimport numpy as np\nimport math\n\n\nclass MultilayerPerceptronLearner(SupervisedLearner):\n \"\"\"\n For nominal labels, this model simply returns the majority class. For\n continuous labels, it returns the mean value.\n If the learning model you're using doesn't do as well as this one,\n it's time to find a new learning model.\n \"\"\"\n\n def __init__(self, data=None, example_hyperparameter=None):\n \"\"\" Example learner initialization. Any additional variables passed to the Session will be passed on to the learner,\n e.g. 
learning rate, etc.\n\n Learners can initialize weights here\n Args:\n data:\n hyperparameters:\n \"\"\"\n self.hidden_layer = []\n self.hidden_layer_wc = []\n self.output_layer = dict()\n self.output_layer_wc = dict()\n self.output_classes = []\n self.learning_rate = .1\n self.label_names = dict()\n self.str_to_enum = dict()\n self.momentum = 0.1\n self.node_count = 16\n self.train_percent = .8\n self.vs_percent = .2\n\n def train(self, features, labels):\n \"\"\"\n \"\"\"\n self.label_names = labels.enum_to_str[0]\n self.str_to_enum = labels.str_to_enum[0]\n\n # N = number of features, M = number of outputs\n # create N nodes for hidden layer with N weights\n for i in range(0, self.node_count):\n np.random.seed()\n #self.hidden_layer.append(np.random.uniform(1.0, 1.0, features.shape[1]+1))\n self.hidden_layer.append(np.random.uniform(-1.0, 1.0, features.shape[1]+1))\n self.hidden_layer_wc.append(np.zeros(features.shape[1]+1))\n\n # FOR TWO HIDDEN LAYER MLP\n # # create N nodes for hidden layer with N weights\n # for i in range(0, self.node_count):\n # np.random.seed()\n # #self.hidden_layer.append(np.random.uniform(1.0, 1.0, features.shape[1]+1))\n # self.hidden_layer2.append(np.random.uniform(-1.0, 1.0, (self.node_count)+1))\n # self.hidden_layer_wc2.append(np.zeros((self.node_count)+1))\n\n # create M nodes for output layer with N weights\n for i in range(0, len(labels.enum_to_str[0])):\n np.random.seed()\n #self.output_layer[self.label_names[i]] = np.random.uniform(1.0, 1.0, self.node_count+1)\n self.output_layer[self.label_names[i]] = np.random.uniform(-1.0, 1.0, (self.node_count)+1)\n self.output_layer_wc[self.label_names[i]] = np.zeros(self.node_count+1)\n\n #for testing that arrays are created right\n #print(self.hidden_layer)\n #print(self.output_layer)\n\n #store arff's locally for splitting\n self.f = features\n self.l = labels\n\n\n\n #useful variables\n counter = 0\n epoch_count = 0\n bssf = math.inf\n\n #split for VS set\n train_size = int(self.train_percent * self.f.shape[0])\n\n train_features = self.f.create_subset_arff(slice(None,train_size))\n train_labels = self.l.create_subset_arff(slice(None,train_size))\n\n test_features = self.f.create_subset_arff(slice(train_size, None))\n test_labels = self.l.create_subset_arff(slice(train_size, None))\n print(train_features.instance_count)\n\n #for writing to file\n f = open(\"mse_vs_epoch.csv\", \"w\")\n g = open(\"acc_vs_epch.csv\", \"w\")\n #main training loop while not improving\n while(1):\n #for l in range(0,5):\n #shuffle training set\n train_features.shuffle(train_labels)\n\n #main for loop through each feature in training set\n #for i in range(0, 2):\n for i in range(0, train_features.instance_count):\n\n hidden_outputs = []\n outputs = []\n err_arr = []\n\n #feature values of that instance\n feat_arr = train_features.data[i]\n\n # add threshold value to features array\n feat_arr = np.append(feat_arr, 1.0)\n\n #get net for each hidden node and add to hidden_outputs\n for j in range (0, len(self.hidden_layer)):\n net = np.dot(self.hidden_layer[j], feat_arr)\n # print(\"net at hidden node \" + str(j) + \": \" + str(net))\n output = self.sigmoid(net)\n # print(\"sigmoid result: \" + str(output))\n hidden_outputs.append(output)\n\n # add threshold value to features array\n hidden_outputs.append(1.0)\n\n # FOR TWO HIDDEN LAYER MLP\n # #get net for each hidden node and add to hidden_outputs\n # for j in range (0, len(self.hidden_layer2)):\n # net = np.dot(self.hidden_layer2[j], hidden_outputs)\n # # print(\"net at hidden node 
\" + str(j) + \": \" + str(net))\n # output = self.sigmoid(net)\n # # print(\"sigmoid result: \" + str(output))\n # hidden_outputs2.append(output)\n #\n # # add threshold value to features array\n # hidden_outputs2.append(1.0)\n #\n # # ->numpy array\n # hidden_outputs2 = np.array(hidden_outputs2)\n hidden_outputs = np.array(hidden_outputs)\n\n #get net for each output node and add to outputs\n for j in range(0, len(self.output_layer)):\n #print(self.output_layer[self.label_names[j]])\n #print(hidden_outputs)\n net = np.dot(self.output_layer[self.label_names[j]], hidden_outputs)\n # print(\"net at output node \" + str(j) + \": \" + str(net))\n output = self.sigmoid(net)\n # print(\"sigmoid result: \" + str(output))\n outputs.append(output)\n\n # ->numpy array\n outputs = np.array(outputs)\n\n #calc the expected array of outputs given what the classification is\n expected_arr = self.calc_expected(train_labels.data[i,0], labels)\n\n #calc error for output layer, update deltas\n for j in range(0, len(outputs)):\n err = self.o_err(outputs.item(j), expected_arr.item(j))\n # print(\"Error for output node \"+str(j)+\": \"+str(err))\n for k in range(0, len(hidden_outputs)):\n # print(hidden_outputs.item(k))\n # print(self.output_layer_wc[self.label_names[j]][k])\n wc = (self.learning_rate * hidden_outputs.item(k) * err) + self.momentum*self.output_layer_wc[self.label_names[j]][k]\n # print(\"WC \"+str(k)+\"-\"+str(j)+\": \"+str(wc))\n np.put(self.output_layer_wc[self.label_names[j]], k, wc)\n err_arr.append(err)\n\n # ->numpy array\n err_arr = np.array(err_arr)\n\n # FOR TWO HIDDEN LAYER MLP\n # #calc error for hidden layer, update deltas\n # for j in range(0, len(self.hidden_layer2)):\n # #update hidden layer node\n # err = self.err(j, hidden_outputs2.item(j), err_arr)\n # # print(\"Error for hidden node \"+str(j)+\": \"+str(err))\n # for k in range(0, len(feat_arr)):\n # wc = self.learning_rate * feat_arr[k] * err + self.momentum*self.hidden_layer_wc2[j][k]\n # # print(\"WC \"+str(k)+\"-\"+str(j)+\": \"+str(wc))\n # np.put(self.hidden_layer_wc2[j], k, wc)\n\n #calc error for hidden layer, update deltas\n for j in range(0, len(self.hidden_layer)):\n #update hidden layer node\n err = self.err(j, hidden_outputs.item(j), err_arr)\n # print(\"Error for hidden node \"+str(j)+\": \"+str(err))\n for k in range(0, len(feat_arr)):\n wc = self.learning_rate * feat_arr[k] * err + self.momentum*self.hidden_layer_wc[j][k]\n # print(\"WC \"+str(k)+\"-\"+str(j)+\": \"+str(wc))\n np.put(self.hidden_layer_wc[j], k, wc)\n self.hidden_layer[j] = self.hidden_layer[j]+self.hidden_layer_wc[j]\n\n #update weights\n for j in range(0, len(self.output_layer)):\n self.output_layer[self.label_names[j]] = self.output_layer[self.label_names[j]] + self.output_layer_wc[self.label_names[j]]\n #break\n #break\n\n #after and epoch evaluate accuracy using validation set\n epoch_count += 1\n mse = self.measure_accuracy(test_features,test_labels, eval_method=\"sse\")/2\n mse2 = self.measure_accuracy(train_features, train_labels, eval_method=\"sse\")/2\n acc = self.measure_accuracy(test_features, test_labels)\n f.write(str(epoch_count)+\", \"+str(mse)+\", \"+str(mse2)+\"\\n\")\n g.write(str(epoch_count)+\", \"+str(acc)+\"\\n\")\n\n if mse < bssf:\n bssf = mse\n counter = 0\n else:\n if(counter >= 5):\n break\n counter = counter + 1\n\n #mse = self.measure_accuracy(test_features,test_labels, eval_method=\"sse\")/2\n #mse2 = self.measure_accuracy(train_features, train_labels, eval_method=\"sse\")/2\n 
#f.write(str(self.momentum)+\",\"+str(mse)+\",\"+str(mse2))\n        #g.write(str(self.momentum)+\",\"+str(epoch_count)+\"\\n\")\n\n\n    #using the expected value, assigns each calculation a 0 or 1 for whether it was expected or not\n    def calc_expected(self, expected, labels):\n        expected_arr = []\n        #print(labels.enum_to_str[0][expected])\n        #print(self.label_names[0])\n        #print(self.label_names[1])\n        for i in range(0, len(self.output_layer)):\n            if labels.enum_to_str[0][expected] == self.label_names[i]:\n                expected_arr.append(1)\n            else:\n                expected_arr.append(0)\n        return np.array(expected_arr)\n\n    #sigmoid function\n    def sigmoid(self, net):\n\n        temp = np.exp(-net)\n        return 1/(1+temp)\n\n    def err(self, me, output, passed_errs):\n\n        net_err = 0\n        for k in range(0, len(self.output_layer)):\n            net_err += passed_errs.item(k) * self.output_layer[self.label_names[k]][me]\n        return float(net_err)*output*(1-output)\n\n    #get error of output layer\n\n    def o_err(self, actual, expected):\n\n        return (float(expected) - actual) * actual * (1 - actual)\n\n    def predict(self, features):\n        \"\"\"\n        This function runs 1 instance through the model and returns the model's predicted label\n        TO DO: Add vectorization option\n        Args:\n            features (array-like): Array of feature values\n        Returns:\n            array-like: 1D array of predictions (1 for each output class)\n        \"\"\"\n        #print(self.hidden_layer)\n        #print(self.output_layer)\n        hidden_outputs = []\n        outputs = []\n\n        feat_arr = features.tolist()\n        feat_arr.append(1)\n\n        # output of MLP for this instance\n        out = 0\n\n        #get net for each hidden node and add to hidden_outputs\n        for j in range (0, len(self.hidden_layer)):\n            # print(\"features array\")\n            # print(feat_arr)\n            net = sum(k[0] * k[1] for k in zip(self.hidden_layer[j], feat_arr))\n            # print(\"net at hidden node \" + str(j) + \": \" + str(net))\n            output = self.sigmoid(net)\n            # print(\"sigmoid result: \" + str(output))\n            hidden_outputs.append(output)\n\n        #value for threshold weight\n        hidden_outputs.append(1)\n        for j in range(0, len(self.output_layer)):\n            # print(\"Hidden outputs:\")\n            # print(hidden_outputs)\n            net = sum(k[0] * k[1] for k in zip(self.output_layer[self.label_names[j]], hidden_outputs))\n            # print(\"net at output node \" + str(j) + \": \" + str(net))\n            output = self.sigmoid(net)\n            # print(\"sigmoid result: \" + str(output))\n            outputs.append(output)\n\n        bssf = -math.inf\n        for k in range(0,len(outputs)):\n            if(outputs[k] > bssf):\n                bssf = outputs[k]\n                out = self.label_names[k]\n        #print(outputs)\n        return [self.str_to_enum[out]]\n","sub_path":"Lab 2/toolkit/multilayer_perceptron_learner0.py","file_name":"multilayer_perceptron_learner0.py","file_ext":"py","file_size_in_byte":12624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"150385000","text":"import sys, os\nfrom optparse import OptionParser\nfrom threading import Thread\n\nparser = OptionParser(usage=\"Usage: python3 %prog codeVersion\")\n(opt,args) = parser.parse_args()\n\ndidConda = input(\"Press enter if you already did conda activate withRoot, or type anything to exit and do it first\\n\")\nif(didConda):\n    sys.exit()\n\ndatasetList = []\n\ncodeVersion = sys.argv[1]\n#just the number, like 18p2\n \nfor fname in os.listdir(\".\") :\n    if (codeVersion in fname) and (\".root\" in fname) :\n        datasetList.append(fname)\n\ndef task(i):\n    runPngFromRoot3 = \"python3 pngsFromRootFilesJustHistos.py \"+i+\" 3\"\n    os.system(runPngFromRoot3)\n    os.system(\"cp forWebpage/* \"+i[0:-5]+\"_Bin3/.\")\n    runPngFromRoot25 = \"python3 pngsFromRootFilesJustHistos.py 
\"+i+\" 25\"\n# os.system(runPngFromRoot25)\n# os.system(\"cp forWebpage/* \"+i[0:-5]+\"_Bin25/.\")\n \n scpBin3 = \"scp -r \"+i[0:-5]+\"_Bin3 tvami@lxplus.cern.ch:/eos/home-t/tvami/www/projects/HSCP/2022CodeV\"+codeVersion+\"/.\"\n# os.system(scpBin3)\n scpBin25 = \"scp -r \"+i[0:-5]+\"_Bin25 tvami@lxplus.cern.ch:/eos/home-t/tvami/www/projects/HSCP/2022CodeV\"+codeVersion+\"/.\"\n# os.system(scpBin25)\n print(\"Done for sample \"+i)\n\nfor dataset in datasetList:\n t = Thread(target=task, args=(dataset,))\n t.start()\n","sub_path":"Analyzer/test/Tamas/plotHistosMT.py","file_name":"plotHistosMT.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"87144405","text":"# -*- coding: utf-8 -*-\n\"\"\"\ntest testdata\n\nlink -- http://docs.python.org/library/unittest.html\n\nto run on the command line:\npython -m unittest test_testdata[.ClassTest[.test_method]]\n\"\"\"\nfrom __future__ import unicode_literals, division, print_function, absolute_import\nimport os\nimport time\n\nfrom testdata.compat import *\n\nfrom . import TestCase, testdata\n\n\nclass TestdataTest(TestCase):\n def test_environment(self):\n self.assertFalse(\"TDT_ENVIRON_VAL\" in os.environ)\n with testdata.environment(TDT_ENVIRON_VAL=\"foobar\"):\n self.assertEqual(\"foobar\", os.environ[\"TDT_ENVIRON_VAL\"])\n self.assertFalse(\"TDT_ENVIRON_VAL\" in os.environ)\n\n self.assertFalse(hasattr(testdata, \"TDT_ENVIRON_VAL\"))\n with testdata.environment(testdata, TDT_ENVIRON_VAL=\"foobar\"):\n self.assertEqual(\"foobar\", testdata.TDT_ENVIRON_VAL)\n self.assertFalse(hasattr(testdata, \"TDT_ENVIRON_VAL\"))\n\n class Foo(object):\n bar = 1\n che = 2\n\n f = Foo()\n with testdata.environment(f, bar=3):\n self.assertEqual(3, f.bar)\n self.assertEqual(1, f.bar)\n self.assertEqual(2, f.che)\n\n def test_wait(self):\n start = time.time()\n def callback():\n stop = time.time()\n return (stop - start) > 0.5\n testdata.wait(callback)\n stop = time.time()\n self.assertTrue(stop - start > 0.5)\n\n start = time.time()\n def callback(): return False\n with self.assertRaises(RuntimeError):\n testdata.wait(callback, timeout=0.5)\n\n","sub_path":"tests/testdata_test.py","file_name":"testdata_test.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"454171606","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n##############################################\n# The MIT License (MIT)\n# Copyright (c) 2018 Kevin Walchko\n# see LICENSE for full details\n##############################################\n\nfrom pygecko.multiprocessing import geckopy\nfrom pygecko.multiprocessing import GeckoSimpleProcess\nimport socket\nimport time\n\n\ndef chew_up_cpu(interval):\n # chew up some cpu\n start = time.time()\n while (time.time() - start) < interval: 5*5\n\n\n\ndef publisher(**kwargs):\n geckopy.init_node(**kwargs)\n rate = geckopy.Rate(2)\n\n topic = kwargs.get('topic')\n p = geckopy.Publisher(topic)\n start = time.time()\n cnt = 0\n while not geckopy.is_shutdown():\n msg = {'time': time.time() - start}\n p.pub(topic, msg) # topic msg\n\n geckopy.logdebug('[{}] published msg'.format(cnt))\n cnt += 1\n rate.sleep()\n print('pub bye ...')\n\n\n\n# def f(topic, msg):\n# # print(\"recv[{}]: {}\".format(topic, msg))\n# geckopy.log(msg)\n# chew_up_cpu(.1)\n\nclass Callback(object):\n \"\"\"\n So the idea here is instead of using a simple callback function\n like what is 
commented out above, I need to setup some stuff\n and have it available during the callback. A simple class\n allows me to do this\n \"\"\"\n def __init__(self, name):\n self.name = name\n def __del__(self):\n self.bye()\n def callback(self, topic, msg):\n geckopy.loginfo(\"{}\".format(msg))\n chew_up_cpu(.1)\n def bye(self):\n print(\"*\"*30)\n print(\" {} shutting down ...\".format(self.name))\n print(\"*\"*30)\n\ndef subscriber(**kwargs):\n geckopy.init_node(**kwargs)\n\n\n topic = kwargs.get('topic')\n c = Callback(topic)\n geckopy.Subscriber([topic], c.callback)\n # geckopy.on_shutdown(c.bye)\n\n geckopy.spin(20) # it defaults to 100hz, this is just to slow it down\n print('sub bye ...')\n\n\nif __name__ == '__main__':\n # normally you wouldn't run this here, but is running else where\n # this is just for testing\n # from pygecko.transport import GeckoCore\n # core = GeckoCore()\n # core.start()\n\n # although I don't do anything with procs, because I reuse the variables\n # p and s below, they will kill the processes when new process are created\n # using those names. Appending them to procs allows me to keep them alive\n # until the program ends\n procs = []\n\n for topic in ['ryan', 'mike', 'sammie']:\n # info to pass to processes\n args = {\n 'topic': topic,\n \"geckocore\": {\n \"host\": 'localhost'\n }\n }\n\n p = GeckoSimpleProcess()\n p.start(func=publisher, name='pub_{}'.format(topic), kwargs=args)\n procs.append(p)\n\n s = GeckoSimpleProcess()\n s.start(func=subscriber, name='sub_{}'.format(topic), kwargs=args)\n procs.append(s)\n\n while True:\n try:\n time.sleep(1)\n except KeyboardInterrupt:\n print('main process got ctrl-c')\n break\n","sub_path":"python/dev/new-core/tcp.py","file_name":"tcp.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"310455018","text":"import os\nfrom vyshkant.src.ptx.ptx_iterator import PtxIterator\nfrom vyshkant.src.confirmation.confirmator import Confirmator\n\nclass Splitter:\n\n def split(self, paths_to_input_files, path_to_output_directory):\n\n if not Confirmator().confirm('You are about to split the files: `' + ', '.join(paths_to_input_files) + '` into separate fragments. 
Continue?'):\n return\n\n for path_to_input_file in paths_to_input_files:\n print('Splitting file `' + path_to_input_file + '`')\n self.__single_split(path_to_input_file, path_to_output_directory)\n\n def __single_split(self, path_to_input_file, path_to_output_directory):\n\n part_number = 1\n\n input_file_name_without_extension = os.path.splitext(os.path.basename(path_to_input_file))[0]\n\n with open(path_to_input_file, 'r') as input_file:\n\n ptx_iterator = PtxIterator()\n\n current_row_index = 0\n\n first_multiplier = None\n\n output_file = None\n\n expected_header_row_index = 0\n for row in input_file:\n\n print('Processing next row: ' + str(current_row_index))\n\n if ptx_iterator.is_first_multiplier_row(current_row_index, expected_header_row_index):\n first_multiplier = int(row)\n\n if output_file is not None:\n output_file.close()\n part_number += 1\n\n output_file = open(os.path.join(path_to_output_directory, input_file_name_without_extension + '_' + str(part_number) + '.ptx'), 'w')\n elif ptx_iterator.is_second_multiplier_row(current_row_index, expected_header_row_index):\n expected_header_row_index = ptx_iterator.get_expected_header_row_index(current_row_index, first_multiplier, int(row))\n\n output_file.write(row)\n\n current_row_index += 1\n\n if output_file is not None:\n output_file.close()\n","sub_path":"vyshkant/src/ptx/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"193868200","text":"# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport sys\nimport argparse\nimport json\nimport logging\nimport os\nfrom sagemaker_training import environment\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n\ndef main():\n    print(\"Starting\")\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"--nproc-per-node\", type=int, default=32)\n    parser.add_argument(\"--nnodes\", type=int, default=1)\n    parser.add_argument(\"--master-port\", type=str, default=\"55555\")\n    parser.add_argument(\n        \"--nccl-socket-ifname\", type=str, default=os.environ[\"SM_NETWORK_INTERFACE_NAME\"]\n    )\n    parser.add_argument(\"--train-script-args\", type=str, default=\" \")\n    parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\n\n    args = parser.parse_args()\n    env = environment.Environment()\n    master_addr = env.master_hostname\n    master_port = args.master_port\n    current_host = env.current_host\n\n    hosts = args.hosts\n    node_rank = hosts.index(current_host)\n\n    nccl_socket_ifname = args.nccl_socket_ifname\n\n    torchrun_cmd = f'FI_EFA_FORK_SAFE=\"1\" FI_EFA_USE_DEVICE_RDMA=\"1\" FI_PROVIDER=\"efa\" NCCL_DEBUG=\"INFO\" NCCL_INIT=\"INFO\" NCCL_DEBUG_SUBSYS=\"ALL\" NCCL_SOCKET_IFNAME={nccl_socket_ifname} torchrun --nproc_per_node={args.nproc_per_node} --nnodes={args.nnodes} --node_rank={node_rank} --master_addr={master_addr} --master_port={master_port} all_reduce.py {args.train_script_args}'\n    logger.info(f\"Calling {torchrun_cmd}\")\n    os.system(torchrun_cmd)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"test/sagemaker_tests/pytorch/training/resources/neuron/all_reduce/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"585062837","text":"#Let the longest distance between any two nodes be maxdist; the minimum height tree then has height ceil(maxdist/2), and its root lies on the path between the two leaf endpoints x, y of that longest path\n# (if the root were not on that path, it would first have to reach a node on the path and only then reach x and y, so the height would exceed ceil(maxdist/2))\n\n#The root should actually sit at the middle of the x-y path\n#Suppose the nodes of the longest path are p1, p2, ..., pm, so the path length is m-1\n# If m is even, the root of the minimum height tree is node m/2 or m/2+1, and the maximum height is m/2\n# If m is odd, the root is node (m+1)/2, and the maximum height is (m-1)/2\n\n#Finding the longest path: pick any node p, use DFS/BFS to find the endpoint x farthest from p, then find the endpoint y farthest from x; dist[x][y] is the answer\n\nfrom typing import List\nfrom collections import deque\nclass Solution:\n    def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n        if n==1:\n            return [0]\n        \n        #convert the edge list into per-node adjacency information, so a node's neighbors can be accessed by index \n        g = [[]for _ in range(n)]\n        for x,y in edges:\n            g[x].append(y)\n            g[y].append(x)\n        parents = [0]*n\n\n        def bfs(start:int):\n            vis = [False]*n\n            vis[start] = True\n            q = deque([start])\n            while(q):\n                x = q.popleft()\n                for y in g[x]:\n                    if not vis[y]:\n                        vis[y] = True\n                        parents[y] = x#record the path; within a single BFS nothing gets overwritten, because a child never has two parents\n                        q.append(y)\n            return x #the last node enqueued is always the one farthest from the starting node\n\n        x = bfs(0) #x: the node farthest from node 0\n        y = bfs(x) #y: the node farthest from x; note that with this call order, parents stores the tree rooted at x\n\n        path=[]\n        parents[x] =-1\n        while y!=-1:\n            path.append(y)\n            y = parents[y]\n        m = len(path)\n        return [path[m//2]] if m%2 else [path[m//2-1],path[m//2]]\n\n\n    \n    \n","sub_path":"310. 最小高度树/310. 
最小高度树.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"58013255","text":"import torch\nfrom torch import nn\n\nclass BasicModel(torch.nn.Module):\n \"\"\"\n This is a basic backbone for SSD.\n The feature extractor outputs a list of 6 feature maps, with the sizes:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n where \"output_channels\" is the same as cfg.BACKBONE.OUT_CHANNELS\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n output_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS #[128, 256,128,128,64,64]\n self.output_channels = output_channels\n image_channels = cfg.MODEL.BACKBONE.INPUT_CHANNELS\n self.output_feature_shape = cfg.MODEL.PRIORS.FEATURE_MAPS\n \n #38x38\n self.conv1 = nn.Sequential( \n nn.Conv2d(\n in_channels=image_channels,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(32),\n nn.MaxPool2d(kernel_size=2,stride=2),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(kernel_size=2,stride=2),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=output_channels[0],\n kernel_size=3,\n stride=2,\n padding=1),\n # nn.BatchNorm2d(output_channels[0])\n )\n \n #19x19\n self.conv2 = nn.Sequential( \n nn.ReLU(),\n nn.Conv2d(\n in_channels=output_channels[0],\n out_channels=128,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=128,\n out_channels=output_channels[1],\n kernel_size=3,\n stride=2,\n padding=1),\n # nn.BatchNorm2d(output_channels[1])\n )\n \n #9x9\n self.conv3 = nn.Sequential( \n nn.ReLU(),\n nn.Conv2d(\n in_channels=output_channels[1],\n out_channels=256,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=256,\n out_channels=output_channels[2],\n kernel_size=3,\n stride=2,\n padding=1),\n # nn.BatchNorm2d(output_channels[2])\n )\n \n #5x5\n self.conv4 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(\n in_channels=output_channels[2],\n out_channels=128,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=128,\n out_channels=output_channels[3],\n kernel_size=3,\n stride=2,\n padding=1),\n # nn.BatchNorm2d(output_channels[3])\n ) \n \n #3x3\n self.conv5 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(\n in_channels=output_channels[3],\n out_channels=128,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=128,\n out_channels=output_channels[4],\n kernel_size=3,\n stride=2,\n padding=1),\n # nn.BatchNorm2d(output_channels[4])\n )\n \n #1x1\n self.conv6 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(\n in_channels=output_channels[4],\n out_channels=128,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=128,\n out_channels=output_channels[5],\n kernel_size=3,\n stride=1,\n padding=0)\n )\n \n self.convs = [self.conv1, self.conv2, self.conv3,self.conv4, self.conv5,self.conv6]\n \n \n def forward(self, x):\n \"\"\"\n The forward functiom should output features with shape:\n 
[shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n We have added assertion tests to check this, iteration through out_features,\n where out_features[0] should have the shape:\n shape(-1, output_channels[0], 38, 38),\n \"\"\"\n out_features = []\n \n #input image into first conv layer\n out_features.append(self.conv1(x))\n \n #take in output of prev conv as input to current one\n #so self.conv2(self.conv1) etc..\n for i in range(1,len(self.convs)):\n out_features.append(self.convs[i](out_features[i-1]))\n \n \n for idx, feature in enumerate(out_features):\n w, h = self.output_feature_shape[idx]\n expected_shape = (self.output_channels[idx], h, w)\n assert feature.shape[1:] == expected_shape, \\\n f\"Expected shape: {expected_shape}, got: {feature.shape[1:]} at output IDX: {idx}\"\n return tuple(out_features)\n\n","sub_path":"hw4 - CNN & SSD/SSD/ssd/modeling/backbone/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"132387237","text":"#!/usr/bin/env python\nimport os\nimport glob\nimport shutil\nimport subprocess\n\ndef backup(path):\n if not os.path.exists(path):\n return None\n \n i = 0\n while True:\n if i == 0:\n bak = path + \".bak\"\n else:\n bak = path + \".bak.{0}\".format(i)\n\n if not os.path.exists(bak):\n os.rename(path, bak)\n return bak\n\n i += 1\n\ndirpath = os.path.abspath(os.path.dirname(__file__))\n\nhome = os.environ['HOME']\n\nconf_files = [p for p in glob.glob(os.path.join(dirpath, \".*\")) if os.path.isfile(p)]\n\nmessage = \\\n\"\"\"{1}\n\nDo you create symbolic links to the files in {0}? 
(y/n) \"\"\".format(home, \"\\n\".join(conf_files))\n\nres = raw_input(message)\n\nif res != \"y\":\n exit()\n\nfor p in conf_files:\n base = os.path.basename(p)\n dst = os.path.join(home, base)\n cmd = \"ln -s {0} {1}\".format(p, dst)\n \n bak = backup(dst)\n if bak:\n msg = \"backup {0} to {1}\".format(dst, bak)\n print(msg)\n\n print(cmd)\n subprocess.check_call(cmd.split(\" \"))\n\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"476804153","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Time : 2018/11/10 11:03\n@Author : jzs\n@File : __init__.py.py\n@Software : PyCharm\n@Description: 初始化app包\n\"\"\"\nimport logging\n\nfrom flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom config import config, Config\n\ndb = SQLAlchemy()\n\nbootstrap = Bootstrap()\n\nlogin_manager = LoginManager()\n\nlogger = None\n\n\ndef config_extensions(app: 'Flask'):\n \"\"\"\n # 初始化其他\n :param app:\n :return:\n \"\"\"\n # 初始化数据库\n db.init_app(app)\n\n # 初始化bootstrap\n bootstrap.init_app(app)\n\n # 初始化用户安装配置\n login_manager.init_app(app)\n login_manager.session_protection = \"strong\"\n login_manager.login_view = \"login\"\n login_manager.login_message = \"需要先登录\"\n login_manager.login_message_category = \"info\"\n\n # 初始化日志\n handler = logging.FileHandler(Config.LOG_FILE_PATH, encoding='UTF-8')\n handler.setLevel(logging.DEBUG)\n logging_format = logging.Formatter(\n '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')\n handler.setFormatter(logging_format)\n app.logger.addHandler(handler)\n global logger\n logger = app.logger\n\n\ndef create_app(config_name: str):\n \"\"\"\n 创建app实例对象,初始化项目app中的所有配置\n :param config_name:\n :return:\n \"\"\"\n # 配置蓝本\n app = Flask(__name__)\n # 加载配置\n app.config.from_object(config[config_name])\n # 执行额外的初始化\n config.get(config_name).init_app(app)\n\n # 配置其他\n config_extensions(app)\n\n # 配置蓝本\n from app.views import config_blueprint\n config_blueprint(app)\n\n return app\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"455960873","text":"import os\nfrom time import perf_counter\n\nfrom numba.typed import List\nimport numpy\n\nimport fibonacci\nimport matrix_multiplication\nimport quicksort\n\n\ndef chop_suffix_from_path(path, suffix):\n if suffix and path.endswith(suffix):\n return path[:-len(suffix)]\n return path\n\n\ndef check_if_array_is_sorted(array: list):\n if(all(array[i] <= array[i + 1] for i in range(len(array) - 1))): \n return True\n return False\n\n\ndef run_test(suite, ntests):\n tests={\n '1': test_fibonacci,\n '2': test_quicksort,\n '3': test_matrix_multiplication,\n '4': test_all\n }\n test = tests.get(suite, lambda: 'Invalid test suite')\n return test(ntests)\n\n\ndef test_fibonacci(ntests):\n fibarg = 20\n base_path = chop_suffix_from_path(os.path.dirname(os.path.abspath(__file__)), '/benchmarks/Python++/Numba')\n path = base_path + '/results/fibonacci/numba_fibonacci_benchmark.txt'\n\n assert(fibonacci.fib(fibarg) == 6765)\n\n with open(path, 'w') as fd:\n for _ in range(ntests):\n start = perf_counter()\n fibonacci.fib(fibarg)\n end = perf_counter()\n\n time = end - start\n fd.write(f'{time:.6f}\\n')\n\n\ndef 
test_quicksort(ntests):\n arraysize = 1000000\n base_path = chop_suffix_from_path(os.path.dirname(os.path.abspath(__file__)), '/benchmarks/Python++/Numba')\n read_path = base_path + '/utils/data/integers.txt'\n\n with open(read_path, 'r') as fdread:\n test_array = List()\n for i in fdread.readlines():\n test_array.append(int(i))\n quicksort.quicksort(test_array, 0, arraysize - 1)\n assert(check_if_array_is_sorted(test_array))\n del test_array\n\n write_path = base_path + '/results/quicksort/numba_quicksort_benchmark.txt'\n with open(write_path, 'w') as fdwrite:\n for _ in range(ntests):\n fdread.seek(0)\n array = List()\n for i in fdread.readlines():\n array.append(int(i))\n start = perf_counter()\n quicksort.quicksort(array, 0, arraysize - 1)\n end = perf_counter()\n del array\n \n time = end - start\n fdwrite.write(f'{time:.6f}\\n')\n\n\ndef test_matrix_multiplication(ntests):\n matrix_size = 200\n base_path = chop_suffix_from_path(os.path.dirname(os.path.abspath(__file__)), '/benchmarks/Python++/Numba')\n write_path = base_path + '/results/matrix_multiplication/numba_matrix_multiplication_benchmark.txt'\n read_path = base_path + '/utils/data/matmul_assert.txt'\n\n A = B = [[row for row in range(matrix_size)] for col in range(matrix_size)]\n R = [[0] * matrix_size for _ in range(matrix_size)]\n A = numpy.matrix(A)\n B = numpy.matrix(B)\n R = numpy.matrix(R)\n matrix_multiplication.matmul(A, B, R)\n C = numpy.loadtxt(read_path)\n assert(R.all() == C.all())\n del R, C\n\n with open(write_path, 'w') as fd:\n for _ in range(ntests):\n R = [[0] * matrix_size for _ in range(matrix_size)]\n R = numpy.matrix(R)\n start = perf_counter()\n matrix_multiplication.matmul(A, B, R)\n end = perf_counter()\n del R\n\n time = end - start\n fd.write(f'{time:.6f}\\n')\n\n\ndef test_all(ntests):\n test_fibonacci(ntests)\n test_quicksort(ntests)\n test_matrix_multiplication(ntests)","sub_path":"benchmarks/Python++/Numba/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"559451199","text":"import json\nimport logging\nfrom traceback import format_exc\n\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.contrib.auth.signals import user_logged_out\nfrom django.contrib.auth.signals import user_login_failed\n\nfrom .models import SystemEventModel\n\nlogger = logging.getLogger(__name__)\n\n\ndef format_log_message(type, user, request_info, other_info):\n return \"{}: pk: {}, username: {}, request info: {}{}\".format(\n type,\n getattr(user, \"pk\", \"\"),\n getattr(user, \"username\", \"\"),\n request_info,\n other_info\n )\n\n\ndef request_info(request):\n return json.dumps({\n \"method\": request.method,\n \"path\": request.get_full_path(),\n \"REMOTE_ADDR\": request.META.get(\"REMOTE_ADDR\", \"\"),\n \"HTTP_USER_AGENT\": request.META.get(\"HTTP_USER_AGENT\", \"\"),\n \"HTTP_CLIENT_IP\": request.META.get(\"HTTP_CLIENT_IP\", \"\"),\n \"HTTP_X_FORWARDED\": request.META.get(\"HTTP_X_FORWARDED\", \"\"),\n \"HTTP_FORWARDED_FOR\": request.META.get(\"HTTP_FORWARDED_FOR\", \"\"),\n \"HTTP_FORWARDED\": request.META.get(\"HTTP_FORWARDED\", \"\"),\n \"HTTP_X_REAL_IP\": request.META.get(\"HTTP_X_REAL_IP\", \"\"),\n })\n\n\ndef response_info(response):\n return json.dumps({\n \"response__status_code\": response.status_code\n })\n\n\ndef exception_info(exception):\n return json.dumps({\n \"exception\": format_exc(exception)\n })\n\n\nclass LoginEventMonitor(object):\n\n def 
__init__(self, extra_user_attr_infos=None):\n self.started = False\n self.extra_user_attr_infos = extra_user_attr_infos\n\n def get_extra_user_info(self, user):\n info = \"\"\n if self.extra_user_attr_infos:\n for extra_user_attr_info in self.extra_user_attr_infos:\n info += \", {}: {}\".format(\n extra_user_attr_info.get(\"title\", \"\"),\n getattr(user, extra_user_attr_info.get(\"attr\", \"\"))\n )\n return info\n\n def logged_in(self, sender, request, user, **kwargs):\n logger.info(format_log_message(\"logged_in\", user, request_info(request), self.get_extra_user_info(user)))\n\n def logged_out(self, sender, request, user, **kwargs):\n logger.info(format_log_message(\"logged_out\", user, request_info(request), self.get_extra_user_info(user)))\n\n def login_failed(self, sender, credentials, **kwargs):\n logger.info(format_log_message(\"login_failed\", None, None, json.dumps(credentials)))\n\n def connect(self):\n if not self.started:\n user_logged_in.connect(self.logged_in, weak=False, dispatch_uid=\"dsl-logged-in\")\n user_logged_out.connect(self.logged_out, weak=False, dispatch_uid=\"dsl-logged-out\")\n user_login_failed.connect(self.login_failed, weak=False, dispatch_uid=\"dsl-logged-failed\")\n self.started = True\n return self\n\n def disconnect(self):\n if self.started:\n user_logged_in.disconnect(self.logged_in, weak=False, dispatch_uid=\"dsl-logged-in\")\n user_logged_out.disconnect(self.logged_out, weak=False, dispatch_uid=\"dsl-logged-out\")\n user_login_failed.disconnect(self.login_failed, weak=False, dispatch_uid=\"dsl-logged-failed\")\n self.started = False\n return self\n\n def destroy(self):\n self.disconnect()\n\n\nclass LoginEventPersistMonitor(LoginEventMonitor):\n\n def logged_in(self, sender, request, user, **kwargs):\n SystemEventModel.objects.create(\n type=SystemEventModel.TYPES.logged_in,\n user_id=user.id,\n user_class=\"{0._meta.app_label}.{0.__class__.__name__}\".format(user),\n request_info=request_info(request),\n )\n super(LoginEventPersistMonitor, self).logged_in(sender, request, user, **kwargs)\n\n def logged_out(self, sender, request, user, **kwargs):\n if user and user.is_authenticated():\n event_kwargs = dict(\n type=SystemEventModel.TYPES.logged_out,\n user_id=user.id,\n user_class=\"{0._meta.app_label}.{0.__class__.__name__}\".format(user),\n request_info=request_info(request),\n other_info=kwargs.get(\"other_info\", {})\n )\n if \"created_at\" in kwargs:\n event_kwargs.update(created_at=kwargs.get(\"created_at\"))\n SystemEventModel.objects.create(**event_kwargs)\n super(LoginEventPersistMonitor, self).logged_out(sender, request, user, **kwargs)\n\n def login_failed(self, sender, credentials, **kwargs):\n SystemEventModel.objects.create(\n type=SystemEventModel.TYPES.login_failed,\n other_info=json.dumps(credentials)\n )\n super(LoginEventPersistMonitor, self).login_failed(sender, credentials, **kwargs)\n","sub_path":"django_save_logger/monitors.py","file_name":"monitors.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"140396972","text":"import torch\nimport copy\nimport pdb\n\n\ndef get_apn(inputs, targets, features, idx, t2i, all_idx_l):\n\n pos = torch.zeros([targets.shape[0], features.shape[1]])\n neg = torch.zeros([targets.shape[0], features.shape[1]])\n anchor = torch.zeros([targets.shape[0], features.shape[1]])\n sim_mat = torch.mm(inputs, features.t().cuda())\n\n for i in range(targets.shape[0]):\n ind = idx[i]\n anchor[i] = features[ind]\n 
sample = sim_mat[i, :]\n        target_iden = str(targets[i].item())\n        pos_ind_l = copy.deepcopy(t2i[target_iden])\n        neg_ind_l = list(set(all_idx_l) - set(pos_ind_l))\n        pos_ind_l.remove(ind)\n        _, pos_idx = torch.min(sample[pos_ind_l], dim=0)\n        pos_ind = pos_ind_l[pos_idx]\n        _, neg_idx = torch.max(sample[neg_ind_l], dim=0)\n        neg_ind = neg_ind_l[neg_idx]\n        pos[i] = features[pos_ind]\n        neg[i] = features[neg_ind]\n\n    return anchor, pos, neg\n","sub_path":"evaluations/get_apn.py","file_name":"get_apn.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"476457071","text":"import abc\nimport socket\n\n\nclass User:\n    __metaclass__ = abc.ABCMeta\n    @abc.abstractmethod\n    def get_permissions_e_id_from_host(self, hostname):\n        \"\"\"\n        implement according to the user database in use\n        :param hostname: hostname given by local ns\n        :type hostname: str\n        :return: id, permissions\n        :rtype: str, list\n        \"\"\"\n        return\n\n    def __init__(self):\n        self.codigo_unico = None\n        self.permissoes = []\n\n    def get_user_from_ip(self, ip):\n        \"\"\"\n        returns the user and permission list from the ip address of the request\n        :param ip: ip address of request\n        :type ip: str\n        :return: id code and permission list\n        :rtype: (str, list)\n        \"\"\"\n        if ip == '127.0.0.1' or ip is None:\n            hostname = socket.gethostname()\n        else:\n            try:\n                hostname = socket.gethostbyaddr(ip)[0]\n            except Exception as e:\n                print(e)\n                hostname = ''\n        self.codigo_unico, self.permissoes = self.get_permissions_e_id_from_host(hostname)\n\n\n","sub_path":"dashboard_lib/user_functions.py","file_name":"user_functions.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"394849107","text":"import numpy as np\nimport scipy\nfrom scipy.special import comb\nimport cv2\nfrom scipy import stats\n\ndef noise_exponential(M,N,a=1,b=0.25):\n    R = a * np.exp(b * np.random.randn(M,N))\n    return R\n\nsrc = cv2.imread('resources/images/cat.jpg',0)\ncols,rows = src.shape\nnoise = noise_exponential(cols,rows)\nsrc_noise = src.astype(np.float32) + noise * 255.0\nsrc_noise = np.clip(src_noise,0,255).astype(np.uint8)\nnoise = np.clip(noise*255,0,255).astype(np.uint8)\n\ncv2.imshow('noise',noise)\ncv2.imshow('src+noise',src_noise)\n\ncv2.waitKey()","sub_path":"projects/image-processing/python/image-noise/python/08-noise-exponential-distribution.py","file_name":"08-noise-exponential-distribution.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
{"seq_id":"400489419","text":"\"\"\"\nDjango settings for {{ project_name }} project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/\n\"\"\"\nimport os\n\nfrom configurations import Configuration, values\n\n\nclass Common(Configuration):\n    # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n    BASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n    # SECURITY WARNING: keep the secret key used in production secret!\n    SECRET_KEY = values.SecretValue()\n\n    # SECURITY WARNING: don't run with debug turned on in production!\n    DEBUG = values.BooleanValue(False)\n\n    TEMPLATE_DEBUG = values.BooleanValue(DEBUG)\n\n    ALLOWED_HOSTS = []\n\n    # Application definition\n    
INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # The Django sites framework is required by allauth\n 'django.contrib.sites',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n # include the social auth providers you want to enable:\n # 'allauth.socialaccount.providers.facebook',\n # 'allauth.socialaccount.providers.google',\n\n 'rest_framework',\n 'rest_framework.authtoken',\n 'rest_auth',\n 'rest_auth.registration',\n 'corsheaders',\n\n 'django_extensions',\n )\n\n MIDDLEWARE_CLASSES = (\n 'djangosecure.middleware.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware', # must be above common\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n )\n\n ROOT_URLCONF = '{{ project_name }}.urls'\n\n SITE_ID = 1\n\n WSGI_APPLICATION = '{{ project_name }}.wsgi.application'\n\n # Database\n # https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases\n DATABASES = values.DatabaseURLValue(\n 'sqlite:///{}'.format(os.path.join(BASE_DIR, 'db.sqlite3'))\n )\n\n # Internationalization\n # https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/\n LANGUAGE_CODE = 'en-gb'\n\n TIME_ZONE = 'UTC'\n\n USE_I18N = True\n\n USE_L10N = True\n\n USE_TZ = True\n\n # Static files (CSS, JavaScript, Images)\n # https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/\n STATIC_ROOT = 'staticfiles'\n STATIC_URL = '/static/'\n\n # Template Context Processors\n TEMPLATE_CONTEXT_PROCESSORS = (\n # Default Django context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n\n # Required by `allauth` template tags\n 'django.core.context_processors.request',\n\n # `allauth` specific context processors\n 'allauth.account.context_processors.account',\n 'allauth.socialaccount.context_processors.socialaccount',\n )\n\n # Authentication Configuration\n AUTHENTICATION_BACKENDS = (\n\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n\n )\n\n # API Configuration\n # http://django-rest-auth.readthedocs.org/\n # http://www.django-rest-framework.org/\n\n REST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n # 'PAGINATE_BY': 10\n }\n\n REST_AUTH_SERIALIZERS = {\n 'LOGIN_SERIALIZER': 'rest_auth.serializers.LoginSerializer',\n 'TOKEN_SERIALIZER': 'rest_auth.serializers.TokenSerializer',\n 'USER_DETAILS_SERIALIZER': 'rest_auth.serializers.UserDetailsSerializer',\n 'PASSWORD_RESET_SERIALIZER': 'rest_auth.serializers.PasswordResetSerializer',\n 'PASSWORD_RESET_CONFIRM_SERIALIZER': 'rest_auth.serializers.PasswordResetConfirmSerializer',\n 
'PASSWORD_CHANGE_SERIALIZER': 'rest_auth.serializers.PasswordChangeSerializer',\n }\n\n\nclass Development(Common):\n \"\"\"\n The in-development settings and the default configuration.\n \"\"\"\n DEBUG = True\n\n TEMPLATE_DEBUG = True\n\n ALLOWED_HOSTS = []\n\n CORS_ORIGIN_ALLOW_ALL = True\n\n INSTALLED_APPS = Common.INSTALLED_APPS + (\n 'debug_toolbar',\n )\n\n\nclass Staging(Common):\n \"\"\"\n The in-staging settings.\n \"\"\"\n INSTALLED_APPS = Common.INSTALLED_APPS + (\n 'djangosecure',\n )\n\n # django-secure\n SESSION_COOKIE_SECURE = values.BooleanValue(True)\n SECURE_SSL_REDIRECT = values.BooleanValue(True)\n SECURE_HSTS_SECONDS = values.IntegerValue(31536000)\n SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)\n SECURE_FRAME_DENY = values.BooleanValue(True)\n SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)\n SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)\n SECURE_PROXY_SSL_HEADER = values.TupleValue(\n ('HTTP_X_FORWARDED_PROTO', 'https')\n )\n\n\nclass Production(Staging):\n \"\"\"\n The in-production settings.\n \"\"\"\n pass\n","sub_path":"project_name/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"107574362","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1+dev\n# kernelspec:\n# display_name: Python [conda env:generic_expression] *\n# language: python\n# name: conda-env-generic_expression-py\n# ---\n\n# # Process PBTA data\n\n# +\n# %load_ext autoreload\n# %autoreload 2\n\nimport os\nimport pandas as pd\nimport pickle\n\nfrom ponyo import utils\nimport rpy2.robjects as ro\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.conversion import localconverter\n\n# +\nbase_dir = os.path.abspath(os.path.join(os.getcwd(), \"../\"))\n\n# Read in config variables\nconfig_filename = os.path.abspath(\n os.path.join(base_dir, \"configs\", \"config_human_general.tsv\")\n)\n\nparams = utils.read_config(config_filename)\n\nlocal_dir = params[\"local_dir\"]\nprocessed_template_filename = params[\"processed_template_filename\"]\npbta_dir = os.path.join(local_dir, \"openPBTA\")\n# -\n\n# ## Load RDS objects\n\nreadRDS = ro.r[\"readRDS\"]\n\npolya_matrix = readRDS(\n os.path.join(pbta_dir, \"pbta-gene-counts-rsem-expected_counts-collapsed.polya.rds\")\n)\nribo_matrix = readRDS(\n os.path.join(\n pbta_dir, \"pbta-gene-counts-rsem-expected_counts-collapsed.stranded.rds\"\n )\n)\n\nwith localconverter(ro.default_converter + pandas2ri.converter):\n polya_matrix_values = ro.conversion.rpy2py(polya_matrix)\n ribo_matrix_values = ro.conversion.rpy2py(ribo_matrix)\n\npolya_matrix_df = pd.DataFrame(\n data=polya_matrix_values,\n index=polya_matrix.rownames,\n columns=polya_matrix.colnames,\n)\nribo_matrix_df = pd.DataFrame(\n data=ribo_matrix_values,\n index=ribo_matrix.rownames,\n columns=ribo_matrix.colnames,\n)\n\nprint(polya_matrix_df.shape)\npolya_matrix_df.head()\n\nprint(ribo_matrix_df.shape)\nribo_matrix_df.head()\n\n# ## Get matching samples\n\n# +\n# Load metadata that maps RNA sample ids in expression matrices above\n# to patient sample id\npatient_metadata_filename = \"https://raw.githubusercontent.com/kgaonkar6/OpenPBTA-analysis/532c29ab743bc643e687044bdb3e90241925186a/analyses/tp53_nf1_score/results/tp53_altered_status.tsv\"\n\npatient_metadata = pd.read_csv(\n patient_metadata_filename, sep=\"\\t\", 
index_col=0, header=0\n)\n# -\n\npatient_metadata.head(10)\n\n# +\n# Select those patient sample ids (`sample_id) with multiple measurements\npatient_metadata_tmp = patient_metadata[patient_metadata.index.value_counts() > 1]\n\n# Select those with RNA sample ids (`Kids_First_Biospecimen_ID_RNA`) available\npatient_metadata_selected = patient_metadata_tmp[\n patient_metadata_tmp[\"Kids_First_Biospecimen_ID_RNA\"].isnull() == False\n]\n\n# +\n# Create dictionary to map patient sample ids with those RNA ids\n# for polyA-selection and ribo-depleted processing (column ids from gene expression matrices)\npatient_sample_ids = list(patient_metadata_selected.index.unique())\npolya_sample_ids = list(polya_matrix_df.columns)\nribo_sample_ids = list(ribo_matrix_df.columns)\n\npatient_to_polya_id = {}\npatient_to_ribo_id = {}\nfor patient_id in patient_sample_ids:\n rna_sample_ids = patient_metadata_selected.loc[\n patient_id, \"Kids_First_Biospecimen_ID_RNA\"\n ]\n for rna_sample_id in rna_sample_ids:\n if rna_sample_id in polya_sample_ids:\n patient_to_polya_id[patient_id] = rna_sample_id\n if rna_sample_id in ribo_sample_ids:\n patient_to_ribo_id[patient_id] = rna_sample_id\n\npatient_to_polya_id\n# -\n\npatient_to_ribo_id\n\n# +\n# Select patient sample ids with both polyA-selected and ribo-depleted measurements\nshared_patient_ids = list(patient_to_polya_id.keys() & patient_to_ribo_id.keys())\n\n# Manually checked that these patient ids were consistent with previous analysis comparing TP53 status across platform:\n# https://github.com/AlexsLemonade/OpenPBTA-analysis/pull/930\nshared_patient_ids\n# -\n\n# ## Select expression data\n\nselect_polya_ids = [patient_to_polya_id[x] for x in shared_patient_ids]\nselect_ribo_ids = [patient_to_ribo_id[x] for x in shared_patient_ids]\n\nselect_polya_expression = polya_matrix_df[select_polya_ids].T\nselect_ribo_expression = ribo_matrix_df[select_ribo_ids].T\n\n# ## Format data matrix\n#\n# * Include only those genes that were used in our analysis\n# - Note: gene ENSEMBL ids already mapped to HGNC ids: https://github.com/AlexsLemonade/OpenPBTA-analysis/tree/master/analyses/collapse-rnaseq\n# * Select only those samples with measurements from both polyA-selection and ribo-depleted protocols\n# * Create metadata dataframe with grouping information for DE analysis.\n\n# Read template file\ntemplate_SOPHIE = pd.read_csv(\n os.path.join(base_dir, \"human_general_analysis\", processed_template_filename),\n sep=\"\\t\",\n index_col=0,\n header=0,\n)\n\ntemplate_SOPHIE.head()\n\n# Get SOPHIE gene ids\nSOPHIE_gene_ids = list(template_SOPHIE.columns)\n\n# +\n# Get shared gene ids between polyA and ribo\nshared_platform_gene_ids = select_polya_expression.columns.intersection(\n select_ribo_expression.columns\n)\nprint(len(shared_platform_gene_ids))\n\nshared_gene_ids = list(set(shared_platform_gene_ids).intersection(SOPHIE_gene_ids))\nprint(len(shared_gene_ids))\n\n# +\n# Select shared genes\nselect_polya_expression = select_polya_expression[shared_gene_ids]\nselect_ribo_expression = select_ribo_expression[shared_gene_ids]\n\nprint(select_polya_expression.shape)\nprint(select_ribo_expression.shape)\n# -\n\nselect_polya_expression.head()\n\nselect_ribo_expression.head()\n\n# +\n# Concatenate expression data\nselect_expression = pd.concat([select_polya_expression, select_ribo_expression])\n\nselect_expression.head(12)\n\n# +\n# Create metadata grouping matrix\npolya_ids = list(select_polya_expression.index)\nribo_ids = list(select_ribo_expression.index)\n\nsample_ids = 
polya_ids + ribo_ids\nlabels = [1] * len(polya_ids) + [2] * len(ribo_ids)\n\nsample_grouping_metadata = pd.DataFrame(data={\"Sample\": sample_ids, \"group\": labels})\n\nsample_grouping_metadata.set_index(\"Sample\", inplace=True)\nsample_grouping_metadata\n# -\n\n# ## Save\n\n# +\nexpression_data_filename = \"polya_ribo_expression.tsv\"\nsample_grouping_filename = \"polya_ribo_sample_grouping.tsv\"\n\nselect_expression.to_csv(expression_data_filename, sep=\"\\t\")\nsample_grouping_metadata.to_csv(sample_grouping_filename, sep=\"\\t\")\n","sub_path":"explore_RNAseq_only_generic_genes/0a_process_PBTA_data.py","file_name":"0a_process_PBTA_data.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"67141508","text":"#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\nimport pytest\nimport time\nfrom unittestzero import Assert\nfrom pages.login import LoginPage\n\n\n@pytest.mark.parametrize(\"ldap_groups\", [\n \"evmgroup-administrator\",\n \"evmgroup-approver\",\n \"evmgroup-auditor\",\n \"evmgroup-desktop\",\n \"evmgroup-operator\",\n \"evmgroup-security\",\n \"evmgroup-super_administrator\",\n \"evmgroup-support\",\n \"evmgroup-user\",\n \"evmgroup-user_limited_self_service\",\n \"evmgroup-user_self_service\",\n \"evmgroup-vm_user\" ])\n@pytest.mark.usefixtures(\n \"maximized\",\n \"setup_infrastructure_providers\",\n \"configure_auth_mode\")\nclass TestLdap:\n def test_default_ldap_group_roles(self, mozwebqa, ldap_groups, cfme_data):\n \"\"\"Basic default LDAP group role RBAC test\n\n Validates expected menu and submenu names are present for default\n LDAP group roles\n \"\"\"\n if ldap_groups not in cfme_data.data['group_roles']:\n pytest.xfail(\"No match in cfme_data for group '%s'\" % ldap_groups)\n _group_roles = cfme_data.data['group_roles'][ldap_groups]\n login_pg = LoginPage(mozwebqa)\n login_pg.go_to_login_page()\n if ldap_groups not in login_pg.testsetup.credentials:\n pytest.xfail(\n \"No match in credentials file for group '%s'\" % ldap_groups)\n # login as LDAP user\n home_pg = login_pg.login(user=ldap_groups)\n Assert.true(home_pg.is_logged_in, \"Could not determine if logged in\")\n for menu in _group_roles[\"menus\"]:\n Assert.equal(home_pg.header.site_navigation_menu(menu).name, menu)\n for item in home_pg.header.site_navigation_menu(menu).items:\n Assert.contains(item.name, _group_roles[\"menus\"][menu])\n","sub_path":"tests/ui/integration/test_ldap_auth_and_roles.py","file_name":"test_ldap_auth_and_roles.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"} +{"seq_id":"163168127","text":"##################################################\n### Created by Lilian Sao de Rivera\n### Project Name : The economics of happiness\n### Date 04/23/2017\n### Data Mining\n##################################################\n\nimport sys\n\n#from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel, QGridLayout, QCheckBox, QGroupBox\nfrom PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,\n QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import Qt\n\nfrom scipy import interp\nfrom itertools import cycle\n\n\nfrom PyQt5.QtWidgets import QDialog, QVBoxLayout, 
QSizePolicy, QMessageBox\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\nimport pandas as pd\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.preprocessing import label_binarize\n\n# Libraries to display decision tree\nfrom pydotplus import graph_from_dot_data\nfrom sklearn.tree import export_graphviz\nimport webbrowser\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport matplotlib.pyplot as plt\n\nimport random\nimport seaborn as sns\n\n#%%-----------------------------------------------------------------------\nimport os\nos.environ[\"PATH\"] += os.pathsep + 'C:\\\\Program Files (x86)\\\\graphviz-2.38\\\\release\\\\bin'\n#%%-----------------------------------------------------------------------\n\n\n#::--------------------------------\n# Deafault font size for all the windows\n#::--------------------------------\nfont_size_window = 'font-size:15px'\n\nclass RandomForest(QMainWindow):\n #::--------------------------------------------------------------------------------\n # Implementation of Random Forest Classifier using the happiness dataset\n # the methods in this class are\n # _init_ : initialize the class\n # initUi : creates the canvas and all the elements in the canvas\n # update : populates the elements of the canvas base on the parametes\n # chosen by the user\n #::---------------------------------------------------------------------------------\n send_fig = pyqtSignal(str)\n\n def __init__(self):\n super(RandomForest, self).__init__()\n self.Title = \"Radom Forest Classifier\"\n self.initUi()\n\n def initUi(self):\n #::-----------------------------------------------------------------\n # Create the canvas and all the element to create a dashboard with\n # all the necessary elements to present the results from the algorithm\n # The canvas is divided using a grid loyout to facilitate the drawing\n # of the elements\n #::-----------------------------------------------------------------\n\n self.setWindowTitle(self.Title)\n self.setStyleSheet(font_size_window)\n\n self.main_widget = QWidget(self)\n\n self.layout = QGridLayout(self.main_widget)\n\n self.groupBox1 = QGroupBox('ML Random Forest Features')\n self.groupBox1Layout= QGridLayout() # Grid\n self.groupBox1.setLayout(self.groupBox1Layout)\n\n # We create a checkbox of each Features\n self.feature0 = QCheckBox(features_list[0],self)\n self.feature1 = QCheckBox(features_list[1],self)\n self.feature2 = QCheckBox(features_list[2], self)\n self.feature3 = QCheckBox(features_list[3], self)\n self.feature4 = QCheckBox(features_list[4],self)\n self.feature5 = QCheckBox(features_list[5],self)\n self.feature6 = QCheckBox(features_list[6], self)\n self.feature7 = QCheckBox(features_list[7], self)\n self.feature0.setChecked(True)\n self.feature1.setChecked(True)\n self.feature2.setChecked(True)\n self.feature3.setChecked(True)\n self.feature4.setChecked(True)\n self.feature5.setChecked(True)\n 
self.feature6.setChecked(True)\n self.feature7.setChecked(True)\n\n self.lblPercentTest = QLabel('Percentage for Test :')\n self.lblPercentTest.adjustSize()\n\n self.txtPercentTest = QLineEdit(self)\n self.txtPercentTest.setText(\"30\")\n\n self.btnExecute = QPushButton(\"Execute RF\")\n self.btnExecute.clicked.connect(self.update)\n\n self.groupBox1Layout.addWidget(self.feature0,0,0)\n self.groupBox1Layout.addWidget(self.feature1,0,1)\n self.groupBox1Layout.addWidget(self.feature2,1,0)\n self.groupBox1Layout.addWidget(self.feature3,1,1)\n self.groupBox1Layout.addWidget(self.feature4,2,0)\n self.groupBox1Layout.addWidget(self.feature5,2,1)\n self.groupBox1Layout.addWidget(self.feature6,3,0)\n self.groupBox1Layout.addWidget(self.feature7,3,1)\n self.groupBox1Layout.addWidget(self.lblPercentTest,4,0)\n self.groupBox1Layout.addWidget(self.txtPercentTest,4,1)\n self.groupBox1Layout.addWidget(self.btnExecute,5,0)\n\n self.groupBox2 = QGroupBox('Results from the model')\n self.groupBox2Layout = QVBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n\n self.lblResults = QLabel('Results:')\n self.lblResults.adjustSize()\n self.txtResults = QPlainTextEdit()\n self.lblAccuracy = QLabel('Accuracy:')\n self.txtAccuracy = QLineEdit()\n\n self.groupBox2Layout.addWidget(self.lblResults)\n self.groupBox2Layout.addWidget(self.txtResults)\n self.groupBox2Layout.addWidget(self.lblAccuracy)\n self.groupBox2Layout.addWidget(self.txtAccuracy)\n\n #::--------------------------------------\n # Graphic 1 : Confusion Matrix\n #::--------------------------------------\n\n self.fig = Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.axes=[self.ax1]\n self.canvas = FigureCanvas(self.fig)\n\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas.updateGeometry()\n\n self.groupBoxG1 = QGroupBox('Confusion Matrix')\n self.groupBoxG1Layout= QVBoxLayout()\n self.groupBoxG1.setLayout(self.groupBoxG1Layout)\n\n self.groupBoxG1Layout.addWidget(self.canvas)\n\n #::---------------------------------------\n # Graphic 2 : ROC Curve\n #::---------------------------------------\n\n self.fig2 = Figure()\n self.ax2 = self.fig2.add_subplot(111)\n self.axes2 = [self.ax2]\n self.canvas2 = FigureCanvas(self.fig2)\n\n self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas2.updateGeometry()\n\n self.groupBoxG2 = QGroupBox('ROC Curve')\n self.groupBoxG2Layout = QVBoxLayout()\n self.groupBoxG2.setLayout(self.groupBoxG2Layout)\n\n self.groupBoxG2Layout.addWidget(self.canvas2)\n\n #::-------------------------------------------\n # Graphic 3 : Importance of Features\n #::-------------------------------------------\n\n self.fig3 = Figure()\n self.ax3 = self.fig3.add_subplot(111)\n self.axes3 = [self.ax3]\n self.canvas3 = FigureCanvas(self.fig3)\n\n self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas3.updateGeometry()\n\n self.groupBoxG3 = QGroupBox('Importance of Features')\n self.groupBoxG3Layout = QVBoxLayout()\n self.groupBoxG3.setLayout(self.groupBoxG3Layout)\n self.groupBoxG3Layout.addWidget(self.canvas3)\n\n #::--------------------------------------------\n # Graphic 4 : ROC Curve by class\n #::--------------------------------------------\n\n self.fig4 = Figure()\n self.ax4 = self.fig4.add_subplot(111)\n self.axes4 = [self.ax4]\n self.canvas4 = FigureCanvas(self.fig4)\n\n self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas4.updateGeometry()\n\n self.groupBoxG4 = QGroupBox('ROC Curve by 
Class')\n self.groupBoxG4Layout = QVBoxLayout()\n self.groupBoxG4.setLayout(self.groupBoxG4Layout)\n self.groupBoxG4Layout.addWidget(self.canvas4)\n\n #::-------------------------------------------------\n # End of graphs\n #::-------------------------------------------------\n\n self.layout.addWidget(self.groupBox1,0,0)\n self.layout.addWidget(self.groupBoxG1,0,1)\n self.layout.addWidget(self.groupBox2,1,0)\n self.layout.addWidget(self.groupBoxG2,1,1)\n self.layout.addWidget(self.groupBoxG3,0,2)\n self.layout.addWidget(self.groupBoxG4,1,2)\n\n self.setCentralWidget(self.main_widget)\n self.resize(1100, 700)\n self.show()\n\n def update(self):\n '''\n Random Forest Classifier\n We pupulate the dashboard using the parametres chosen by the user\n The parameters are processed to execute in the skit-learn Random Forest algorithm\n then the results are presented in graphics and reports in the canvas\n :return:None\n '''\n\n # processing the parameters\n\n self.list_corr_features = pd.DataFrame([])\n if self.feature0.isChecked():\n if len(self.list_corr_features)==0:\n self.list_corr_features = ff_happiness[features_list[0]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[0]]],axis=1)\n\n if self.feature1.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[1]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[1]]],axis=1)\n\n if self.feature2.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[2]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[2]]],axis=1)\n\n if self.feature3.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[3]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[3]]],axis=1)\n\n if self.feature4.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[4]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[4]]],axis=1)\n\n if self.feature5.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[5]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[5]]],axis=1)\n\n if self.feature6.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[6]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[6]]],axis=1)\n\n if self.feature7.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[7]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[7]]],axis=1)\n\n\n vtest_per = float(self.txtPercentTest.text())\n\n # Clear the graphs to populate them with the new information\n\n self.ax1.clear()\n self.ax2.clear()\n self.ax3.clear()\n self.ax4.clear()\n self.txtResults.clear()\n self.txtResults.setUndoRedoEnabled(False)\n\n vtest_per = vtest_per / 100\n\n # Assign the X and y to run the Random Forest Classifier\n\n X_dt = self.list_corr_features\n y_dt = ff_happiness[\"Happiness.Scale\"]\n\n class_le = LabelEncoder()\n\n # fit and transform the class\n\n y_dt = class_le.fit_transform(y_dt)\n\n # split the dataset into train and test\n\n 
X_train, X_test, y_train, y_test = train_test_split(X_dt, y_dt, test_size=vtest_per, random_state=100)\n\n # specify the random forest classifier\n self.clf_rf = RandomForestClassifier(n_estimators=100, random_state=100)\n\n # perform training\n self.clf_rf.fit(X_train, y_train)\n\n #-----------------------------------------------------------------------\n\n # prediction on test using all features\n y_pred = self.clf_rf.predict(X_test)\n y_pred_score = self.clf_rf.predict_proba(X_test)\n\n\n # confusion matrix for RandomForest\n conf_matrix = confusion_matrix(y_test, y_pred)\n\n # classification report\n\n self.ff_class_rep = classification_report(y_test, y_pred)\n self.txtResults.appendPlainText(self.ff_class_rep)\n\n # accuracy score\n\n self.ff_accuracy_score = accuracy_score(y_test, y_pred) * 100\n self.txtAccuracy.setText(str(self.ff_accuracy_score))\n\n #::------------------------------------\n ## Graph 1 :\n ## Confusion Matrix\n #::------------------------------------\n # the leading empty label aligns the class names with matshow's tick positions\n class_names1 = ['','Happy', 'Med.Happy', 'Low.Happy', 'Not.Happy']\n\n self.ax1.matshow(conf_matrix, cmap= plt.cm.get_cmap('Blues', 14))\n self.ax1.set_yticklabels(class_names1)\n self.ax1.set_xticklabels(class_names1,rotation = 90)\n self.ax1.set_xlabel('Predicted label')\n self.ax1.set_ylabel('True label')\n\n # annotate each cell with its count (the class probabilities were already computed above)\n for i in range(len(class_names)):\n for j in range(len(class_names)):\n self.ax1.text(j, i, str(conf_matrix[i][j]))\n\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n\n ## End Graph 1 -- Confusion Matrix\n\n #::----------------------------------------\n ## Graph 2 - ROC Curve\n #::----------------------------------------\n y_test_bin = label_binarize(y_test, classes=[0, 1, 2, 3])\n n_classes = y_test_bin.shape[1]\n\n # From the scikit-learn site\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())\n\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n lw = 2\n self.ax2.plot(fpr[2], tpr[2], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\n self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n self.ax2.set_xlim([0.0, 1.0])\n self.ax2.set_ylim([0.0, 1.05])\n self.ax2.set_xlabel('False Positive Rate')\n self.ax2.set_ylabel('True Positive Rate')\n self.ax2.set_title('ROC Curve Random Forest')\n self.ax2.legend(loc=\"lower right\")\n\n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n ######################################\n # Graph 3 - Feature Importances\n #####################################\n # get feature importances\n importances = self.clf_rf.feature_importances_\n\n # convert the importances into a one-dimensional array with the corresponding df column names as axis labels\n f_importances = pd.Series(importances, self.list_corr_features.columns)\n\n # sort the array in descending order of the importances\n f_importances.sort_values(ascending=False, inplace=True)\n\n X_Features = f_importances.index\n y_Importance = list(f_importances)\n\n self.ax3.barh(X_Features, y_Importance)\n self.ax3.set_aspect('auto')\n\n # show the plot\n self.fig3.tight_layout()\n self.fig3.canvas.draw_idle()\n\n 
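# --- Editor's aside: hedged sketch, not part of the original record. ---
# This record imports scipy's `interp` at the top but never uses it; the
# scikit-learn ROC example cited above pairs the micro-average computed here
# with a macro-average curve built exactly this way, reusing the fpr/tpr
# dicts, roc_auc, and n_classes already defined in update():
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    # interpolate each per-class ROC curve onto the common fpr grid
    mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= n_classes                      # average the true-positive rates
fpr["macro"], tpr["macro"] = all_fpr, mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])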
#::-----------------------------------------------------\n # Graph 4 - ROC Curve by Class\n #::-----------------------------------------------------\n str_classes = ['HP','MEH','LOH','NH']\n colors = cycle(['magenta', 'darkorange', 'green', 'blue'])\n for i, color in zip(range(n_classes), colors):\n self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='{0} (area = {1:0.2f})'\n ''.format(str_classes[i], roc_auc[i]))\n\n self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)\n self.ax4.set_xlim([0.0, 1.0])\n self.ax4.set_ylim([0.0, 1.05])\n self.ax4.set_xlabel('False Positive Rate')\n self.ax4.set_ylabel('True Positive Rate')\n self.ax4.set_title('ROC Curve by Class')\n self.ax4.legend(loc=\"lower right\")\n\n # show the plot\n self.fig4.tight_layout()\n self.fig4.canvas.draw_idle()\n\n #::-----------------------------\n # End of graph 4 - ROC curve by class\n #::-----------------------------\n\nclass DecisionTree(QMainWindow):\n #::----------------------\n # Implementation of Decision Tree Algorithm using the happiness dataset\n # the methods in this class are\n # _init_ : initialize the class\n # initUi : creates the canvas and all the elements in the canvas\n # update : populates the elements of the canvas based on the parameters\n # chosen by the user\n # view_tree : shows the tree in PDF form\n #::----------------------\n\n send_fig = pyqtSignal(str)\n\n def __init__(self):\n super(DecisionTree, self).__init__()\n\n self.Title = \"Decision Tree Classifier\"\n self.initUi()\n\n def initUi(self):\n #::-----------------------------------------------------------------\n # Create the canvas and all the elements to create a dashboard with\n # all the necessary elements to present the results from the algorithm\n # The canvas is divided using a grid layout to facilitate the drawing\n # of the elements\n #::-----------------------------------------------------------------\n\n self.setWindowTitle(self.Title)\n self.setStyleSheet(font_size_window)\n\n self.main_widget = QWidget(self)\n\n self.layout = QGridLayout(self.main_widget)\n\n self.groupBox1 = QGroupBox('ML Decision Tree Features')\n self.groupBox1Layout= QGridLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n\n self.feature0 = QCheckBox(features_list[0],self)\n self.feature1 = QCheckBox(features_list[1],self)\n self.feature2 = QCheckBox(features_list[2], self)\n self.feature3 = QCheckBox(features_list[3], self)\n self.feature4 = QCheckBox(features_list[4],self)\n self.feature5 = QCheckBox(features_list[5],self)\n self.feature6 = QCheckBox(features_list[6], self)\n self.feature7 = QCheckBox(features_list[7], self)\n self.feature0.setChecked(True)\n self.feature1.setChecked(True)\n self.feature2.setChecked(True)\n self.feature3.setChecked(True)\n self.feature4.setChecked(True)\n self.feature5.setChecked(True)\n self.feature6.setChecked(True)\n self.feature7.setChecked(True)\n\n self.lblPercentTest = QLabel('Percentage for Test :')\n self.lblPercentTest.adjustSize()\n\n self.txtPercentTest = QLineEdit(self)\n self.txtPercentTest.setText(\"30\")\n\n self.lblMaxDepth = QLabel('Maximum Depth :')\n self.txtMaxDepth = QLineEdit(self)\n self.txtMaxDepth.setText(\"3\")\n\n self.btnExecute = QPushButton(\"Execute DT\")\n self.btnExecute.clicked.connect(self.update)\n\n self.btnDTFigure = QPushButton(\"View Tree\")\n self.btnDTFigure.clicked.connect(self.view_tree)\n\n # Lay out the controls in the group box\n\n self.groupBox1Layout.addWidget(self.feature0,0,0)\n self.groupBox1Layout.addWidget(self.feature1,0,1)\n 
self.groupBox1Layout.addWidget(self.feature2,1,0)\n self.groupBox1Layout.addWidget(self.feature3,1,1)\n self.groupBox1Layout.addWidget(self.feature4,2,0)\n self.groupBox1Layout.addWidget(self.feature5,2,1)\n self.groupBox1Layout.addWidget(self.feature6,3,0)\n self.groupBox1Layout.addWidget(self.feature7,3,1)\n self.groupBox1Layout.addWidget(self.lblPercentTest,4,0)\n self.groupBox1Layout.addWidget(self.txtPercentTest,4,1)\n self.groupBox1Layout.addWidget(self.lblMaxDepth,5,0)\n self.groupBox1Layout.addWidget(self.txtMaxDepth,5,1)\n self.groupBox1Layout.addWidget(self.btnExecute,6,0)\n self.groupBox1Layout.addWidget(self.btnDTFigure,6,1)\n\n self.groupBox2 = QGroupBox('Results from the model')\n self.groupBox2Layout = QVBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n\n self.lblResults = QLabel('Results:')\n self.lblResults.adjustSize()\n self.txtResults = QPlainTextEdit()\n self.lblAccuracy = QLabel('Accuracy:')\n self.txtAccuracy = QLineEdit()\n\n self.groupBox2Layout.addWidget(self.lblResults)\n self.groupBox2Layout.addWidget(self.txtResults)\n self.groupBox2Layout.addWidget(self.lblAccuracy)\n self.groupBox2Layout.addWidget(self.txtAccuracy)\n\n #::-------------------------------------\n # Graphic 1 : Confusion Matrix\n #::-------------------------------------\n\n self.fig = Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.axes=[self.ax1]\n self.canvas = FigureCanvas(self.fig)\n\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas.updateGeometry()\n\n self.groupBoxG1 = QGroupBox('Confusion Matrix')\n self.groupBoxG1Layout= QVBoxLayout()\n self.groupBoxG1.setLayout(self.groupBoxG1Layout)\n\n self.groupBoxG1Layout.addWidget(self.canvas)\n\n #::--------------------------------------------\n ## End Graph1\n #::--------------------------------------------\n\n #::---------------------------------------------\n # Graphic 2 : ROC Curve\n #::---------------------------------------------\n\n self.fig2 = Figure()\n self.ax2 = self.fig2.add_subplot(111)\n self.axes2 = [self.ax2]\n self.canvas2 = FigureCanvas(self.fig2)\n\n self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas2.updateGeometry()\n\n self.groupBoxG2 = QGroupBox('ROC Curve')\n self.groupBoxG2Layout = QVBoxLayout()\n self.groupBoxG2.setLayout(self.groupBoxG2Layout)\n\n self.groupBoxG2Layout.addWidget(self.canvas2)\n\n #::---------------------------------------------------\n # Graphic 3 : ROC Curve by Class\n #::---------------------------------------------------\n\n self.fig3 = Figure()\n self.ax3 = self.fig3.add_subplot(111)\n self.axes3 = [self.ax3]\n self.canvas3 = FigureCanvas(self.fig3)\n\n self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas3.updateGeometry()\n\n self.groupBoxG3 = QGroupBox('ROC Curve by Class')\n self.groupBoxG3Layout = QVBoxLayout()\n self.groupBoxG3.setLayout(self.groupBoxG3Layout)\n\n self.groupBoxG3Layout.addWidget(self.canvas3)\n\n ## End of elements o the dashboard\n\n self.layout.addWidget(self.groupBox1,0,0)\n self.layout.addWidget(self.groupBoxG1,0,1)\n self.layout.addWidget(self.groupBox2,0,2)\n self.layout.addWidget(self.groupBoxG2,1,1)\n self.layout.addWidget(self.groupBoxG3,1,2)\n\n self.setCentralWidget(self.main_widget)\n self.resize(1100, 700)\n self.show()\n\n\n def update(self):\n '''\n Decision Tree Algorithm\n We pupulate the dashboard using the parametres chosen by the user\n The parameters are processed to execute in the skit-learn Decision Tree algorithm\n then the 
results are presented in graphics and reports in the canvas\n :return: None\n '''\n\n # We process the parameters\n self.list_corr_features = pd.DataFrame([])\n if self.feature0.isChecked():\n if len(self.list_corr_features)==0:\n self.list_corr_features = ff_happiness[features_list[0]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[0]]],axis=1)\n\n if self.feature1.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[1]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[1]]],axis=1)\n\n if self.feature2.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[2]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[2]]],axis=1)\n\n if self.feature3.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[3]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[3]]],axis=1)\n\n if self.feature4.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[4]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[4]]],axis=1)\n\n if self.feature5.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[5]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[5]]],axis=1)\n\n if self.feature6.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[6]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[6]]],axis=1)\n\n if self.feature7.isChecked():\n if len(self.list_corr_features) == 0:\n self.list_corr_features = ff_happiness[features_list[7]]\n else:\n self.list_corr_features = pd.concat([self.list_corr_features, ff_happiness[features_list[7]]],axis=1)\n\n\n vtest_per = float(self.txtPercentTest.text())\n vmax_depth = int(self.txtMaxDepth.text()) # max_depth must be an integer\n\n self.ax1.clear()\n self.ax2.clear()\n self.ax3.clear()\n self.txtResults.clear()\n self.txtResults.setUndoRedoEnabled(False)\n\n vtest_per = vtest_per / 100\n\n\n # We assign the values to X and y to run the algorithm\n\n X_dt = self.list_corr_features\n y_dt = ff_happiness[\"Happiness.Scale\"]\n\n class_le = LabelEncoder()\n\n # fit and transform the class\n\n y_dt = class_le.fit_transform(y_dt)\n\n # split the dataset into train and test\n X_train, X_test, y_train, y_test = train_test_split(X_dt, y_dt, test_size=vtest_per, random_state=100)\n # perform training with entropy.\n # Decision tree with entropy\n self.clf_entropy = DecisionTreeClassifier(criterion=\"entropy\", random_state=100, max_depth=vmax_depth, min_samples_leaf=5)\n\n # Performing training\n self.clf_entropy.fit(X_train, y_train)\n\n # prediction on test using entropy\n y_pred_entropy = self.clf_entropy.predict(X_test)\n\n # confusion matrix for entropy model\n\n conf_matrix = confusion_matrix(y_test, y_pred_entropy)\n\n # classification report\n\n self.ff_class_rep = classification_report(y_test, y_pred_entropy)\n self.txtResults.appendPlainText(self.ff_class_rep)\n\n # accuracy score\n\n self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100\n self.txtAccuracy.setText(str(self.ff_accuracy_score))\n\n\n 
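# --- Editor's aside: hedged sketch, not part of the original record. ---
# Both update() methods call float()/int() on free-text fields with no guard,
# so a stray character crashes deep inside scikit-learn. One way to harden the
# parsing step above, using the QMessageBox this file already imports (widget
# names as in the original):
try:
    vtest_per = float(self.txtPercentTest.text()) / 100.0
    vmax_depth = int(self.txtMaxDepth.text())
except ValueError:
    QMessageBox.warning(self, "Invalid input", "Test percentage and max depth must be numeric.")
    return
if not 0.0 < vtest_per < 1.0:
    QMessageBox.warning(self, "Invalid input", "Percentage for Test must be between 0 and 100.")
    return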
#::----------------------------------------------------------------\n # Graph 1 -- Confusion Matrix\n #::-----------------------------------------------------------------\n\n self.ax1.set_xlabel('Predicted label')\n self.ax1.set_ylabel('True label')\n\n # the leading empty label aligns the class names with matshow's tick positions\n class_names1 = ['','Happy', 'Med.Happy', 'Low.Happy', 'Not.Happy']\n\n self.ax1.matshow(conf_matrix, cmap= plt.cm.get_cmap('Blues', 14))\n self.ax1.set_yticklabels(class_names1)\n self.ax1.set_xticklabels(class_names1,rotation = 90)\n\n # annotate each cell of the confusion matrix with its count\n for i in range(len(class_names)):\n for j in range(len(class_names)):\n self.ax1.text(j, i, str(conf_matrix[i][j]))\n\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n\n #::-----------------------------------------------------\n # End Graph 1 -- Confusion Matrix\n #::-----------------------------------------------------\n\n #::-----------------------------------------------------\n # Graph 2 -- ROC Curve\n #::-----------------------------------------------------\n\n # class probabilities from the fitted tree (computed once, outside the annotation loop)\n y_pred_score = self.clf_entropy.predict_proba(X_test)\n\n y_test_bin = label_binarize(y_test, classes=[0, 1, 2, 3])\n n_classes = y_test_bin.shape[1]\n\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())\n\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n lw = 2\n self.ax2.plot(fpr[2], tpr[2], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\n self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n self.ax2.set_xlim([0.0, 1.0])\n self.ax2.set_ylim([0.0, 1.05])\n self.ax2.set_xlabel('False Positive Rate')\n self.ax2.set_ylabel('True Positive Rate')\n self.ax2.set_title('ROC Curve Decision Tree')\n self.ax2.legend(loc=\"lower right\")\n\n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n\n #::--------------------------------\n ### Graph 3 ROC Curve by class\n #::--------------------------------\n\n str_classes = ['HP','MEH','LOH','NH']\n colors = cycle(['magenta', 'darkorange', 'green', 'blue'])\n for i, color in zip(range(n_classes), colors):\n self.ax3.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='{0} (area = {1:0.2f})'\n ''.format(str_classes[i], roc_auc[i]))\n\n self.ax3.plot([0, 1], [0, 1], 'k--', lw=lw)\n self.ax3.set_xlim([0.0, 1.0])\n self.ax3.set_ylim([0.0, 1.05])\n self.ax3.set_xlabel('False Positive Rate')\n self.ax3.set_ylabel('True Positive Rate')\n self.ax3.set_title('ROC Curve by Class')\n self.ax3.legend(loc=\"lower right\")\n\n # show the plot\n self.fig3.tight_layout()\n self.fig3.canvas.draw_idle()\n\n\n def view_tree(self):\n '''\n Runs graphviz to create a tree view of the fitted model,\n then presents the graphic in PDF format using webbrowser\n :return: None\n '''\n dot_data = export_graphviz(self.clf_entropy, filled=True, rounded=True, class_names=class_names,\n feature_names=self.list_corr_features.columns, out_file=None)\n\n\n graph = graph_from_dot_data(dot_data)\n graph.write_pdf(\"decision_tree_entropy.pdf\")\n webbrowser.open_new(r'decision_tree_entropy.pdf')\n\n\nclass CorrelationPlot(QMainWindow):\n #::-----------------------------------------------------------------------\n # This class creates a canvas to draw a correlation plot\n # It presents all the features plus the happiness score\n # the methods for this class are:\n # _init_\n # initUi\n # update\n 
#::-----------------------------------------------------------------------\n send_fig = pyqtSignal(str)\n\n def __init__(self):\n #::--------------------------------------------------------\n # Initialize the values of the class\n #::--------------------------------------------------------\n super(CorrelationPlot, self).__init__()\n\n self.Title = 'Correlation Plot'\n self.initUi()\n\n def initUi(self):\n #::--------------------------------------------------------------\n # Creates the canvas and elements of the canvas\n #::--------------------------------------------------------------\n self.setWindowTitle(self.Title)\n self.setStyleSheet(font_size_window)\n\n self.main_widget = QWidget(self)\n\n self.layout = QVBoxLayout(self.main_widget)\n\n self.groupBox1 = QGroupBox('Correlation Plot Features')\n self.groupBox1Layout= QGridLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n\n\n self.feature0 = QCheckBox(features_list[0],self)\n self.feature1 = QCheckBox(features_list[1],self)\n self.feature2 = QCheckBox(features_list[2], self)\n self.feature3 = QCheckBox(features_list[3], self)\n self.feature4 = QCheckBox(features_list[4],self)\n self.feature5 = QCheckBox(features_list[5],self)\n self.feature6 = QCheckBox(features_list[6], self)\n self.feature7 = QCheckBox(features_list[7], self)\n self.feature0.setChecked(True)\n self.feature1.setChecked(True)\n self.feature2.setChecked(True)\n self.feature3.setChecked(True)\n self.feature4.setChecked(True)\n self.feature5.setChecked(True)\n self.feature6.setChecked(True)\n self.feature7.setChecked(True)\n\n self.btnExecute = QPushButton(\"Create Plot\")\n self.btnExecute.clicked.connect(self.update)\n\n self.groupBox1Layout.addWidget(self.feature0,0,0)\n self.groupBox1Layout.addWidget(self.feature1,0,1)\n self.groupBox1Layout.addWidget(self.feature2,0,2)\n self.groupBox1Layout.addWidget(self.feature3,0,3)\n self.groupBox1Layout.addWidget(self.feature4,1,0)\n self.groupBox1Layout.addWidget(self.feature5,1,1)\n self.groupBox1Layout.addWidget(self.feature6,1,2)\n self.groupBox1Layout.addWidget(self.feature7,1,3)\n self.groupBox1Layout.addWidget(self.btnExecute,2,0)\n\n\n self.fig = Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.axes=[self.ax1]\n self.canvas = FigureCanvas(self.fig)\n\n self.canvas.setSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n\n self.canvas.updateGeometry()\n\n\n self.groupBox2 = QGroupBox('Correlation Plot')\n self.groupBox2Layout= QVBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n\n self.groupBox2Layout.addWidget(self.canvas)\n\n\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n\n self.setCentralWidget(self.main_widget)\n self.resize(900, 700)\n self.show()\n self.update()\n\n def update(self):\n\n #::------------------------------------------------------------\n # Populates the elements in the canvas using the values\n # chosen as parameters for the correlation plot\n #::------------------------------------------------------------\n self.ax1.clear()\n\n X_1 = ff_happiness[\"Happiness.Score\"]\n\n list_corr_features = pd.DataFrame(ff_happiness[\"Happiness.Score\"])\n if self.feature0.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[0]]],axis=1)\n\n if self.feature1.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[1]]],axis=1)\n\n if self.feature2.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[2]]],axis=1)\n\n if 
self.feature3.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[3]]],axis=1)\n if self.feature4.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[4]]],axis=1)\n\n if self.feature5.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[5]]],axis=1)\n\n if self.feature6.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[6]]],axis=1)\n\n if self.feature7.isChecked():\n list_corr_features = pd.concat([list_corr_features, ff_happiness[features_list[7]]],axis=1)\n\n\n vsticks = [\"dummy\"]\n vsticks1 = list(list_corr_features.columns)\n vsticks1 = vsticks + vsticks1\n res_corr = list_corr_features.corr()\n self.ax1.matshow(res_corr, cmap= plt.cm.get_cmap('Blues', 14))\n self.ax1.set_yticklabels(vsticks1)\n self.ax1.set_xticklabels(vsticks1,rotation = 90)\n\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n\n\nclass HappinessGraphs(QMainWindow):\n #::---------------------------------------------------------\n # This class crates a canvas with a plot to show the relation\n # from each feature in the dataset with the happiness score\n # methods\n # _init_\n # update\n #::---------------------------------------------------------\n send_fig = pyqtSignal(str)\n\n def __init__(self):\n #::--------------------------------------------------------\n # Crate a canvas with the layout to draw a dotplot\n # The layout sets all the elements and manage the changes\n # made on the canvas\n #::--------------------------------------------------------\n super(HappinessGraphs, self).__init__()\n\n self.Title = \"Features vrs Happiness Score\"\n self.main_widget = QWidget(self)\n\n self.setWindowTitle(self.Title)\n self.setStyleSheet(font_size_window)\n\n self.fig = Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.axes=[self.ax1]\n self.canvas = FigureCanvas(self.fig)\n\n\n self.canvas.setSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n\n self.canvas.updateGeometry()\n\n self.dropdown1 = QComboBox()\n self.dropdown1.addItems([\"GDP\", \"GINI\", \"VoiceandAccountability\", \"PoliticalStabilityNoViolence\",\n \"GovermentEffectiveness\", \"RegulatoryQuality\", \"RuleofLaw\", \"ControlofCorruption\"])\n\n self.dropdown1.currentIndexChanged.connect(self.update)\n self.label = QLabel(\"A plot:\")\n\n self.checkbox1 = QCheckBox('Show Regression Line', self)\n self.checkbox1.stateChanged.connect(self.update)\n\n self.layout = QGridLayout(self.main_widget)\n self.layout.addWidget(QLabel(\"Select Index for subplots\"))\n self.layout.addWidget(self.dropdown1)\n self.layout.addWidget(self.checkbox1)\n self.layout.addWidget(self.canvas)\n\n self.setCentralWidget(self.main_widget)\n self.show()\n self.update()\n\n def update(self):\n #::--------------------------------------------------------\n # This method executes each time a change is made on the canvas\n # containing the elements of the graph\n # The purpose of the method es to draw a dot graph using the\n # score of happiness and the feature chosen the canvas\n #::--------------------------------------------------------\n colors=[\"b\", \"r\", \"g\", \"y\", \"k\", \"c\"]\n self.ax1.clear()\n cat1 = self.dropdown1.currentText()\n\n X_1 = ff_happiness[\"Happiness.Score\"]\n y_1 = ff_happiness[cat1]\n\n\n self.ax1.scatter(X_1,y_1)\n\n if self.checkbox1.isChecked():\n\n b, m = polyfit(X_1, y_1, 1)\n\n self.ax1.plot(X_1, b + m * X_1, '-', color=\"orange\")\n\n vtitle = \"Happiness vrs \"+ 
cat1+ \" 2017\"\n self.ax1.set_title(vtitle)\n self.ax1.set_xlabel(\"Level of Happiness\")\n self.ax1.set_ylabel(cat1)\n self.ax1.grid(True)\n\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n\n\nclass PlotCanvas(FigureCanvas):\n #::----------------------------------------------------------\n # creates a figure on the canvas\n # later on this element will be used to draw a histogram graph\n #::----------------------------------------------------------\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n def plot(self):\n self.ax = self.figure.add_subplot(111)\n\nclass CanvasWindow(QMainWindow):\n #::----------------------------------\n # Creates a canvaas containing the plot for the initial analysis\n #;;----------------------------------\n def __init__(self, parent=None):\n super(CanvasWindow, self).__init__(parent)\n\n self.left = 200\n self.top = 200\n self.Title = 'Distribution'\n self.width = 500\n self.height = 500\n self.initUI()\n\n def initUI(self):\n\n self.setWindowTitle(self.Title)\n self.setStyleSheet(font_size_window)\n\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n self.m = PlotCanvas(self, width=5, height=4)\n self.m.move(0, 30)\n\nclass App(QMainWindow):\n #::-------------------------------------------------------\n # This class creates all the elements of the application\n #::-------------------------------------------------------\n\n def __init__(self):\n super().__init__()\n self.left = 100\n self.top = 100\n self.Title = 'The economics of happiness'\n self.width = 500\n self.height = 300\n self.initUI()\n\n def initUI(self):\n #::-------------------------------------------------\n # Creates the manu and the items\n #::-------------------------------------------------\n self.setWindowTitle(self.Title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n #::-----------------------------\n # Create the menu bar\n # and three items for the menu, File, EDA Analysis and ML Models\n #::-----------------------------\n mainMenu = self.menuBar()\n mainMenu.setStyleSheet('background-color: lightblue')\n\n fileMenu = mainMenu.addMenu('File')\n EDAMenu = mainMenu.addMenu('EDA Analysis')\n MLModelMenu = mainMenu.addMenu('ML Models')\n\n #::--------------------------------------\n # Exit application\n # Creates the actions for the fileMenu item\n #::--------------------------------------\n\n exitButton = QAction(QIcon('enter.png'), 'Exit', self)\n exitButton.setShortcut('Ctrl+Q')\n exitButton.setStatusTip('Exit application')\n exitButton.triggered.connect(self.close)\n\n fileMenu.addAction(exitButton)\n\n #::----------------------------------------\n # EDA analysis\n # Creates the actions for the EDA Analysis item\n # Initial Assesment : Histogram about the level of happiness in 2017\n # Happiness Final : Presents the correlation between the index of happiness and a feature from the datasets.\n # Correlation Plot : Correlation plot using all the dims in the datasets\n #::----------------------------------------\n\n EDA1Button = QAction(QIcon('analysis.png'),'Initial Assesment', self)\n EDA1Button.setStatusTip('Presents the initial datasets')\n EDA1Button.triggered.connect(self.EDA1)\n EDAMenu.addAction(EDA1Button)\n\n EDA2Button = QAction(QIcon('analysis.png'), 'Happiness Final', self)\n 
EDA2Button.setStatusTip('Final Happiness Graph')\n EDA2Button.triggered.connect(self.EDA2)\n EDAMenu.addAction(EDA2Button)\n\n EDA4Button = QAction(QIcon('analysis.png'), 'Correlation Plot', self)\n EDA4Button.setStatusTip('Features Correlation Plot')\n EDA4Button.triggered.connect(self.EDA4)\n EDAMenu.addAction(EDA4Button)\n\n #::--------------------------------------------------\n # ML Models for prediction\n # There are two models\n # Decision Tree\n # Random Forest\n #::--------------------------------------------------\n # Decision Tree Model\n #::--------------------------------------------------\n MLModel1Button = QAction(QIcon(), 'Decision Tree Entropy', self)\n MLModel1Button.setStatusTip('ML algorithm with Entropy ')\n MLModel1Button.triggered.connect(self.MLDT)\n\n #::------------------------------------------------------\n # Random Forest Classifier\n #::------------------------------------------------------\n MLModel2Button = QAction(QIcon(), 'Random Forest Classifier', self)\n MLModel2Button.setStatusTip('Random Forest Classifier ')\n MLModel2Button.triggered.connect(self.MLRF)\n\n MLModelMenu.addAction(MLModel1Button)\n MLModelMenu.addAction(MLModel2Button)\n\n self.dialogs = list()\n\n def EDA1(self):\n #::------------------------------------------------------\n # Creates the histogram\n # The X variable contains the happiness.score\n # X was populated in the method data_happiness()\n # at the start of the application\n #::------------------------------------------------------\n dialog = CanvasWindow(self)\n dialog.m.plot()\n dialog.m.ax.hist(X, bins=12, facecolor='green', alpha=0.5)\n dialog.m.ax.set_title('Frequency of Happiness Year 2017')\n dialog.m.ax.set_xlabel(\"Level of Happiness\")\n dialog.m.ax.set_ylabel(\"Number of Countries\")\n dialog.m.ax.grid(True)\n dialog.m.draw()\n self.dialogs.append(dialog)\n dialog.show()\n\n def EDA2(self):\n #::---------------------------------------------------------\n # This function creates an instance of HappinessGraphs class\n # This class creates a graph using the features in the dataset\n # happiness vrs the score of happiness\n #::---------------------------------------------------------\n dialog = HappinessGraphs()\n self.dialogs.append(dialog)\n dialog.show()\n\n def EDA4(self):\n #::----------------------------------------------------------\n # This function creates an instance of the CorrelationPlot class\n #::----------------------------------------------------------\n dialog = CorrelationPlot()\n self.dialogs.append(dialog)\n dialog.show()\n\n def MLDT(self):\n #::-----------------------------------------------------------\n # This function creates an instance of the DecisionTree class\n # This class presents a dashboard for a Decision Tree Algorithm\n # using the happiness dataset\n #::-----------------------------------------------------------\n dialog = DecisionTree()\n self.dialogs.append(dialog)\n dialog.show()\n\n def MLRF(self):\n #::-------------------------------------------------------------\n # This function creates an instance of the Random Forest Classifier Algorithm\n # using the happiness dataset\n #::-------------------------------------------------------------\n dialog = RandomForest()\n self.dialogs.append(dialog)\n dialog.show()\n\ndef main():\n #::-------------------------------------------------\n # Initiates the application\n #::-------------------------------------------------\n app = QApplication(sys.argv)\n app.setStyle('Fusion')\n ex = App()\n ex.show()\n sys.exit(app.exec_())\n\n\ndef 
data_happiness():\n #::--------------------------------------------------\n # Loads the dataset 2017.csv (index of happiness and explanatory variables, original dataset)\n # Loads the dataset final_happiness_dataset (index of happiness\n # and explanatory variables which are already preprocessed)\n # Populates X,y that are used in the classes above\n #::--------------------------------------------------\n global happiness\n global ff_happiness\n global X\n global y\n global features_list\n global class_names\n happiness = pd.read_csv('2017.csv')\n X = happiness[\"Happiness.Score\"]\n y = happiness[\"Country\"]\n ff_happiness = pd.read_csv('final_happiness_dataset.csv')\n features_list = [\"GDP\", \"GINI\", \"VoiceandAccountability\", \"PoliticalStabilityNoViolence\",\n \"GovermentEffectiveness\", \"RegulatoryQuality\", \"RuleofLaw\", \"ControlofCorruption\"]\n class_names = ['Happy', 'Med.Happy', 'Low.Happy', 'Not.Happy']\n\n\nif __name__ == '__main__':\n #::------------------------------------\n # First reads the data, then starts the application\n #::------------------------------------\n data_happiness()\n main()","sub_path":"Demo/PyQt5/Demo/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":50137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"25"}
+{"seq_id":"32802547","text":"'''\nDemo Chat Client\n\n End points for sending messages between everyone connected to a server\n'''\n\nimport sys\nimport json\nimport time\nimport socket\nimport threading\nfrom random import randint\nfrom struct import pack, unpack\n\ndef recv_json(server_socket):\n header = server_socket.recv(8)\n size = unpack('<i', header[4:])[0]\n if size > 1024 * 1024:\n raise ValueError(\"Incoming JSON is too large: \" + str(size))\n # read incoming size from socket, then remove the trailing newline\n body = server_socket.recv(size)[:-1]\n # parse into json\n return json.loads(body)\n\n\ndef send_json(server_socket, msg_payload):\n if msg_payload[-1] != \"\\n\":\n msg_payload += \"\\n\"\n prefix = \"JSON\".encode(\"utf-8\")\n size = pack(\"
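# --- Editor's aside: the record above is truncated mid-statement (the tail of
# send_json and the rest of the chat client are missing) and is left as-is.
# For reference, a self-contained hedged sketch of the framing recv_json
# implies: an 8-byte header of a 4-byte ASCII tag plus a little-endian length,
# followed by a newline-terminated UTF-8 JSON body. Names are illustrative.
import json
from struct import pack, unpack

def frame_json(payload):
    # newline-terminated body, mirroring the [:-1] strip in recv_json
    body = (json.dumps(payload) + "\n").encode("utf-8")
    return b"JSON" + pack("<i", len(body)) + body

def read_frame(sock):
    header = sock.recv(8)                    # b"JSON" + 4-byte length
    size = unpack("<i", header[4:])[0]
    if size > 1024 * 1024:
        raise ValueError("Incoming JSON is too large: %d" % size)
    return json.loads(sock.recv(size)[:-1])  # drop the trailing newline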