diff --git "a/1091.jsonl" "b/1091.jsonl" new file mode 100644--- /dev/null +++ "b/1091.jsonl" @@ -0,0 +1,363 @@ +{"seq_id": "175864001", "text": "s = 'dfjakdlfaeijflasdklfaiejlqladkjfaadfa'\n\n#d = {}\n#for c in s:\n# if c not in d:\n# d[c] = 0\n# d[c] += 1\n#\n\n#############################################\n\nfrom collections import defaultdict\n\nd = defaultdict(int)\n\nfor c in s:\n d[c] += 1\n\nprint(d)\n\n", "sub_path": "session6_2.py", "file_name": "session6_2.py", "file_ext": "py", "file_size_in_byte": 255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.defaultdict", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "300899968", "text": "\nimport telebot\n# need to create file ./config.py\nfrom pyzabbix import ZabbixAPI\nfrom config import *\n\n\nbot = telebot.TeleBot(tg_bot_key)\n@bot.message_handler(commands=['start', 'help'])\ndef sendwelcome(message):\n bot.reply_to(message, 'Closed bot. Only for private use.')\n\n@bot.message_handler(commands=['status'])\ndef getZabbixStatus(message):\n\n if (message.chat.id == -42960475):\n # install pip install zabbi\n\n zapi = ZabbixAPI(zabbix_url, user=zabbix_user, password=zabbix_password)\n bot.send_message(message.chat.id, \"Запрашиваю проблемы у Zabbix. Жди.\")\n msg = ''\n for problem in zapi.problem.get():\n item = zapi.item.get(triggerids=problem['objectid'])\n host = zapi.host.get(hostids=item[0]['hostid'], output=['hostid', 'name'])\n msg += 'Host: {} \\nProb: {}\\n======\\n'.format(host[0]['name'], problem['name'])\n bot.send_message(message.chat.id,msg)\n else:\n bot.reply_to(message, 'Closed bot. Only for private use.')\n\nbot.polling()", "sub_path": "zabbix-sender.py", "file_name": "zabbix-sender.py", "file_ext": "py", "file_size_in_byte": 1050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "telebot.TeleBot", "line_number": 8, "usage_type": "call"}, {"api_name": "pyzabbix.ZabbixAPI", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "13352237", "text": "import cv2\r\nimport numpy as np\r\nimport ImageName\r\n\r\n\r\n\r\n\r\n\r\nsensitivity = 15\r\n\r\n\r\n\r\ndef videoCapture():\r\n\r\n cap = cv2.VideoCapture(0)\r\n\r\n count = 0\r\n\r\n\r\n while(1):\r\n _,frame = cap.read()\r\n\r\n h,w = frame.shape[:2]\r\n\r\n # top_left_x = int(w / 10)\r\n # top_left_y = int(h / 4)\r\n # bottom_right_x = int(w /10) * 9\r\n # bottom_right_y = int(h/1.5)\r\n #\r\n # cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y),(0,255,0), 3)\r\n #\r\n # cropped = frame[bottom_right_y:top_left_y, top_left_x:bottom_right_x]\r\n #\r\n # # frame = cv2.flip(frame, 1)\r\n\r\n\r\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\r\n\r\n lower_White = np.array([0,0,0],dtype=np.uint8)\r\n upper_White = np.array([0,0,255],dtype=np.uint8)\r\n\r\n mask = cv2.inRange(hsv,lower_White,upper_White)\r\n\r\n res = cv2.bitwise_and(frame,frame,mask=mask)\r\n\r\n\r\n\r\n\r\n cv2.imshow('FRAME',frame)\r\n cv2.imshow('mask',mask)\r\n cv2.imshow('res',res)\r\n\r\n k = cv2.waitKey(1) & 0xFF\r\n\r\n if k==ord('c'): # Press C key\r\n\r\n count = count + 1\r\n print('SAVING IMAGE - ' + str(count))\r\n imgName = ImageName.imageName(count)\r\n\r\n cv2.imwrite(imgName, frame)\r\n\r\n elif k == 27: # Press ESC key\r\n break\r\n\r\n\r\n\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n videoCapture()", "sub_path": "FixAreaCamera.py", "file_name": 
"FixAreaCamera.py", "file_ext": "py", "file_size_in_byte": 1466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "cv2.VideoCapture", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 53, "usage_type": "call"}, {"api_name": "ImageName.imageName", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "48566784", "text": "# Loading modules\nimport pandas as pd\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import StandardScaler\n\n# Current directory\nos.getcwd()\n\n# Loading in data\ntrain = pd.read_csv('Melanoma/data/train.csv')\ntest = pd.read_csv(\"Melanoma/data/test.csv\")\n\n# Replacing missing data. Based off intuition gained in the eda process\n\n# Mole location replacing nas with 'missing'\ntrain['anatom_site_general_challenge'].fillna('missing', inplace=True)\ntest['anatom_site_general_challenge'].fillna('missing', inplace=True)\n\n# Imputing age with average age per target group\ntrain['age_approx'] = train.groupby('target')['age_approx'].transform(lambda x: x.fillna(x.mean()))\n\n# Imputing sex with 'missing'\ntrain['sex'].fillna('missing', inplace=True)\n\n# Dropping features that arent in test set\ntrain_dropped = train.drop(['image_name', 'patient_id', 'diagnosis', 'benign_malignant', 'target'], axis=1)\ntest_dropped = test.drop(['image_name', 'patient_id'], axis=1)\n\n# Encoding categorical features\ntrain_encoded = pd.get_dummies(train_dropped, drop_first=True)\ntest_encoded = pd.get_dummies(test_dropped, drop_first=True)\ntest_encoded['sex_missing'] = 0\ntest_encoded = test_encoded[train_encoded.columns]\n\n# Scaling data\nscaler = StandardScaler()\ntrain_scaled = scaler.fit_transform(train_encoded)\ntest_scaled = scaler.transform(test_encoded)\n\n# Saving data to .npz file\nnp.savez_compressed(\"Melanoma/data/preprocessed_meta.npz\",\n train_meta=train_scaled,\n test_meta=test_scaled,\n meta_labels=train_encoded.columns.values)", "sub_path": "Melanoma/initial_exploration/preprocessing_metadata.py", "file_name": "preprocessing_metadata.py", "file_ext": "py", "file_size_in_byte": 1575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 
32, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "526589206", "text": "from flask import Flask, request, render_template\nimport requests\nimport json\n\napp = Flask(__name__)\napp.debug = True \n\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/NewBandNameICallIt')\ndef show_form():\n return render_template(\"new_band_name_form.html\")\n\n\n@app.route('/newBandNameResult')\n# I have changed this function to search only for 'musicArtist' as the entity since this\n# is intended to check whether a band name has already been used.\ndef resultTunes():\n if request.method == 'GET':\n result = request.args\n params = {}\n params['term'] = result.get('artist')\n params['entity'] = 'musicArtist'\n resp = requests.get('https://itunes.apple.com/search?', params = params)\n data = json.loads(resp.text)\n return render_template('band_name_result.html', results = data['results'])", "sub_path": "new_application/new_flask_app.py", "file_name": "new_flask_app.py", "file_ext": "py", "file_size_in_byte": 863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "232686118", "text": "#!/usr/bin/env python\n\"\"\"\nScales data for the box kernel\n\"\"\"\nimport os\nimport numpy as np\n\nfrom data import get_dataset\n\nNAMES_FMT = '%s.names'\nDATA_FMT = '%s.data'\n\ndef main(dataset, factor, outputdir):\n factor = float(factor)\n dset = get_dataset(dataset)\n X = (factor*dset.instances).astype(int)\n\n # Remove irrelevant columns (all feature values identical)\n relevant = np.nonzero(np.max(X, axis=0) - np.min(X, axis=0))[0]\n X = X[:, relevant]\n\n namesfile = os.path.join(outputdir, NAMES_FMT % dataset)\n datafile = os.path.join(outputdir, DATA_FMT % dataset)\n\n with open(namesfile, 'w+') as f:\n f.write('0,1.\\n')\n f.write('bag_id: %s.\\n' % ','.join(dset.bag_ids))\n f.write('instance_id: %s.\\n'\n % ','.join([iid[1] for iid in dset.instance_ids]))\n for i in range(X.shape[1]):\n f.write('f%d: continuous.\\n' % (i+1))\n\n with open(datafile, 'w+') as f:\n for (bid, iid), xx, y in zip(dset.instance_ids, X, dset.instance_labels):\n xs = ','.join(map(str, xx))\n f.write('%s,%s,%s,%d.\\n' % (bid, iid, xs, y))\n\nif __name__ == '__main__':\n from optparse import OptionParser, OptionGroup\n parser = OptionParser(usage=\"Usage: %prog dataset factor outputdir\")\n options, args = parser.parse_args()\n options = dict(options.__dict__)\n if len(args) != 3:\n parser.print_help()\n exit()\n main(*args, **options)\n", "sub_path": "src/scale_data.py", "file_name": "scale_data.py", "file_ext": "py", "file_size_in_byte": 1430, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "data.get_dataset", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "optparse.OptionParser", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "318137380", "text": "import requests \nimport time\nfrom random import seed\nfrom random import random\n\nseed(1)\n\nPRIVATE_KEY = 'a123'\n\nAPI_ENDPOINT = \"http://localhost:8000/api/user\"\n\ntemp=25.0\npress=1000.0\n\nwhile True:\n\n temp = temp+2*random()-1\n press = press+2*random()-1\n\n print(temp,press)\n\n data = {'private_key':PRIVATE_KEY, \n 'temperature':temp, \n 'pressure':press\n } \n\n r = requests.post(url = API_ENDPOINT, json = data) \n\n print('posted')\n\n time.sleep(1)\n", "sub_path": "node_basic_2/poster.py", "file_name": "poster.py", "file_ext": "py", "file_size_in_byte": 495, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "random.seed", "line_number": 6, "usage_type": "call"}, {"api_name": "random.random", "line_number": 17, "usage_type": "call"}, {"api_name": "random.random", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "224643153", "text": "import os\nfrom os.path import sep, join, splitext\nimport json\nfrom tqdm import tqdm\n\nimport cv2\nimport imagesize\n\n\ndef pjoin(*args, **kwargs):\n return join(*args, **kwargs).replace(sep, '/')\n\ndef get_img_dicts(input_dir):\n\n result = []\n for item_id in tqdm(os.listdir(input_dir)):\n for img in os.listdir(pjoin(input_dir, item_id)):\n image_dict = {}\n\n image_dict['file_name'] = pjoin(input_dir, item_id, img)\n w, h = imagesize.get(image_dict['file_name'])\n image_dict['height'] = h\n image_dict['width'] = w\n image_dict['item_id'] = item_id\n image_dict['type'] = 'image'\n\n result.append(image_dict)\n\n return result\n\ndef get_vid_dicts(input_dir):\n\n result = []\n for vid_name in tqdm(os.listdir(input_dir)):\n item_id = splitext(vid_name)[0]\n image_dict = {}\n\n image_dict['file_name'] = pjoin(input_dir, vid_name)\n capture = cv2.VideoCapture(image_dict['file_name'])\n w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n image_dict['height'] = h\n image_dict['width'] = w\n image_dict['item_id'] = item_id\n image_dict['type'] = 'video'\n\n result.append(image_dict)\n\n return result\n\n# d = get_img_dicts('../input/validation_dataset_part1')", "sub_path": "bbox_sub/src_infer/prepare_json.py", "file_name": "prepare_json.py", "file_ext": "py", "file_size_in_byte": 1367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.sep", "line_number": 11, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 16, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": 
"call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "imagesize.get", "line_number": 21, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 34, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "344279490", "text": "'''\r\nCreated on 6 aug. 2018\r\n\r\n@author: m.vanturnhout\r\n'''\r\nimport logging\r\nimport globals\r\n\r\nfrom Control import Control\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\nclass TextArea(Control):\r\n '''\r\n classdocs\r\n '''\r\n\r\n\r\n def __init__(self, text=None):\r\n '''\r\n Constructor\r\n '''\r\n Control.__init__(self)\r\n self.interactive = False\r\n self.padding = (5,0)\r\n self.font = globals.font\r\n self.lines = []\r\n \r\n def draw(self):\r\n surf = Control.draw(self)\r\n ypos = 0\r\n for line in self.lines:\r\n text_surf = self.font.render(str(line), True, self.fgcolor, self.bgcolor)\r\n size = text_surf.get_size()\r\n point = (self.padding[0],ypos)\r\n ypos = ypos + size[1]\r\n# surf = pygame.surface.Surface(size)\r\n surf.blit(text_surf, point)\r\n if ypos > self.frame.height:\r\n #print 'pop line lines='+ str(len(self.lines))\r\n self.lines.pop(0)\r\n\r\n return surf ", "sub_path": "Application/gui/TextArea.py", "file_name": "TextArea.py", "file_ext": "py", "file_size_in_byte": 1052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "Control.Control", "line_number": 13, "usage_type": "name"}, {"api_name": "Control.Control.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "Control.Control", "line_number": 23, "usage_type": "name"}, {"api_name": "globals.font", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Control.Control.draw", "line_number": 30, "usage_type": "call"}, {"api_name": "Control.Control", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "579037094", "text": "\r\n#Kaggle Home Price dataset fit with variouse regression techniques for high dimensionality data\r\n\r\n# import all the necessaries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n# Read in data\r\ndf = pd.read_csv('HomePrice_train.csv')\r\n\r\n# Define y early on as the target (unscaled so far)\r\ny=np.ravel(df.SalePrice)\r\n\r\n#Drop columns with too many nan values to be useful\r\ndf.isnull().sum()\r\ndf = df.drop(['LotFrontage', 'Alley','FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'\r\n ,'Condition2', 'RoofMatl','Exterior1st', 'Exterior2nd', 'GarageYrBlt', 'Heating',\r\n 'Utilities', 'GarageQual', 'HouseStyle', 'Electrical'], axis = 1)\r\n\r\n# Fill nan in remaining datafraime in place using 'None' signifier to be picked up by dummies\r\n# Garage qualitative fills\r\ndf['GarageType'].fillna('None', inplace = True) \r\ndf['GarageFinish'].fillna('None', inplace = True)\r\ndf['GarageCond'].fillna('None', inplace = True)\r\n\r\n# Basement qualitative fills\r\ndf['BsmtQual'].fillna('None', inplace = True)\r\ndf['BsmtCond'].fillna('None', inplace = True) \r\ndf['BsmtExposure'].fillna('None', inplace = 
True)\r\ndf['BsmtFinType1'].fillna('None', inplace = True)\r\n\r\n# Quantitative Fills for original dataframe training data\r\ndf['MasVnrArea'].fillna(df['MasVnrArea'].mean(), inplace = True)\r\n\r\n\r\n#Encode all categoricals to dataframe (no further processing yet)\r\ndf = pd.get_dummies(df, drop_first = True)\r\nX = df.drop(['Id', 'SalePrice'], axis =1)\r\n\r\n\r\n#Scale all x and y features for use in ridge, lasso, elastic regressions:\r\nfrom sklearn.preprocessing import StandardScaler\r\nXscaler = StandardScaler() # can also do on whole dataframe\r\nyscaler = StandardScaler()\r\nXscl = pd.DataFrame(Xscaler.fit_transform(X), columns = X.columns)\r\nyscl = yscaler.fit_transform(y.reshape(-1,1))\r\nyscl = np.ravel(yscl)\r\n\r\n########################################################################################################### Ridge\r\n# Fitting Ridge regression\r\nfrom sklearn.linear_model import Ridge\r\nridge = Ridge()\r\nridge.fit(Xscl, yscl.reshape(-1,1))\r\nKeep = ridge.coef_\r\n\r\n\r\n######################################################################################################### Lasso\r\n# Fitting Lasso regression\r\nfrom sklearn.linear_model import LassoCV\r\nlasso = LassoCV()\r\nlasso.fit(Xscl, np.ravel(yscl))\r\nKeep = pd.DataFrame(lasso.coef_, index = X.columns)\r\n\r\n\r\n######################################################################################################### Elastic Net\r\n# Elastic Net\r\nfrom sklearn.linear_model import ElasticNetCV\r\nelastic = ElasticNetCV()\r\nelastic.fit(Xscl, np.ravel(yscl))\r\n\r\n\r\n######################################################################################################### Test Data\r\ndf_test = pd.read_csv('HomePrice_test.csv')\r\n\r\n# Define y early on as the target (unscaled so far)\r\ndf_test.isnull().sum()\r\ndf_test = df_test.drop(['LotFrontage', 'Alley','FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'\r\n ,'Condition2', 'RoofMatl','Exterior1st', 'Exterior2nd', 'GarageYrBlt', 'Heating',\r\n 'Utilities', 'GarageQual', 'HouseStyle', 'Electrical'], axis = 1)\r\n\r\n# Fill nan in remaining datafraime in place using 'None' signifier to be picked up by dummies\r\n# Garage test data qualitative fills\r\ndf_test['GarageType'].fillna('None', inplace = True) \r\ndf_test['GarageFinish'].fillna('None', inplace = True)\r\ndf_test['GarageCond'].fillna('None', inplace = True)\r\n\r\n# Basement test data qualitative fills\r\ndf_test['BsmtQual'].fillna('None', inplace = True)\r\ndf_test['BsmtCond'].fillna('None', inplace = True) \r\ndf_test['BsmtExposure'].fillna('None', inplace = True)\r\ndf_test['BsmtFinType1'].fillna('None', inplace = True)\r\n\r\n# Fill remaining NA quantitative values with zeroes\r\n#Basement quantitative fills test data\r\ndf_test['BsmtFinSF1'].fillna(0, inplace = True)\r\ndf_test['BsmtFinSF2'].fillna(0, inplace = True)\r\ndf_test['BsmtUnfSF'].fillna(0, inplace = True)\r\ndf_test['TotalBsmtSF'].fillna(0, inplace = True)\r\ndf_test['BsmtFullBath'].fillna(0, inplace = True)\r\ndf_test['BsmtHalfBath'].fillna(0, inplace = True)\r\n\r\n#Garage quantitative fills test data\r\ndf_test['GarageCars'].fillna(0, inplace = True)\r\ndf_test['GarageArea'].fillna(0, inplace = True)\r\n\r\n# Fill remaining test data quantitative NAs with averages where enough data exists\r\ndf_test['MasVnrArea'].fillna(df_test['MasVnrArea'].mean(), inplace = True)\r\n\r\n\r\n#Encode all categoricals to testing dataframe\r\ndf_test = pd.get_dummies(df_test, drop_first = True)\r\nX_test = df_test.drop(['Id'], axis 
=1)\r\n\r\n#Scale test data X features for final fits\r\nX_test_scl = pd.DataFrame(Xscaler.fit_transform(X_test), columns = X_test.columns)\r\n\r\n############################################################################# FIT FINAL MODELS TO X_test scaled\r\n\r\n# Final model predictions in scaled format\r\nLasso_Pred = lasso.predict(X_test_scl)\r\nRidge_Pred = ridge.predict(X_test_scl)\r\nElastic_Pred = elastic.predict(X_test_scl)\r\n\r\n# Final model predictions in original $ units format\r\nLasso_Final = yscaler.inverse_transform(Lasso_Pred)\r\nRidge_Final = yscaler.inverse_transform(Ridge_Pred)\r\nElastic_Final = yscaler.inverse_transform(Elastic_Pred)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "Real Estate_LassoRidge_Elastic.py", "file_name": "Real Estate_LassoRidge_Elastic.py", "file_ext": "py", "file_size_in_byte": 5230, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LassoCV", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNetCV", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "210375635", "text": "\"\"\"Demonstrate the creation of a wxPanel object.\"\"\"\n\n\nimport wx\n\n\nclass MainFrame(wx.Frame):\n \"\"\"Create and show the frame for the application.\"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Initialise the MainFrame class.\"\"\"\n super(MainFrame, self).__init__(*args, **kwargs)\n panel = MainPanel(parent=self)\n self.Show()\n\n\nclass MainPanel(wx.Panel):\n \"\"\"Create a panel to hold application widgets.\"\"\"\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialise the MainPanel class.\"\"\"\n super(MainPanel, self).__init__(parent, *args, **kwargs)\n\n\nif __name__ == \"__main__\":\n \"\"\"Implement the wxPython loop.\"\"\"\n SCREEN_APP = wx.App()\n MAIN_FRAME = MainFrame(parent=None, title=\"Basic panel\")\n SCREEN_APP.MainLoop()\n", "sub_path": "wx_python/snippets/basic_panel.py", "file_name": "basic_panel.py", "file_ext": "py", "file_size_in_byte": 777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "wx.Frame", "line_number": 7, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 16, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "434017354", "text": "from five import 
grok\nfrom plone.directives import dexterity, form\nfrom Acquisition import aq_inner\nfrom Products.CMFCore.utils import getToolByName\nfrom zope import schema\nfrom plone.app.contentlisting.interfaces import IContentListing\nfrom imu.contacttable import MessageFactory as _\n\nfrom imu.contacttable.sglcontact import ISglContact\n\n# Interface class; used to define content-type schema.\n\nclass IContactsTable(form.Schema): \n \"\"\"\n Contacts Folder\n \"\"\"\n title = schema.TextLine(\n title=_(u\"Name\"),\n required=True,\n )\n \n description = schema.Text(\n title=_(u\"A short summary\"),\n required=False,\n )\n\n\nclass ContactsTable(dexterity.Item):\n grok.implements(IContactsTable)\n\n\nclass ContactTableView(grok.View):\n grok.context(IContactsTable)\n grok.require('zope2.View')\n grok.name('view') \n \n def haveSglContacts(self):\n return len(self.contained_sglcontacts()) > 0\n \n def contacts(self):\n import math\n results = IContentListing(self.contained_sglcontacts())\n count = len(results)\n rowcount = count/2.0\n rows = math.ceil(rowcount)\n matrix = []\n for i in range(int(rows)):\n row = []\n for j in range(2):\n index = 2*i + j\n if index <= int(count - 1):\n cell = {}\n cell['item'] = results[index]\n row.append(cell)\n matrix.append(row)\n return matrix\n \n def contained_sglcontacts(self):\n \"\"\" Return a list of contained teasers in order to construct a item scrollable. \"\"\"\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n results = catalog(object_provides=ISglContact.__identifier__,\n path='/'.join(context.getPhysicalPath()),\n review_state='published',\n sort_on='sortable_title')\n return results\n", "sub_path": "src/imu.contacttable/imu/contacttable/contactstable.py", "file_name": "contactstable.py", "file_ext": "py", "file_size_in_byte": 2034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "plone.directives.form.Schema", "line_number": 13, "usage_type": "attribute"}, {"api_name": "plone.directives.form", "line_number": 13, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 17, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 17, "usage_type": "name"}, {"api_name": "imu.contacttable.MessageFactory", "line_number": 18, "usage_type": "call"}, {"api_name": "zope.schema.Text", "line_number": 22, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 22, "usage_type": "name"}, {"api_name": "imu.contacttable.MessageFactory", "line_number": 23, "usage_type": "call"}, {"api_name": "plone.directives.dexterity.Item", "line_number": 28, "usage_type": "attribute"}, {"api_name": "plone.directives.dexterity", "line_number": 28, "usage_type": "name"}, {"api_name": "five.grok.implements", "line_number": 29, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 29, "usage_type": "name"}, {"api_name": "five.grok.View", "line_number": 32, "usage_type": "attribute"}, {"api_name": "five.grok", "line_number": 32, "usage_type": "name"}, {"api_name": "five.grok.context", "line_number": 33, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 33, "usage_type": "name"}, {"api_name": "five.grok.require", "line_number": 34, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 34, "usage_type": "name"}, {"api_name": "five.grok.name", "line_number": 35, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 35, "usage_type": "name"}, {"api_name": 
"plone.app.contentlisting.interfaces.IContentListing", "line_number": 42, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 45, "usage_type": "call"}, {"api_name": "Acquisition.aq_inner", "line_number": 60, "usage_type": "call"}, {"api_name": "Products.CMFCore.utils.getToolByName", "line_number": 61, "usage_type": "call"}, {"api_name": "imu.contacttable.sglcontact.ISglContact.__identifier__", "line_number": 62, "usage_type": "attribute"}, {"api_name": "imu.contacttable.sglcontact.ISglContact", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "76558590", "text": "from speech_perturb import SpeedPerturb\nfrom asr_data_noise_rir import add_noise_rir ,NoiseRIR_Dataset\nfrom volume_perturb import volume_perturb\nimport soundfile as sf\nimport pdb\nimport torch as th\n\nsource_wav = '/data/LibriSpeech/example.wav'\nrir_path = '/data/LibriSpeech/RIRS_NOISES/real_rirs_isotropic_noises/air_type1_air_binaural_stairway_1_3_60.wav'\nnoise_path = '/data/LibriSpeech/noise.wav'\n\nsamples, sr = sf.read(source_wav)\nprint(1111,samples.shape)\n\n#add noise rir\nnoise_rir_dataset = NoiseRIR_Dataset( '/workspace/fairseq/manifest/augmentation/noises.txt','/workspace/fairseq/manifest/augmentation/rirs.txt',5,20)\nsamples = noise_rir_dataset.add_noise_rir(source_wav)\n#samples = add_noise_rir(source_wav, noise_path, rir_path)\n\n#pdb.set_trace()\nprint(22222,samples.shape)\n# add speed perturb\nsp = SpeedPerturb(sr= 16000, perturb= \"0.9,1.1\")\n# pdb.set_trace()\nsamples = th.tensor(samples)\n\n# samples = samples.unsqueeze(0)\n# print(33333,samples.size())\n# input a 2-D tensor\nout = sp(samples)\nprint(31233434,out.size())\n# add volume perturb\nout = volume_perturb(out,low=-1.6,high=1.6)\n# print(3213123,out)\nprint(4444,out.size())\nprint('done!')\n\n\n", "sub_path": "fairseq/data/data_augment/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "soundfile.read", "line_number": 12, "usage_type": "call"}, {"api_name": "asr_data_noise_rir.NoiseRIR_Dataset", "line_number": 16, "usage_type": "call"}, {"api_name": "speech_perturb.SpeedPerturb", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 25, "usage_type": "call"}, {"api_name": "volume_perturb.volume_perturb", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "204944932", "text": "import os\nimport unittest\nfrom collections import OrderedDict\nfrom jeanpaulstart.batch import parser\n\n\nYAML_TEXT = r\"\"\"---\nkey1: value1\nkey2: 2\nkey3:\n - true\n - false\n...\"\"\"\n\n\nJSON_TEXT = r\"\"\"{\n \"key1\": \"value1\",\n \"key2\": 2,\n \"key3\": [true, false]\n}\"\"\"\n\n\ndef _make_expected_data():\n expected_data = OrderedDict()\n expected_data['key1'] = 'value1'\n expected_data['key2'] = 2\n expected_data['key3'] = [True, False]\n\n return expected_data\n\n\nclass TestParser(unittest.TestCase):\n\n def setUp(self):\n self.expected_data = _make_expected_data()\n self.folder = os.path.expandvars(\"$TEMP/jeanpaulstart\")\n self.yaml_filepath = os.path.expandvars(\"$TEMP/jeanpaulstart/content1.yml\").replace('\\\\', '/')\n self.json_filepath = os.path.expandvars(\"$TEMP/jeanpaulstart/content2.json\").replace('\\\\', '/')\n\n if os.path.isfile(self.yaml_filepath):\n os.remove(self.yaml_filepath)\n\n if os.path.isfile(self.json_filepath):\n os.remove(self.json_filepath)\n\n if os.path.isdir(self.folder):\n os.rmdir(self.folder)\n\n 
os.makedirs(self.folder)\n\n with open(self.yaml_filepath, \"w+\") as f_yaml:\n f_yaml.write(YAML_TEXT)\n\n with open(self.json_filepath, \"w+\") as f_json:\n f_json.write(JSON_TEXT)\n\n def tearDown(self):\n if os.path.isfile(self.yaml_filepath):\n os.remove(self.yaml_filepath)\n\n if os.path.isfile(self.json_filepath):\n os.remove(self.json_filepath)\n\n if os.path.isdir(self.folder):\n os.rmdir(self.folder)\n\n def test_from_yaml_content(self):\n data = parser.from_yaml(YAML_TEXT)\n\n self.assertEqual(\n data,\n self.expected_data\n )\n\n def test_from_json_content(self):\n data = parser.from_json(JSON_TEXT)\n\n self.assertEqual(\n data,\n self.expected_data\n )\n\n def test_from_yaml_file(self):\n data = parser._from_yaml_file(self.yaml_filepath)\n\n self.assertEqual(\n data,\n self.expected_data\n )\n\n def test_from_json_file(self):\n data = parser._from_json_file(self.json_filepath)\n\n self.assertEqual(\n data,\n self.expected_data\n )\n\n def test_from_folder(self):\n data = parser.from_folders([self.folder])\n\n self.assertEqual(\n data,\n [\n (self.yaml_filepath, self.expected_data),\n (self.json_filepath, self.expected_data)\n ]\n )\n", "sub_path": "tests/test_parser.py", "file_name": "test_parser.py", "file_ext": "py", "file_size_in_byte": 2576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.OrderedDict", "line_number": 24, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.expandvars", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.expandvars", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.expandvars", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 65, "usage_type": "call"}, {"api_name": "jeanpaulstart.batch.parser.from_yaml", "line_number": 68, "usage_type": "call"}, {"api_name": "jeanpaulstart.batch.parser", "line_number": 68, "usage_type": "name"}, {"api_name": 
"jeanpaulstart.batch.parser.from_json", "line_number": 76, "usage_type": "call"}, {"api_name": "jeanpaulstart.batch.parser", "line_number": 76, "usage_type": "name"}, {"api_name": "jeanpaulstart.batch.parser._from_yaml_file", "line_number": 84, "usage_type": "call"}, {"api_name": "jeanpaulstart.batch.parser", "line_number": 84, "usage_type": "name"}, {"api_name": "jeanpaulstart.batch.parser._from_json_file", "line_number": 92, "usage_type": "call"}, {"api_name": "jeanpaulstart.batch.parser", "line_number": 92, "usage_type": "name"}, {"api_name": "jeanpaulstart.batch.parser.from_folders", "line_number": 100, "usage_type": "call"}, {"api_name": "jeanpaulstart.batch.parser", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "619568019", "text": "from django.db import models\n\n\n# 출판사 : 책 = 1:N 관계\n# 책 : 저자 = N:N 관계 (한 책에 저자 여러명, 한 저자가 여러책). -> N:N관계는 관계형테이블에서 만들수없다.-> 프로그램에서 자동으로 중간에 테이블 생성해줌.\nclass Book(models.Model):\n title = models.CharField(max_length=100)\n publisher = models.ForeignKey('Publisher', on_delete=models.CASCADE) # 출판사 외래키. 출판사와 1:N 관계\n authors = models.ManyToManyField('Author') # 책:저자 = N:N 관계이므로 manytomanyfield 속성 부여...\n pub_date = models.DateField()\n\n def __str__(self): # admin페이지에 이름이 보이도록 object객체를 string으로 만들어줌///\n return self.title\n\n\nclass Publisher(models.Model): # models상속받아야함\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=100)\n website = models.URLField()\n\n def __str__(self):\n return self.name\n\n\nclass Author(models.Model): # models상속받아야함\n name = models.CharField(max_length=50)\n intro = models.TextField() # TextField()자리수 제한없음\n email = models.EmailField()\n photo = models.ImageField(upload_to='photos/%Y',\n default='photos/noimg.png') # upload to =위치에 자동으로 폴더 만들어진다. 
%Y : 2020 해당년도.....default 사진이 없을경우\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs): # 이미지 변경시 기존파일 삭제되고 새로운파일 올라오도록....\n try:\n old = Author.objects.get(id=self.id) # 해당 데이터의 id key값에 기존에 저장되어있는 정보\n if old.photo != self.photo: # 기존에 올라간 사진 != 지금 올린 사진\n old.photo.delete(save=False) # 기존 사진 삭제\n except:\n pass\n super(Author, self).save() # 상위클래스의 save함수를 재정의\n", "sub_path": "django_bookstore/shoppingmall/books/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.models.Model", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "578201989", "text": "__author__ = 'Wenwei Huang'\n\nfrom matplotlib.colors import colorConverter\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib.collections import LineCollection\n\nfrom .ohlc_textbox import OHLCVTextBox\nfrom .baserenderer import BaseRenderer\nimport config\nimport collections\n\nclass MainRenderer(BaseRenderer):\n def __init__(self, axes, df):\n super(MainRenderer, self).__init__(axes)\n self.ohlcvtextbox = None\n self.set_data(df)\n\n def set_data(self, df):\n super(MainRenderer, self).set_data(df)\n self.x = self.dates\n 
self.y = self.closes\n\n def render(self):\n super(MainRenderer, self).render()\n if self.x is None or self.y is None: return\n\n self.ohlcvtextbox = OHLCVTextBox(self.axes, self.df)\n self.ohlcvtextbox.render()\n self.artists.append(self.ohlcvtextbox)\n\n self.lines = self.axes.plot(\n self.x, self.y, '-', color=config.main_curve_color)\n\n self.axes.yaxis.tick_right()\n\n def remove_lines(self):\n if self.lines:\n if isinstance(self.lines, collections.Iterable):\n for line in self.lines:\n line.remove()\n del line\n else:\n self.lines.remove()\n del self.lines\n\n def candlestick(self, width=0.2, alpha=1.0):\n delta = width/2.0\n r,g,b = colorConverter.to_rgb(config.colorup)\n colorup = r,g,b,alpha\n r,g,b = colorConverter.to_rgb(config.colordown)\n colordown = r,g,b,alpha\n colord = {True: colorup, False: colordown}\n colors = [colord[open pandas.DataFrame:\n data = {'slideviewer_path': ['CMU-1.svs'],\n 'slide_id': ['CMU-1'],\n 'sv_project_id': [155],\n 'bmp_filepath': ['tests/luna_pathology/proxy_table/regional_annotation/test_data/input/labels.bmp'],\n 'user': ['someuser'],\n 'date_added': [1612403271],\n 'date_updated': [1612403271],\n 'bmp_record_uuid': ['SVBMP-90836da'],\n 'latest': [True],\n 'SLIDE_BMP_DIR': [\n 'tests/luna_pathology/proxy_table/regional_annotation/test_data/output/regional_bmps'],\n 'TMP_ZIP_DIR': [\n 'tests/luna_pathology/proxy_table/regional_annotation/test_data/output/gynocology_tmp_zips'],\n 'SLIDEVIEWER_API_URL': ['https://fakeslides-res.mskcc.org/']}\n\n return pandas.DataFrame(data=data)\n\n\n monkeypatch.setattr(generate, \"process_regional_annotation_slide_row_pandas\",\n mock_process)\n\n\n assert create_proxy_table() == 0 # exit code\n", "sub_path": "tests/luna_pathology/proxy_table/regional_annotation/test_generate.py", "file_name": "test_generate.py", "file_ext": "py", "file_size_in_byte": 5717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "luna_core.common.config.ConfigSet", "line_number": 27, "usage_type": "call"}, {"api_name": "luna_core.common.constants.APP_CFG", "line_number": 27, "usage_type": "attribute"}, {"api_name": "luna_core.common.constants", "line_number": 27, "usage_type": "name"}, {"api_name": "luna_core.common.config.ConfigSet", "line_number": 28, "usage_type": "call"}, {"api_name": "luna_core.common.constants.DATA_CFG", "line_number": 28, "usage_type": "attribute"}, {"api_name": "luna_core.common.constants", "line_number": 28, "usage_type": "name"}, {"api_name": "luna_core.common.sparksession.SparkConfig", "line_number": 30, "usage_type": "call"}, {"api_name": "luna_core.common.constants.APP_CFG", "line_number": 30, "usage_type": "attribute"}, {"api_name": "luna_core.common.constants", "line_number": 30, "usage_type": "name"}, {"api_name": "luna_core.common.config.ConfigSet", "line_number": 32, "usage_type": "call"}, {"api_name": "luna_core.common.constants.DATA_CFG", "line_number": 33, "usage_type": "attribute"}, {"api_name": "luna_core.common.constants", "line_number": 33, "usage_type": "name"}, {"api_name": "luna_core.common.constants.DATA_CFG", "line_number": 34, "usage_type": "attribute"}, {"api_name": "luna_core.common.constants", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 37, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 38, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 41, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 42, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 48, "usage_type": "call"}, {"api_name": "luna_pathology.proxy_table.regional_annotation.generate.convert_bmp_to_npy", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 66, "usage_type": "attribute"}, {"api_name": "luna_pathology.common", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "luna_pathology.proxy_table.regional_annotation.generate.process_regional_annotation_slide_row_pandas", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 123, "usage_type": "call"}, {"api_name": "luna_pathology.proxy_table.regional_annotation.generate", "line_number": 126, "usage_type": "argument"}, {"api_name": "luna_pathology.proxy_table.regional_annotation.generate.create_proxy_table", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "236979703", "text": "import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n\npenguins = pd.read_csv(\n '/media/nagachi/Nagachi/Datasets/data-master/penguins_cleaned.csv')\n\ndf = penguins.copy()\ntarget = 'species'\nencode = ['island', 'sex']\n\nprint(df.head())\n\n# Encode categoricacl column\nfor col in encode:\n dummy = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df, dummy], axis=1)\n del df[col]\n\ntarget_mapper = {'Adelie': 0, 'Chinstrap': 1, 'Gentoo': 2}\n\n\ndef target_encode(numValue):\n return target_mapper[numValue]\n\n\ndf['species'] = df['species'].apply(target_encode)\n\nX = df.drop('species', axis=1)\ny = df['species']\n\nclf = RandomForestClassifier()\nclf.fit(X, y)\npickle.dump(clf, open('Basic-project/DataScienceApp/penguins_clf.pkl', 'wb'))\n", "sub_path": "Basic-project/DataScienceApp/penguin_algo.py", "file_name": "penguin_algo.py", "file_ext": "py", "file_size_in_byte": 775, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "474008699", "text": "#Autor: Pablo Gullith\n#Bibliotecas\nfrom scipy import array, arange\nfrom pylab import plot, show, xlabel, ylabel\n\n#Constantes\nomega = 1\nt_0 = 0\nt_f = 50\nx_0 = 1\nv_0 = 0\nN = 5000\nh = (t_f - t_0) / N\n\ndef f_harmonico(r, t):\n x = r[0]\n 
v = r[1]\n return array([v, - omega ** 2 * x], float)\n\npontos_t = arange(t_0, t_f, h)\ndef x_harmonico(amplitude):\n pontos_x = []\n r = [amplitude, v_0]\n for t in pontos_t:\n pontos_x.append(r[0])\n k1 = h * f_harmonico(r, t)\n k2 = h * f_harmonico(r + 0.5 * k1, t + 0.5 * h)\n k3 = h * f_harmonico(r + 0.5 * k2, t + 0.5 * h)\n k4 = h * f_harmonico(r + k3, t + h)\n r += (k1 + 2 * k2 + 2 * k3 + k4) / 6\n return array(pontos_x, float)\n\ndef f_anarmonico(r, t):\n x = r[0]\n v = r[1]\n return array([v, - omega ** 2 * x ** 3], float)\n\ndef x_anarmonico(amplitude):\n pontos_x = []\n pontos_v = []\n r = array([amplitude, v_0], float)\n for t in pontos_t:\n pontos_x.append(r[0])\n pontos_v.append(r[1])\n k1 = h * f_anarmonico(r, t)\n k2 = h * f_anarmonico(r + 0.5 * k1, t + 0.5 * h)\n k3 = h * f_anarmonico(r + 0.5 * k2, t + 0.5 * h)\n k4 = h * f_anarmonico(r + k3, t + h)\n r += (k1 + 2 * k2 + 2 * k3 + k4) / 6\n return array(pontos_x, float), array(pontos_v, float)\n\n\nplot(pontos_t, x_anarmonico(x_0)[0])\nplot(pontos_t, x_anarmonico(2 * x_0)[0])\nxlabel('t (s)')\nylabel('x (m)')\nshow()\n\nx, v = x_anarmonico(x_0)\nplot(x, v)\nxlabel('x')\nylabel('v')\nshow()\n\n\n#Constantes\nt_f = 20\nN = 10000\nh = (t_f - t_0) / N\ndef g(r, t, mu):\n x = r[0]\n v = r[1]\n return array([v, mu * (1 - x ** 2) * v - omega ** 2 * x], float)\n\npontos_t = arange(t_0, t_f, h)\n\ndef x_van_der_pol(mu):\n pontos_x = []\n pontos_v = []\n r = array([x_0, v_0], float)\n for t in pontos_t:\n pontos_x.append(r[0])\n pontos_v.append(r[1])\n k1 = h * g(r, t, mu)\n k2 = h * g(r + 0.5 * k1, t + 0.5 * h, mu)\n k3 = h * g(r + 0.5 * k2, t + 0.5 * h, mu)\n k4 = h * g(r + k3, t + h, mu)\n r += (k1 + 2 * k2 + 2 * k3 + k4) / 6\n return array(pontos_x, float), array(pontos_v, float)\n\n\nmu1_x, mu1_v = x_van_der_pol(1)\nmu2_x, mu2_v = x_van_der_pol(2)\nmu3_x, mu3_v = x_van_der_pol(4)\nplot(mu1_x, mu1_v, 'r')\nplot(mu2_x, mu2_v, 'b')\nplot(mu3_x, mu3_v, 'g')\nxlabel('x')\nylabel('v')\nshow()", "sub_path": "Questao2.py", "file_name": "Questao2.py", "file_ext": "py", "file_size_in_byte": 2339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "scipy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.arange", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 56, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 57, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 61, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 62, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.arange", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "pylab.plot", 
"line_number": 95, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 98, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 99, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "644827032", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\"\"\"\nArchitecture based on InfoGAN paper.\n\"\"\"\n\nclass Generator(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.tconv1 = nn.ConvTranspose2d(138, 448, 2, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(448)\n\n self.tconv2 = nn.ConvTranspose2d(448, 256, 4, 2, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(256)\n\n self.tconv3 = nn.ConvTranspose2d(256, 128, 4, 2, padding=1, bias=False)\n\n self.tconv4 = nn.ConvTranspose2d(128, 64, 4, 2, padding=1, bias=False)\n\n self.tconv5 = nn.ConvTranspose2d(64, 3, 4, 2, padding=1, bias=False)\n\n def forward(self, x):\n x = F.relu(self.bn1(self.tconv1(x)))\n x = F.relu(self.bn2(self.tconv2(x)))\n x = F.relu(self.tconv3(x))\n x = F.relu(self.tconv4(x))\n\n img = torch.tanh(self.tconv5(x))\n\n return img\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv1 = nn.Conv2d(3, 64, 4, 2, 1)\n\n self.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias=False)\n self.bn2 = nn.BatchNorm2d(128)\n\n self.conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias=False)\n self.bn3 = nn.BatchNorm2d(256)\n\n def forward(self, x):\n x = F.leaky_relu(self.conv1(x), 0.1, inplace=True)\n x = F.leaky_relu(self.bn2(self.conv2(x)), 0.1, inplace=True)\n x = F.leaky_relu(self.bn3(self.conv3(x)), 0.1, inplace=True)\n\n return x\n\nclass DHead(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv = nn.Conv2d(256, 1, 4)\n\n def forward(self, x):\n output = torch.sigmoid(self.conv(x))\n\n return output\n\nclass QHead(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv1 = nn.Conv2d(256, 128, 4, bias=False)\n self.bn1 = nn.BatchNorm2d(128)\n\n self.conv_disc = nn.Conv2d(128, 40, 1)\n self.conv_mu = nn.Conv2d(128, 4, 1)\n self.conv_var = nn.Conv2d(128, 4, 1)\n\n def forward(self, x):\n x = F.leaky_relu(self.bn1(self.conv1(x)), 0.1, inplace=True)\n\n disc_logits = self.conv_disc(x).squeeze()\n\n mu = self.conv_mu(x).squeeze()\n var = torch.exp(self.conv_var(x).squeeze())\n\n return disc_logits, mu, var\n", "sub_path": "models/svhn_model.py", "file_name": "svhn_model.py", "file_ext": "py", "file_size_in_byte": 2285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 19, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", 
"line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.exp", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "104339295", "text": "import numpy as np\nimport pandas as pd\nimport time\nimport sys\nfrom bitarray import bitarray\n\nctrl_df = pd.read_csv('binarized_ado_ctrl', header=None, dtype=str)\ncase_df = pd.read_csv('binarized_ado_case', header=None, dtype=str)\n\n#print(str(ctrl_df[0]))\n\nctrl_df.columns=['bitarray']\nx=np.arange(0,len(ctrl_df.iloc[0][0])/2, dtype=int)\n\nk=0\nfor i in x:\n\tctrl_df[i] = ctrl_df['bitarray'].str[k:k+2]\n\tk+=2\n\nctrl_df.drop('bitarray', inplace=True, axis=1)\nctrl_df=ctrl_df.T\n\n\ncase_df.columns=['bitarray']\nx=np.arange(0,len(case_df.iloc[0][0])/2, dtype=int)\n\nk=0\nfor i in x:\n\tcase_df[i] = case_df['bitarray'].str[k:k+2]\n\tk+=2\n\ncase_df.drop('bitarray', inplace=True, axis=1)\ncase_df=case_df.T\n\ndef match(C, AB):\n\tA=bitarray(''.join([a[0] for a in AB]))\n\tB=bitarray(''.join([a[1] for a in AB]))\n\t#print(A,B,C)\n\t#print((~C & ~A) | (C & B) | (A & ~B))\n\treturn ((~C & ~A) | (C & B) | (A & ~B)).all()==True\n\n#print(match(bitarray('0101'), [bitarray('00'), bitarray('01'), bitarray('11'), bitarray('11')]))\n\ncarriers_case=[]\ncase_count=[]\ncluster_num=[] \nwith open(sys.argv[1]) as cluster_file:\n\tfor num, line in enumerate(cluster_file, 1):\n\t\tcarriers_case_temp=[]\n\t\tcase_count_temp=0\n\t\tnodes=line.rstrip('\\n').split(',')\n\t\tif len(nodes)==1:\n\t\t\tcontinue\n\t\tsnps_indices=[]\n\t\talleles=[]\n\t\tfor node in nodes:\n\t\t\tsnps_indices+=[int(node.split('_')[0]), ]\n\t\t\talleles+=[int(node.split('_')[1]), ]\n\t\tflag=0\n\t\tfor index, row in case_df.iterrows():\n\t\t\tAB = (row[snps_indices]).to_list()\n\t\t\tif match(bitarray(alleles), AB) == True:\n\t\t\t\tif flag==0:\n\t\t\t\t\tcluster_num.append(num)\n\t\t\t\t\tflag=1\n\t\t\t\tcarriers_case_temp+=[index,]\n\t\t\t\tcase_count_temp+=1\n\t\tcarriers_case+=[carriers_case_temp, ]\n\t\tcase_count+=[case_count_temp,]\n\ncarriers_ctrl=[]\nctrl_count=[]\nfor line in open(sys.argv[1], 'r').readlines():\n\tcarriers_ctrl_temp=[]\n\tctrl_count_temp=0\n\tnodes=line.rstrip('\\n').split(',')\n\tif len(nodes)==1:\n\t\tcontinue\n\tsnps_indices=[]\n\talleles=[]\n\tfor node in nodes:\n\t\tsnps_indices+=[int(node.split('_')[0]), ]\n\t\talleles+=[int(node.split('_')[1]), ]\n\n\tfor index, row in ctrl_df.iterrows():\n\t\tAB = (row[snps_indices]).to_list()\n\t\tif match(bitarray(alleles), AB) == True:\n\t\t\tcarriers_ctrl_temp+=[index,]\n\t\t\tctrl_count_temp+=1\n\tcarriers_ctrl+=[carriers_ctrl_temp, ]\n\tctrl_count+=[ctrl_count_temp,]\n\n\n\nprint('Case Count:\\n', case_count)\nprint('\\nControl Count:\\n', ctrl_count)\nprint('\\nCluster Number:\\n', cluster_num)\n\np = np.array(case_count)/case_df.shape[0]\nq = np.array(ctrl_count)/ctrl_df.shape[0]\n\nodds_ratio = (p*(1-q))/(q*(1-p))\nprint('\\nOdds Ratio:\\n')\nfor oR in odds_ratio:\n\tprint(oR)\n", "sub_path": "Carriers/carriers.py", "file_name": "carriers.py", "file_ext": "py", "file_size_in_byte": 2558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 13, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "bitarray.bitarray", "line_number": 36, "usage_type": "call"}, {"api_name": "bitarray.bitarray", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bitarray.bitarray", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 73, "usage_type": "attribute"}, {"api_name": "bitarray.bitarray", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "619335747", "text": "import discord\r\n\r\nfrom discord.ext import commands\r\n\r\n\r\ndef owl_postfix(num: int, postfix_1: str = 'год', postfix_2: str = 'года', postfix_3: str = 'лет'):\r\n if num > 20:\r\n num = int(str(num)[-1])\r\n if num == 1:\r\n return postfix_1\r\n if num >= 2 and num <= 4:\r\n return postfix_2\r\n if num == 0 or num >= 5 and num <= 9:\r\n return postfix_3\r\n else:\r\n if num == 1:\r\n return postfix_1\r\n if num >= 2 and num <= 4:\r\n return postfix_2\r\n if num == 0 or num >= 5 and num <= 20:\r\n return postfix_3\r\n\r\n\r\nclass Clear(commands.Cog):\r\n \"\"\"docstring for PressF\"\"\"\r\n\r\n def __init__(self, Bot):\r\n self.Bot = Bot\r\n\r\n @commands.command()\r\n async def clear(self, ctx, amount: int):\r\n if amount > 10:\r\n await ctx.send(\"Столько незя\")\r\n else:\r\n await ctx.channel.purge(limit=amount + 1)\r\n edit1 = discord.Embed(\r\n title=f\"Успешно удалено **{str(amount)}** {owl_postfix(amount, 'сообщение', 'сообщения', 'сообщений')}\",\r\n colour=0x42f4f4)\r\n await ctx.send(embed=edit1, delete_after=10)\r\n\r\n\r\ndef setup(Bot):\r\n Bot.add_cog(Clear(Bot))\r\n", "sub_path": "cogs/clear.py", "file_name": "clear.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 24, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 24, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "228722147", "text": "import re\n\nfrom common.site import omit_keyword_quotes\nfrom scraping.chunks.prefix_mapping import chunk_prefixes, chunk_prefix_len\nfrom scraping.scraper import SearchResultItem\n\n\ndef find_chunk_index(search_result_item: SearchResultItem) -> int:\n '''\n Return index of chunk document\n by title and link extracted from SERP:\n Example:\n link: https://hydromel-chouchenn.eu.org/chunks/chunk_83\n index: 83\n\n title: azpoicvsdu un mt mll. uf ehq iull rqoquvq yq, ea ygoh thq ...\n index: 43\n ^ Because chunk_prefixes = {... 
'azpoicvsdu un mt mll' : 43 ...}\n '''\n chunk_index = -1\n\n title = omit_keyword_quotes(search_result_item.title)\n link = search_result_item.link\n\n chunk_match = re.search(r'(chunk(_)?)(\\d+)', title)\n if chunk_match != None:\n chunk_index = int(chunk_match.group(3))\n\n else:\n title_prefix = title[: chunk_prefix_len]\n \n if title_prefix in chunk_prefixes.keys():\n chunk_index = chunk_prefixes[title_prefix]\n\n else:\n chunk_match = re.search(r'(chunk_)(\\d+)', link)\n\n if chunk_match != None:\n chunk_index = int(chunk_match.group(2))\n \n else:\n print('no appropriate index for', title)\n\n return chunk_index\n", "sub_path": "src/scraping/chunks/processor.py", "file_name": "processor.py", "file_ext": "py", "file_size_in_byte": 1190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "scraping.scraper.SearchResultItem", "line_number": 8, "usage_type": "name"}, {"api_name": "common.site.omit_keyword_quotes", "line_number": 22, "usage_type": "call"}, {"api_name": "re.search", "line_number": 25, "usage_type": "call"}, {"api_name": "scraping.chunks.prefix_mapping.chunk_prefix_len", "line_number": 30, "usage_type": "name"}, {"api_name": "scraping.chunks.prefix_mapping.chunk_prefixes.keys", "line_number": 32, "usage_type": "call"}, {"api_name": "scraping.chunks.prefix_mapping.chunk_prefixes", "line_number": 32, "usage_type": "name"}, {"api_name": "scraping.chunks.prefix_mapping.chunk_prefixes", "line_number": 33, "usage_type": "name"}, {"api_name": "re.search", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "25190724", "text": "\n# Stolen from https://devcenter.heroku.com/articles/clock-processes-python\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport database as db\nimport unseen_scrapper as scrapper\nimport configparser\nimport traceback\nimport requests\n\nsched = BlockingScheduler()\nMINUTES_INTERVAL = 20\nUPDATE_VIP_JOB_NAME = 'updateVip'\n\n\n@sched.scheduled_job('interval', minutes=MINUTES_INTERVAL)\ndef timed_job():\n # print('This job is run every three minutes.')\n try:\n update_vips()\n db.register_job_result(UPDATE_VIP_JOB_NAME, True, None)\n except Exception as e:\n print('Error with cron')\n message = traceback.format_exc()\n print(message)\n db.register_job_result(UPDATE_VIP_JOB_NAME, False, message)\n\n\ndef update_vips():\n\n bot_id = get_bot_id()\n\n vip = db.get_vip_user_ids()\n\n for chat_id in vip:\n update_unseen(bot_id, chat_id)\n\n\ndef get_bot_id():\n config = configparser.ConfigParser()\n config.read('sensitive.conf')\n bot_id = config['telegram.com']['bot-id']\n return bot_id\n\n\ndef update_unseen(bot_id, chat_id):\n urls = db.get_urls(chat_id)\n history = list(map(lambda ad: ad['id'], db.get_history(chat_id)))\n\n if len(urls) == 0:\n # Not sending messages on purpose.\n # send_message(bot_id, chat_id, 'There are no registered urls')\n return\n\n process_unseen(bot_id, chat_id, urls, history)\n\n\ndef process_unseen(bot_id, chat_id, urls, history):\n seen, unseen = scrapper.scrap_for_unseen(urls, history)\n\n if len(unseen) == 0:\n return\n\n send_message(bot_id, chat_id, 'You have already seen {} ads of your current urls and {} in total'.format(len(seen), len(history)))\n\n for ad in unseen:\n send_message(bot_id, chat_id, ad['url'])\n\n mark_as_seen(chat_id, unseen)\n\n\ndef mark_as_seen(chat_id, unseen):\n db.add_seen(chat_id, unseen)\n\n\ndef send_message(bot_id, chat_id, text):\n url = 
\"https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}\".format(bot_id, chat_id, text)\n try:\n r = requests.get(url)\n except:\n print('Error in request: {}'.format(url))\n\n\nsched.start()\n\n", "sub_path": "propfinder/clock.py", "file_name": "clock.py", "file_ext": "py", "file_size_in_byte": 2134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "apscheduler.schedulers.blocking.BlockingScheduler", "line_number": 11, "usage_type": "call"}, {"api_name": "database.register_job_result", "line_number": 21, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 24, "usage_type": "call"}, {"api_name": "database.register_job_result", "line_number": 26, "usage_type": "call"}, {"api_name": "database.get_vip_user_ids", "line_number": 33, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 40, "usage_type": "call"}, {"api_name": "database.get_urls", "line_number": 47, "usage_type": "call"}, {"api_name": "database.get_history", "line_number": 48, "usage_type": "call"}, {"api_name": "unseen_scrapper.scrap_for_unseen", "line_number": 59, "usage_type": "call"}, {"api_name": "database.add_seen", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "446595018", "text": "from django.conf import settings\nfrom django.urls import path\n\nfrom rest_framework.schemas import get_schema_view\n\nfrom device_registry import views, api_views\n\nschema_view = get_schema_view(title='WoTT API')\napi_version = 'v0.2'\n\nurlpatterns = []\n\n# API\nif settings.IS_API:\n urlpatterns += [\n path('api/{}/list-devices'.format(api_version), api_views.DeviceListView.as_view(), name='list_devices'),\n path('api/{}/ca-bundle'.format(api_version), api_views.CABundleView.as_view(), name='get_ca_bundle'),\n path('api/{}/ca'.format(api_version), api_views.CACertView.as_view(), name='get_ca'),\n path('api/{}/generate-id'.format(api_version), api_views.DeviceIDView.as_view(), name='get_device_id'),\n path('api/{}/device-cert/'.format(api_version), api_views.DeviceCertView.as_view(),\n name='get_device_cert'),\n path('api/{}/sign-csr'.format(api_version), api_views.SignNewDeviceView.as_view(), name='sign_device_cert'),\n path('api/{}/sign-expired-csr'.format(api_version), api_views.RenewExpiredCertView.as_view(),\n name='sign_expired_cert'),\n path('api/{}/claim-device'.format(api_version), api_views.ClaimByLink.as_view(), name='claim_by_link'),\n path('api/{}/enroll-device'.format(api_version), api_views.DeviceEnrollView.as_view(), name='enroll_by_key'),\n ]\n\n# Only load if mTLS\nif settings.IS_MTLS_API:\n urlpatterns += [\n path('api/{}/sign-csr'.format(api_version), # TODO: change to some unique path.\n api_views.MtlsRenewCertView.as_view(),\n name='mtls-sign-device-cert'),\n # Only for tests! 
We need it because of IS_API and IS_MTLS_API url duplication.\n path('api/{}/sign-csr-test'.format(api_version),\n api_views.MtlsRenewCertView.as_view(),\n name='mtls-sign-device-cert-test'),\n path('api/{}/ping'.format(api_version), api_views.MtlsPingView.as_view(), name='mtls-ping'),\n path('api/{}/hello'.format(api_version), api_views.MtlsTesterView.as_view(), name='mtls-tester'),\n path('api/{}/action//'.format(api_version), api_views.ActionView.as_view(),\n name='action'),\n path('api/{}/claimed'.format(api_version), api_views.IsDeviceClaimedView.as_view(), name='mtls-is_claimed'),\n\n path('api/{}/credentials'.format(api_version), api_views.MtlsCredsView.as_view(), name='mtls-credentials'),\n path('api/{}/device-metadata'.format(api_version), api_views.MtlsDeviceMetadataView.as_view(),\n name='mtls-device-metadata'),\n\n # TODO: deprecated names should be removed later (/creds, /dev-md) 2019-08-26:\n path('api/{}/creds'.format(api_version), api_views.MtlsCredsView.as_view(), name='mtls-credentials'),\n path('api/{}/dev-md'.format(api_version), api_views.MtlsDeviceMetadataView.as_view(),\n name='mtls-device-metadata')\n ]\n\n# Front-end\nif settings.IS_DASH or settings.IS_CELERY:\n urlpatterns += [\n path('', views.DashboardView.as_view(), name='dashboard'),\n path('nodes/', views.RootView.as_view(), name='root'),\n path('policies/', views.GlobalPoliciesListView.as_view(), name='global_policies'),\n path('policies/add/', views.GlobalPolicyCreateView.as_view(), name='create_global_policy'),\n path('policies//', views.GlobalPolicyEditView.as_view(), name='edit_global_policy'),\n path('policies//delete/', views.GlobalPolicyDeleteView.as_view(), name='delete_global_policy'),\n path('claim-device/',\n views.claim_device_view,\n name='claim-device'),\n path(\n 'devices//',\n views.DeviceDetailView.as_view(),\n name='device-detail'\n ),\n path(\n 'devices//software/',\n views.DeviceDetailSoftwareView.as_view(),\n name='device-detail-software'\n ),\n path('devices//security/', views.DeviceDetailSecurityView.as_view(),\n name='device-detail-security'),\n path('devices//security/save-as-policy/', views.GlobalPolicyCreateView.as_view(),\n name='save_as_policy'),\n path(\n 'devices//network/',\n views.DeviceDetailNetworkView.as_view(),\n name='device-detail-network'\n ),\n path(\n 'devices//hardware/',\n views.DeviceDetailHardwareView.as_view(),\n name='device-detail-hardware'\n ),\n path(\n 'devices//metadata/',\n views.DeviceDetailMetadataView.as_view(),\n name='device-detail-metadata'\n ),\n path('credentials/',\n views.CredentialsView.as_view(),\n name='credentials'),\n path('ajax-credentials/', api_views.CredentialsView.as_view(), name='ajax-credentials'),\n path('ajax-credentials//delete/', api_views.DeleteCredentialView.as_view(),\n name='ajax-credentials-delete'),\n path('ajax-credentials//update/', api_views.UpdateCredentialView.as_view(),\n name='ajax-credentials-update'),\n path('ajax-credentials/create/', api_views.CreateCredentialView.as_view(), name='ajax-credentials-create'),\n path('ajax-policies//device-nr/', api_views.PolicyDeviceNumberView.as_view(),\n name='ajax_policy_device_nr'),\n path('actions/', views.RecommendedActionsView.as_view(), name='actions'),\n path('devices//actions/', views.RecommendedActionsView.as_view(), name='device_actions'),\n path('cve/', views.CVEView.as_view(), name='cve'),\n path('devices//cve/', views.CVEView.as_view(), name='device_cve'),\n path('ajax/tags/autocomplete/', api_views.autocomplete_tags, name='ajax-tags-autocomplete'),\n 
path('pairing-keys/',\n views.PairingKeysView.as_view(),\n name='pairing-keys'),\n path('pairing-keys/download',\n views.PairingKeySaveFileView.as_view(),\n name='pairing-keys-download'),\n path('ajax-pairing-keys/', api_views.PairingKeyListView.as_view(), name='ajax_pairing_keys'),\n path('ajax-pairing-keys/create/', api_views.CreatePairingKeyView.as_view(), name='ajax_pairing_keys_create'),\n path(\n 'ajax-pairing-keys//delete/',\n api_views.DeletePairingKeyView.as_view(),\n name='ajax_pairing_keys_delete'\n ),\n path(\n 'ajax-pairing-keys//update/',\n api_views.UpdatePairingKeyView.as_view(),\n name='ajax_pairing_keys_update'\n ),\n path('ajax-pairing-keys/add_dev/', api_views.InstallInstructionKeyView.as_view(),\n name='ajax_install_instruction'),\n path('devices/device-cert//', api_views.DeviceCertView.as_view(), name='download_device_cert'),\n path('ajax-batch/list//', api_views.GetBatchActionsView.as_view(), name='get_batch_list'),\n path('ajax-batch/apply//tags/', api_views.BatchUpdateTagsView.as_view(), name='tags_batch'),\n path('ajax-devices/list/', api_views.DeviceListAjaxView.as_view(), name='ajax_device_list'),\n path('snooze-action/', api_views.SnoozeActionView.as_view(), name='snooze_action')\n ]\n", "sub_path": "backend/device_registry/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 7383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rest_framework.schemas.get_schema_view", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.settings.IS_API", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceListView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceListView", "line_number": 16, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "device_registry.api_views.CABundleView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "device_registry.api_views.CABundleView", "line_number": 17, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "device_registry.api_views.CACertView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "device_registry.api_views.CACertView", "line_number": 18, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceIDView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceIDView", "line_number": 19, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceCertView.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceCertView", "line_number": 20, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 20, "usage_type": 
"name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "device_registry.api_views.SignNewDeviceView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "device_registry.api_views.SignNewDeviceView", "line_number": 22, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "device_registry.api_views.RenewExpiredCertView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "device_registry.api_views.RenewExpiredCertView", "line_number": 23, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "device_registry.api_views.ClaimByLink.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "device_registry.api_views.ClaimByLink", "line_number": 25, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceEnrollView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceEnrollView", "line_number": 26, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.settings.IS_MTLS_API", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsRenewCertView.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsRenewCertView", "line_number": 33, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsRenewCertView.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsRenewCertView", "line_number": 37, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsPingView.as_view", "line_number": 39, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsPingView", "line_number": 39, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsTesterView.as_view", "line_number": 40, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsTesterView", "line_number": 40, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "device_registry.api_views.ActionView.as_view", "line_number": 41, "usage_type": "call"}, {"api_name": "device_registry.api_views.ActionView", "line_number": 41, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 41, "usage_type": "name"}, {"api_name": 
"django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "device_registry.api_views.IsDeviceClaimedView.as_view", "line_number": 43, "usage_type": "call"}, {"api_name": "device_registry.api_views.IsDeviceClaimedView", "line_number": 43, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 43, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsCredsView.as_view", "line_number": 45, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsCredsView", "line_number": 45, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 45, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsDeviceMetadataView.as_view", "line_number": 46, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsDeviceMetadataView", "line_number": 46, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 46, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsCredsView.as_view", "line_number": 50, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsCredsView", "line_number": 50, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 50, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsDeviceMetadataView.as_view", "line_number": 51, "usage_type": "call"}, {"api_name": "device_registry.api_views.MtlsDeviceMetadataView", "line_number": 51, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 51, "usage_type": "name"}, {"api_name": "django.conf.settings.IS_DASH", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 56, "usage_type": "name"}, {"api_name": "django.conf.settings.IS_CELERY", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.urls.path", "line_number": 58, "usage_type": "call"}, {"api_name": "device_registry.views.DashboardView.as_view", "line_number": 58, "usage_type": "call"}, {"api_name": "device_registry.views.DashboardView", "line_number": 58, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 58, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "device_registry.views.RootView.as_view", "line_number": 59, "usage_type": "call"}, {"api_name": "device_registry.views.RootView", "line_number": 59, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 59, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 60, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPoliciesListView.as_view", "line_number": 60, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPoliciesListView", "line_number": 60, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 60, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 61, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyCreateView.as_view", "line_number": 61, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyCreateView", "line_number": 61, "usage_type": "attribute"}, {"api_name": "device_registry.views", 
"line_number": 61, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyEditView.as_view", "line_number": 62, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyEditView", "line_number": 62, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 63, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyDeleteView.as_view", "line_number": 63, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyDeleteView", "line_number": 63, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 63, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 64, "usage_type": "call"}, {"api_name": "device_registry.views.claim_device_view", "line_number": 65, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 65, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 67, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailView.as_view", "line_number": 69, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailView", "line_number": 69, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 69, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 72, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailSoftwareView.as_view", "line_number": 74, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailSoftwareView", "line_number": 74, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 74, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 77, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailSecurityView.as_view", "line_number": 77, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailSecurityView", "line_number": 77, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 77, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 79, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyCreateView.as_view", "line_number": 79, "usage_type": "call"}, {"api_name": "device_registry.views.GlobalPolicyCreateView", "line_number": 79, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 79, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 81, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailNetworkView.as_view", "line_number": 83, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailNetworkView", "line_number": 83, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 83, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 86, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailHardwareView.as_view", "line_number": 88, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailHardwareView", "line_number": 88, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 88, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 91, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailMetadataView.as_view", "line_number": 93, "usage_type": "call"}, {"api_name": "device_registry.views.DeviceDetailMetadataView", 
"line_number": 93, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 93, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 96, "usage_type": "call"}, {"api_name": "device_registry.views.CredentialsView.as_view", "line_number": 97, "usage_type": "call"}, {"api_name": "device_registry.views.CredentialsView", "line_number": 97, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 97, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 99, "usage_type": "call"}, {"api_name": "device_registry.api_views.CredentialsView.as_view", "line_number": 99, "usage_type": "call"}, {"api_name": "device_registry.api_views.CredentialsView", "line_number": 99, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 99, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 100, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeleteCredentialView.as_view", "line_number": 100, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeleteCredentialView", "line_number": 100, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 100, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 102, "usage_type": "call"}, {"api_name": "device_registry.api_views.UpdateCredentialView.as_view", "line_number": 102, "usage_type": "call"}, {"api_name": "device_registry.api_views.UpdateCredentialView", "line_number": 102, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 102, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 104, "usage_type": "call"}, {"api_name": "device_registry.api_views.CreateCredentialView.as_view", "line_number": 104, "usage_type": "call"}, {"api_name": "device_registry.api_views.CreateCredentialView", "line_number": 104, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 104, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 105, "usage_type": "call"}, {"api_name": "device_registry.api_views.PolicyDeviceNumberView.as_view", "line_number": 105, "usage_type": "call"}, {"api_name": "device_registry.api_views.PolicyDeviceNumberView", "line_number": 105, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 105, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 107, "usage_type": "call"}, {"api_name": "device_registry.views.RecommendedActionsView.as_view", "line_number": 107, "usage_type": "call"}, {"api_name": "device_registry.views.RecommendedActionsView", "line_number": 107, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 107, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 108, "usage_type": "call"}, {"api_name": "device_registry.views.RecommendedActionsView.as_view", "line_number": 108, "usage_type": "call"}, {"api_name": "device_registry.views.RecommendedActionsView", "line_number": 108, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 108, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 109, "usage_type": "call"}, {"api_name": "device_registry.views.CVEView.as_view", "line_number": 109, "usage_type": "call"}, {"api_name": "device_registry.views.CVEView", "line_number": 109, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 109, "usage_type": "name"}, {"api_name": "django.urls.path", 
"line_number": 110, "usage_type": "call"}, {"api_name": "device_registry.views.CVEView.as_view", "line_number": 110, "usage_type": "call"}, {"api_name": "device_registry.views.CVEView", "line_number": 110, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 110, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 111, "usage_type": "call"}, {"api_name": "device_registry.api_views.autocomplete_tags", "line_number": 111, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 111, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 112, "usage_type": "call"}, {"api_name": "device_registry.views.PairingKeysView.as_view", "line_number": 113, "usage_type": "call"}, {"api_name": "device_registry.views.PairingKeysView", "line_number": 113, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 113, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 115, "usage_type": "call"}, {"api_name": "device_registry.views.PairingKeySaveFileView.as_view", "line_number": 116, "usage_type": "call"}, {"api_name": "device_registry.views.PairingKeySaveFileView", "line_number": 116, "usage_type": "attribute"}, {"api_name": "device_registry.views", "line_number": 116, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 118, "usage_type": "call"}, {"api_name": "device_registry.api_views.PairingKeyListView.as_view", "line_number": 118, "usage_type": "call"}, {"api_name": "device_registry.api_views.PairingKeyListView", "line_number": 118, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 118, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 119, "usage_type": "call"}, {"api_name": "device_registry.api_views.CreatePairingKeyView.as_view", "line_number": 119, "usage_type": "call"}, {"api_name": "device_registry.api_views.CreatePairingKeyView", "line_number": 119, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 119, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 120, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeletePairingKeyView.as_view", "line_number": 122, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeletePairingKeyView", "line_number": 122, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 122, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 125, "usage_type": "call"}, {"api_name": "device_registry.api_views.UpdatePairingKeyView.as_view", "line_number": 127, "usage_type": "call"}, {"api_name": "device_registry.api_views.UpdatePairingKeyView", "line_number": 127, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 127, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 130, "usage_type": "call"}, {"api_name": "device_registry.api_views.InstallInstructionKeyView.as_view", "line_number": 130, "usage_type": "call"}, {"api_name": "device_registry.api_views.InstallInstructionKeyView", "line_number": 130, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 130, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 132, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceCertView.as_view", "line_number": 132, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceCertView", "line_number": 132, "usage_type": "attribute"}, 
{"api_name": "device_registry.api_views", "line_number": 132, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 133, "usage_type": "call"}, {"api_name": "device_registry.api_views.GetBatchActionsView.as_view", "line_number": 133, "usage_type": "call"}, {"api_name": "device_registry.api_views.GetBatchActionsView", "line_number": 133, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 133, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 134, "usage_type": "call"}, {"api_name": "device_registry.api_views.BatchUpdateTagsView.as_view", "line_number": 134, "usage_type": "call"}, {"api_name": "device_registry.api_views.BatchUpdateTagsView", "line_number": 134, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 134, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 135, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceListAjaxView.as_view", "line_number": 135, "usage_type": "call"}, {"api_name": "device_registry.api_views.DeviceListAjaxView", "line_number": 135, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 135, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 136, "usage_type": "call"}, {"api_name": "device_registry.api_views.SnoozeActionView.as_view", "line_number": 136, "usage_type": "call"}, {"api_name": "device_registry.api_views.SnoozeActionView", "line_number": 136, "usage_type": "attribute"}, {"api_name": "device_registry.api_views", "line_number": 136, "usage_type": "name"}]} +{"seq_id": "204117987", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom aiida.cmdline.verdilib import setup as _setup\nfrom aiida.cmdline.verdilib import Profile\n\nfrom ._chainmap import ChainMap\n\ndef run_setup(**kwargs):\n defaults = {\n 'backend': 'django',\n 'email': 'aiida@localhost',\n 'first_name': 'Test',\n 'last_name': 'User',\n 'institution': 'Test Lab',\n 'non_interactive': True,\n 'only_config': False,\n 'db_host': 'localhost'\n }\n\n _setup(**ChainMap(kwargs, defaults))\n Profile().profile_setdefault(kwargs['profile'])\n", "sub_path": "aiida_pytest/_setup.py", "file_name": "_setup.py", "file_ext": "py", "file_size_in_byte": 570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "aiida.cmdline.verdilib.setup", "line_number": 21, "usage_type": "call"}, {"api_name": "_chainmap.ChainMap", "line_number": 21, "usage_type": "call"}, {"api_name": "aiida.cmdline.verdilib.Profile", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "114728923", "text": "from terminaltables import AsciiTable\nfrom files import RESULT_DIR, JSON_DIR, DATA_DIR\nimport pandas as pd\nimport json\n\nclass JudgeJudy:\n def __init__(self, filename):\n print(\"Evaluating %s\" % filename[:-4])\n self._recs = pd.read_csv(RESULT_DIR + filename)\n self.appid_to_name = json.loads(open(JSON_DIR + 'appid_to_name.json').read())\n self.user_history = pd.read_csv(DATA_DIR + 'opens.dat', low_memory=False, delimiter=\"|\")\n self.random_visits = []\n\n def get_item_list(self, guid):\n df = self.user_history\n df = df[df['player_guid']==guid]\n if df.empty:\n return None\n res = []\n for i in df.index:\n app_name = df['app_name'][i]\n num_opens = df['average_open'][i]\n res.append((app_name, num_opens))\n res = sorted(res, key=lambda x: x[1], reverse=True)\n return res\n\n def get_recs(self, guid):\n recs = self._recs\n recs = 
recs[recs['player_guid']==guid].reset_index()\n if recs.empty:\n return None\n res = []\n for i in recs.index:\n rec = str(recs['app_id'][i])\n score = float(recs['score'][i])\n res.append((self.appid_to_name[rec],score))\n return res\n\n def get_random_user(self):\n while len(self.random_visits) != (len(self._recs) / 5):\n new_user = self._recs['player_guid'].sample(n=1).values[0]\n if new_user not in self.random_visits:\n self.random_visits.append(new_user)\n return new_user\n raise Exception(\"Out of guids\")\n\nif __name__==\"__main__\":\n evaluator = JudgeJudy('als_res.csv')\n while True:\n print(\"Enter player guid:\")\n guid = raw_input()\n if guid not in set(evaluator.user_history['player_guid']):\n guid = evaluator.get_random_user()\n print(guid)\n items = evaluator.get_item_list(guid)\n recs = evaluator.get_recs(guid)\n table_data = [(\"Played Games (# opens)\", \"Recommended (score)\")]\n item_text = '\\n'.join([\"{} ({:.2f})\".format(i[0],i[1]) for i in items])\n item_text = 'No games played' if not item_text else item_text\n rec_text = '\\n'.join([\"{} ({:.4f})\".format(r[0],r[1]) for r in recs])\n table_data.append((item_text, rec_text))\n table_instance = AsciiTable(tuple(table_data))\n print(table_instance.table)\n", "sub_path": "evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 2422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "files.RESULT_DIR", "line_number": 9, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 10, "usage_type": "call"}, {"api_name": "files.JSON_DIR", "line_number": 10, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "files.DATA_DIR", "line_number": 11, "usage_type": "name"}, {"api_name": "terminaltables.AsciiTable", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "318041059", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport sys\nif sys.version_info < (2, 6, 0):\n from downloader2 import Downloader\nelse:\n from downloader import Downloader\nfrom extractor import Extractor\n\nclass OnlineUpdater:\n\n def __init__(self, url, unpackDir, recipeFile, downloadFile):\n self.url = url\n self.unpackDir = unpackDir\n self.recipeFile = recipeFile\n self.downloadFile = downloadFile\n\n def update(self):\n result = self.__download()\n if result == Downloader.RESULT_DOWNLOADED:\n self.__unpack()\n os.remove(self.downloadFile)\n return result\n\n def __download(self):\n downloader = Downloader(self.url, self.downloadFile, \\\n OnlineUpdater.__getModifiedTime(self.recipeFile))\n try:\n retval = downloader.download()\n finally:\n downloader.close()\n return retval\n\n def __unpack(self):\n extractor = Extractor(self.downloadFile, self.unpackDir,\n self.recipeFile)\n extractor.extractAll()\n\n @staticmethod\n def __getModifiedTime(path):\n if os.path.exists(path):\n return os.path.getmtime(path)\n else:\n return 0\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n if len(sys.argv) < 2:\n url = 'http://files.kaoriya.net/vim/vim73-kaoriya-win64.zip'\n else:\n url = 'http://files.kaoriya.net/vim/vim73-kaoriya-win64-%s.zip' \\\n % sys.argv[1]\n updater = OnlineUpdater(url, 'var/vim73', \\\n 'var/recipe.txt', 'var/vim73.zip')\n updater.update()\n", "sub_path": "updater.py", "file_name": "updater.py", "file_ext": "py", "file_size_in_byte": 1630, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "downloader.Downloader.RESULT_DOWNLOADED", "line_number": 22, "usage_type": "attribute"}, {"api_name": "downloader.Downloader", "line_number": 22, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 24, "usage_type": "call"}, {"api_name": "downloader.Downloader", "line_number": 28, "usage_type": "call"}, {"api_name": "downloader.download", "line_number": 31, "usage_type": "call"}, {"api_name": "downloader.close", "line_number": 33, "usage_type": "call"}, {"api_name": "extractor.Extractor", "line_number": 37, "usage_type": "call"}, {"api_name": "extractor.extractAll", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "383846934", "text": "from keras.activations import relu, softmax\nfrom keras.models import Input, Model\nfrom keras.layers import Dense, BatchNormalization, Conv2D, Activation, \\\nDropout, Add, MaxPooling2D, AveragePooling2D, Flatten, Concatenate\nfrom keras import backend as K\nimport numpy as np\n\ndef get_bottom(bottom_name, layers, debug_layers=None):\n \n for key in layers.keys():\n if bottom_name == key:\n bottom = layers[key]\n \n debug_layers['layer_connect'][key] = debug_layers['layer_connect'].get(key, 0) + 1\n\n return bottom\n\n if bottom_name=='data':\n if K.backend()=='tensorflow':\n input_dim = (None, None, 3)\n else:\n input_dim = (3, None, None)\n layer = Input(input_dim, name='data')\n layers['data'] = layer\n return layer\n \n print('KEY ERROR:please load bottom layer: {} before this layer'.format(bottom_name)) \n\ndef get_layer(layer_params, layers, verbose=False, debug_layers=None): \n try: \n layer_name = layer_params['name'] \n layer_type = layer_params['type'] \n\n if layer_name not in debug_layers.keys():\n debug_layers['layer_connect'][layer_name] = 0\n if verbose:\n print('loading: ', layer_name)\n # print('layer params:\\n', layer_params)\n except:\n if verbose:\n print('loading failed')\n print('\\nlayer params:\\n', layer_params)\n return None\n\n#Input layer\n if layer_type == 'input':\n try:\n input_dim = np.array(layer_params['input_dim'][1:])\n if K.backend()=='tensorflow':\n input_dim = tuple(input_dim[[1, 2, 0]])\n except:\n if K.backend()=='tensorflow':\n input_dim = (None, None, 3)\n else:\n input_dim = (3, None, None)\n layer = Input(input_dim, name='data')\n layers[layer_name] = layer\n\n return layer \n \n#Convolution layer\n elif layer_type == 'convolution':\n bottom_name = layer_params['bottom'][0]\n bottom = get_bottom(bottom_name, layers, debug_layers)\n # print(bottom_name,' \\n', bottom)\n kernel_size = layer_params['kernel_size']\n num_output = layer_params['num_output']\n pad = layer_params.get('pad', 0)\n stride = layer_params.get('stride', 1)\n use_bias = layer_params.get('bias_term', True)\n padding = 'same' if pad==(kernel_size-1)/2 else 'valid'\n layer = 
Conv2D(num_output, kernel_size, strides=(stride, stride), padding=padding, name=layer_name,\n use_bias=use_bias)(bottom)\n layers[layer_name] = layer\n \n return layer\n \n#Dense layer\n elif layer_type == 'innerproduct':\n bottom_name = layer_params['bottom'][0]\n bottom = get_bottom(bottom_name, layers, debug_layers)\n num_output = layer_params['num_output']\n bottom_dim = bottom.shape.as_list()\n if len(bottom_dim)!=2:\n bottom = Flatten()(bottom)\n layer = Dense(num_output, name=layer_name)(bottom)\n layers[layer_name] = layer\n return layer\n \n#Batch_norm layer\n elif layer_type == 'batchnorm':\n bottom_name = layer_params['bottom'][0]\n bottom = get_bottom(bottom_name, layers, debug_layers)\n# num_output = layer_params['num_output']\n layer = BatchNormalization(name=layer_name)(bottom)\n layers[bottom_name] = layer\n return layer\n \n#Pool layer\n elif layer_type == 'pooling':\n bottom_name = layer_params['bottom'][0]\n pool = layer_params['pool']\n stride = layer_params.get('stride', 2)\n kernel_size = layer_params.get('kernel_size', 2)\n bottom = get_bottom(bottom_name, layers, debug_layers)\n if pool=='max':\n layer = MaxPooling2D(pool_size=(kernel_size,kernel_size), strides=(stride,stride),\n name=layer_name)(bottom)\n elif pool=='ave':\n layer = AveragePooling2D(pool_size=(kernel_size,kernel_size), strides=(stride,stride),\n name=layer_name)(bottom)\n layers[layer_name] = layer\n return layer\n \n#Softmax layer\n elif 'softmax' in layer_type:\n bottom_name = layer_params['bottom'][0]\n bottom = get_bottom(bottom_name, layers, debug_layers)\n layer = Activation(softmax, name=layer_name)(bottom)\n# layers[layer_name] = layer\n layers[bottom_name] = layer\n return layer\n \n elif layer_type == 'relu':\n bottom_name = layer_params['bottom'][0]\n bottom = get_bottom(bottom_name, layers, debug_layers)\n layer = Activation(relu, name=layer_name)(bottom)\n# layers[layer_name] = layer\n layers[bottom_name] = layer\n return layer\n\n#Dropout layer\n elif layer_type == 'dropout':\n bottom_name = layer_params['bottom'][0]\n bottom = get_bottom(bottom_name, layers, debug_layers)\n dropout = 1 - float(layer_params['dropout_ratio'])\n layer = Dropout(dropout, name=layer_name)(bottom)\n# layers[layer_name] = layer\n layers[bottom_name] = layer\n return layer\n \n#Add layer\n elif layer_type == 'eltwise':\n bottom_name1 = layer_params['bottom'][0]\n bottom_name2 = layer_params['bottom'][1]\n bottom1 = get_bottom(bottom_name1, layers, debug_layers)\n bottom2 = get_bottom(bottom_name2, layers, debug_layers)\n \n layer = Add(name=layer_name)([bottom1, bottom2])\n layers[layer_name] = layer\n return layer\n \n#Concat layer\n elif layer_type == 'concat':\n bottom_name1 = layer_params['bottom'][0]\n bottom_name2 = layer_params['bottom'][1]\n bottom1 = get_bottom(bottom_name1, layers, debug_layers)\n bottom2 = get_bottom(bottom_name2, layers, debug_layers)\n \n layer = Concatenate(name=layer_name)([bottom1, bottom2])\n layers[layer_name] = layer\n return layer\n\n#Ignore data layer\n elif layer_type == 'data':\n return None\n\n else:\n debug_layers['skip_layer'].append(layer_name)\n if verbose:\n print(\"skipped: \\\"{}\\\" layer, please check the model later, may be it's not right builded\".format(layer_type))", "sub_path": "get_layer.py", "file_name": "get_layer.py", "file_ext": "py", "file_size_in_byte": 6277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "keras.backend.backend", "line_number": 19, "usage_type": "call"}, 
{"api_name": "keras.backend", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.models.Input", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 49, "usage_type": "name"}, {"api_name": "keras.backend.backend", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 52, "usage_type": "name"}, {"api_name": "keras.models.Input", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.activations.softmax", "line_number": 119, "usage_type": "argument"}, {"api_name": "keras.layers.Activation", "line_number": 127, "usage_type": "call"}, {"api_name": "keras.activations.relu", "line_number": 127, "usage_type": "argument"}, {"api_name": "keras.layers.Dropout", "line_number": 137, "usage_type": "call"}, {"api_name": "keras.layers.Add", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "423815179", "text": "'''Author: Ben Steel\nDate: 16/03/19'''\n\nimport matplotlib.pyplot as plt\nimport pickle\n\nif __name__ == '__main__':\n \n save_path = 'adamGD_SoftmaxCross_4Gamma_bias1b_CoherentNet3232_heartDataset_NLdata_try0'\n\n params, cost, cost_val, nl1_p, nl2_p, final_layer = pickle.load(open(save_path, 'rb'))\n\n [nl1_r5, nl1_r25, nl1_r50, nl1_r75, nl1_r95, nl1_i5, nl1_i25, nl1_i50, nl1_i75, nl1_i95] = nl1_p\n [nl2_r5, nl2_r25, nl2_r50, nl2_r75, nl2_r95, nl2_i5, nl2_i25, nl2_i50, nl2_i75, nl2_i95] = nl2_p\n\n # legend = [\"Layer 1\", \"Layer 2\", \"Layer 3\", \"Layer 4\"]\n legend = [\"Layer 1\", \"Layer 2\"]\n\n n = len(nl1_r50)\n sub = n // 40\n offset = sub // 2\n \n r_med1 = [nl1_r50[i] for i in range(0, n, sub)]\n r_med2 = [nl2_r50[i] for i in range(offset*1, n, sub)]\n # med3 = [nl3_q50[i] for i in range(offset*2, n, sub)]\n # med4 = [nl4_q50[i] for i in range(15, n, sub)]\n\n i_med1 = [nl1_i50[i] for i in range(0, n, sub)]\n i_med2 = [nl2_i50[i] for i in range(offset*1, n, sub)]\n\n r_q1 = [[(nl1_r50[i] - nl1_r25[i]) for i in range(0, n, sub)], [(nl1_r75[i] - nl1_r50[i]) for i in range(0, n, sub)]]\n r_q2 = [[(nl2_r50[i] - nl2_r25[i]) for i in range(offset*1, n, sub)], [(nl2_r75[i] - nl2_r50[i]) for i in range(offset*1, n, sub)]]\n # q3 = [[(nl3_q50[i] - nl3_q25[i]) for i in range(offset*2, n, sub)], [(nl3_q75[i] - nl3_q50[i]) for i in range(offset*2, n, sub)]]\n # q4 = [[(nl4_q50[i] - nl4_q25[i]) for i in range(15, n, sub)], [(nl4_q75[i] - nl4_q50[i]) for i in range(15, n, sub)]]\n\n i_q1 = [[(nl1_i50[i] - nl1_i25[i]) for i in range(0, n, sub)], [(nl1_i75[i] - nl1_i50[i]) for i in range(0, n, sub)]]\n i_q2 = [[(nl2_i50[i] - nl2_i25[i]) for i in range(offset*1, n, sub)], [(nl2_i75[i] - nl2_i50[i]) for i in range(offset*1, n, sub)]]\n\n r_outlower1 = [nl1_r5[i] for i in range(0, n, sub)]\n r_outlower2 = 
[nl2_r5[i] for i in range(offset*1, n, sub)]\n # outlower3 = [nl3_q5[i] for i in range(offset*2, n, sub)] \n # outlower4 = [nl4_q5[i] for i in range(15, n, sub)]\n \n i_outlower1 = [nl1_i5[i] for i in range(0, n, sub)]\n i_outlower2 = [nl2_i5[i] for i in range(offset*1, n, sub)] \n\n r_outupper1 = [nl1_r95[i] for i in range(0, n, sub)]\n r_outupper2 = [nl2_r95[i] for i in range(offset*1, n, sub)]\n # outupper3 = [nl3_q95[i] for i in range(offset*2, n, sub)] \n # outupper4 = [nl4_q95[i] for i in range(15, n, sub)]\n \n i_outupper1 = [nl1_i95[i] for i in range(0, n, sub)]\n i_outupper2 = [nl2_i95[i] for i in range(offset*1, n, sub)] \n\n batches1 = [x for x in range(0, n, sub)]\n batches2 = [x for x in range(offset*1, n, sub)]\n # batches3 = [x for x in range(offset*2, n, sub)]\n # batches4 = [x for x in range(15, n, sub)]\n\n fig, axes = plt.subplots(nrows=1, ncols=2)\n\n axes[0].errorbar(batches1, r_med1, yerr=r_q1, color='b')\n axes[0].errorbar(batches2, r_med2, yerr=r_q2, color='g')\n # plt.errorbar(batches3, med3, yerr=q3, color='r')\n # plt.errorbar(batches4, med4, yerr=q4, color='c')\n\n axes[0].scatter(batches1, r_outlower1, color='b')\n axes[0].scatter(batches2, r_outlower2, color='g')\n # plt.scatter(batches3, outlower3, color='r')\n # plt.scatter(batches4, outlower4, color='c')\n\n axes[0].scatter(batches1, r_outupper1, color='b')\n axes[0].scatter(batches2, r_outupper2, color='g')\n # plt.scatter(batches3, outupper3, color='r')\n # plt.scatter(batches4, outupper4, color='c')\n\n axes[0].set_xlabel('Epochs')\n axes[0].set_ylabel('Re(f(x))')\n axes[0].legend(legend, loc='upper right')\n\n axes[1].errorbar(batches1, i_med1, yerr=i_q1, color='b')\n axes[1].errorbar(batches2, i_med2, yerr=i_q2, color='g')\n\n axes[1].scatter(batches1, i_outlower1, color='b')\n axes[1].scatter(batches2, i_outlower2, color='g')\n\n axes[1].scatter(batches1, i_outupper1, color='b')\n axes[1].scatter(batches2, i_outupper2, color='g')\n\n axes[1].set_xlabel('Epochs')\n axes[1].set_ylabel('Im(f(x))')\n # axes[1].legend(legend, loc='upper right')\n\n plt.show()", "sub_path": "CoherentNet/lorentzian_by_layer.py", "file_name": "lorentzian_by_layer.py", "file_ext": "py", "file_size_in_byte": 4010, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pickle.load", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "468895391", "text": "\"\"\"\nThis script finds a palminized model with given arguments then finetune it.\n\nUsage:\n script.py --input-dir path [-h] [-v|-vv] --walltime int [--tb] (--mnist|--svhn|--cifar10|--cifar100|--test-data) [--mnist-lenet|--test-model|--cifar10-vgg19|--cifar100-vgg19|--svhn-vgg19] --sparsity-factor=int [--nb-iteration-palm=int] [--delta-threshold=float] [--hierarchical] [--nb-factor=int]\n\nOptions:\n -h --help Show this screen.\n -vv Set verbosity to debug.\n -v Set verbosity to info.\n --input-dir path Path to input directory where to find previously generated results.\n --walltime int The number of hour before training is stopped.\n --tb Tell if tensorboard should be printed.\n\nDataset:\n --mnist Use Mnist dataset.\n --svhn Use svhn dataset.\n --cifar10 Use cifar10 dataset.\n --cifar100 
Use cifar100 dataset.\n --test-data Use test datasset (that is actually mnist).\n\nModel:\n --mnist-lenet Use model lenet pretrained for mnist.\n --test-model Use test, small, model.\n --cifar10-vgg19 Use model vgg19 pretrained on cifar10.\n --cifar100-vgg19 Use model vgg19 pretrained on cifar100.\n --svhn-vgg19 Use model vgg19 pretrained on svhn.\n\n\nPalm-Specifc options:\n --sparsity-factor=int Integer coefficient from which is computed the number of value in each factor.\n --nb-iteration-palm=int Number of iterations in the inner palm4msa calls. [default: 300]\n --delta-threshold=float Threshold value before stopping palm iterations. [default: 1e-6]\n --hierarchical Tells if palm should use the hierarchical euristic or not. Muhc longer but better approximation results.\n --nb-factor=int Tells the number of sparse factor for palm\n\"\"\"\nimport logging\nimport os\nimport pickle\nimport pandas as pd\nimport sys\nfrom collections import defaultdict\n\nimport time\nfrom copy import deepcopy\nimport keras\nfrom keras.engine import Model, InputLayer\nimport signal\nimport docopt\nfrom scipy.sparse import coo_matrix\n\nfrom palmnet.core.palminizer import Palminizer\nfrom palmnet.core.palminizable import Palminizable\nfrom palmnet.data import Mnist, Test, Svhn, Cifar100, Cifar10\nfrom palmnet.layers.sparse_facto_sparse_tensor_deprecated import SparseFactorisationDense, SparseFactorisationConv2D\nfrom palmnet.utils import get_sparsity_pattern, insert_layer_nonseq, timeout_signal_handler\nfrom palmnet.experiments.utils import ParameterManagerPalminize, ParameterManagerPalminizeFinetune, ResultPrinter\nfrom skluc.utils import logger, log_memory_usage\nfrom keras.layers import Dense, Conv2D\nimport numpy as np\n\nlst_results_header = [\n \"test_accuracy_finetuned_model\"\n]\n\ndef replace_layers_with_sparse_facto(model, dct_name_facto):\n new_model = deepcopy(model)\n lst_tpl_str_bool_new_model_layers = []\n dct_new_layer_attr = defaultdict(lambda: {})\n for i, layer in enumerate(new_model.layers):\n layer_name = layer.name\n sparse_factorization = dct_name_facto[layer_name]\n logger.debug('Prepare layer {}'.format(layer.name))\n if sparse_factorization != (None, None):\n scaling = sparse_factorization[0]\n factors = [coo_matrix(fac.toarray()) for fac in sparse_factorization[1].get_list_of_factors()]\n sparsity_patterns = [get_sparsity_pattern(w.toarray()) for w in factors]\n factor_data = [f.data for f in factors]\n\n # create new layer\n if isinstance(layer, Dense):\n hidden_layer_dim = layer.units\n activation = layer.activation\n regularizer = layer.kernel_regularizer\n replacing_layer = SparseFactorisationDense(units=hidden_layer_dim, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, kernel_regularizer=regularizer)\n replacing_weights = [np.array(scaling)[None]] + factor_data + [layer.get_weights()[-1]] if layer.use_bias else []\n # new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position=\"replace\")\n # replacing_layer.set_weights(replacing_weights)\n\n elif isinstance(layer, Conv2D):\n nb_filters = layer.filters\n kernel_size = layer.kernel_size\n activation = layer.activation\n padding = layer.padding\n regularizer = layer.kernel_regularizer\n replacing_layer = SparseFactorisationConv2D(filters=nb_filters, kernel_size=kernel_size, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, padding=padding, kernel_regularizer=regularizer)\n replacing_weights = [np.array(scaling)[None]] + 
factor_data + [layer.get_weights()[-1]] if layer.use_bias else []\n # new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position=\"replace\")\n # replacing_layer.set_weights(replacing_weights)\n\n else:\n raise ValueError(\"unknown layer class\")\n\n dct_new_layer_attr[layer_name][\"layer_weights\"] = replacing_weights\n dct_new_layer_attr[layer_name][\"layer_obj\"] = replacing_layer\n dct_new_layer_attr[layer_name][\"modified\"] = True\n\n lst_tpl_str_bool_new_model_layers.append((layer_name, True))\n else:\n dct_new_layer_attr[layer_name][\"modified\"] = False\n lst_tpl_str_bool_new_model_layers.append((layer_name, False))\n dct_new_layer_attr[layer_name][\"layer_obj\"] = layer\n\n network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}\n\n if not isinstance(new_model.layers[0], InputLayer):\n new_model = Model(input=new_model.input, output=new_model.output)\n\n # Set the input layers of each layer\n for layer in new_model.layers:\n # each layer is set as `input` layer of all its outbound layers\n for node in layer._outbound_nodes:\n outbound_layer_name = node.outbound_layer.name\n network_dict['input_layers_of'].update({outbound_layer_name: [layer.name]})\n\n # Set the output tensor of the input layer\n network_dict['new_output_tensor_of'].update(\n {new_model.layers[0].name: new_model.input})\n\n for layer in new_model.layers[1:]:\n layer_name = layer.name\n\n layer_input = [network_dict['new_output_tensor_of'][layer_aux]\n for layer_aux in network_dict['input_layers_of'][layer.name]]\n if len(layer_input) == 1:\n layer_input = layer_input[0]\n\n proxy_new_layer_attr = dct_new_layer_attr[layer_name]\n\n if proxy_new_layer_attr[\"modified\"]:\n x = layer_input\n\n new_layer = proxy_new_layer_attr[\"layer_obj\"]\n new_layer.name = '{}_{}'.format(layer.name,\n new_layer.name)\n x = new_layer(x)\n new_layer.set_weights(proxy_new_layer_attr[\"layer_weights\"])\n logger.info('Layer {} modified into {}'.format(layer.name, new_layer.name))\n else:\n x = layer(layer_input)\n logger.debug('Layer {} unmodified'.format(layer.name))\n\n network_dict['new_output_tensor_of'].update({layer.name: x})\n\n new_model = Model(inputs=new_model.inputs, outputs=x)\n\n return new_model\n\ndef main():\n\n if paraman[\"--mnist-lenet\"]:\n param_train_dataset = Mnist.get_model_param_training()\n elif paraman[\"--cifar10-vgg19\"]:\n param_train_dataset = Cifar10.get_model_param_training()\n elif paraman[\"--cifar100-vgg19\"]:\n param_train_dataset = Cifar100.get_model_param_training()\n elif paraman[\"--svhn-vgg19\"]:\n param_train_dataset = Svhn.get_model_param_training()\n elif paraman[\"--test-model\"]:\n param_train_dataset = Test.get_model_param_training()\n else:\n raise NotImplementedError(\"No dataset specified.\")\n\n (x_train, y_train), (x_test, y_test) = paraman.get_dataset()\n\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n df = pd.read_csv(paraman[\"output_file_resprinter\"])\n init_nb_epoch = len(pd.read_csv(paraman[\"output_file_csvcbprinter\"]))\n base_score = float(df[\"base_score\"])\n before_finetuned_score = float(df[\"before_finetuned_score\"])\n palminized_score = float(df[\"palminized_score\"])\n fine_tuned_model = keras.models.load_model(paraman[\"output_file_modelprinter\"],custom_objects={'SparseFactorisationConv2D':SparseFactorisationConv2D,\n \"SparseFactorisationDense\": SparseFactorisationDense})\n\n else:\n init_nb_epoch = 0\n\n mypalminizedmodel = 
pickle.load(open(paraman[\"input_model_path\"], \"rb\")) # type: Palminizable\n\n base_model = mypalminizedmodel.base_model\n dct_name_facto = mypalminizedmodel.sparsely_factorized_layers\n base_score = base_model.evaluate(x_test, y_test, verbose=0)[1]\n print(base_score)\n palminized_model = mypalminizedmodel.compressed_model\n palminized_score = palminized_model.evaluate(x_test, y_test, verbose=1)[1]\n print(palminized_score)\n fine_tuned_model = replace_layers_with_sparse_facto(palminized_model, dct_name_facto)\n\n fine_tuned_model.compile(loss=param_train_dataset.loss,\n optimizer=param_train_dataset.optimizer,\n metrics=['categorical_accuracy'])\n\n before_finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]\n print(before_finetuned_score)\n\n # results must be already printed once in case process is killed afterward\n dct_results = {\n \"finetuned_score\": None,\n \"before_finetuned_score\": before_finetuned_score,\n \"base_score\": base_score,\n \"palminized_score\": palminized_score,\n }\n resprinter.add(dct_results)\n resprinter.print()\n\n # if paraman[\"--hierarchical\"]:\n assert before_finetuned_score == palminized_score, \\\n \"the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. {} != {}\".format(before_finetuned_score, palminized_score)\n # else: # small fix for a bug where when I wasn't using hierarchical palm returned a matrix that wasn't multiplied by lambda\n # # this should pass until results are generated without bug..\n # assert before_finetuned_score != palminized_score, \\\n # \"the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. {} != {}\".format(before_finetuned_score, palminized_score)\n fine_tuned_model.summary()\n\n call_backs = []\n\n model_checkpoint_callback = keras.callbacks.ModelCheckpoint(str(paraman[\"output_file_modelprinter\"]), monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)\n call_backs.append(model_checkpoint_callback)\n if paraman[\"--tb\"]:\n tbCallBack = keras.callbacks.TensorBoard(log_dir=str(paraman[\"output_file_tensorboardprinter\"]), histogram_freq=20, write_graph=False, write_images=False, batch_size=param_train_dataset.batch_size, write_grads=True, update_freq=\"epoch\")\n call_backs.append(tbCallBack)\n csvcallback = keras.callbacks.callbacks.CSVLogger(str(paraman[\"output_file_csvcbprinter\"]), separator=',', append=True)\n call_backs.append(csvcallback)\n\n\n signal.signal(signal.SIGALRM, timeout_signal_handler)\n signal.alarm(int(paraman[\"--walltime\"] * 3600)) # start alarm\n try:\n open(paraman[\"output_file_notfinishedprinter\"], 'w').close()\n history = fine_tuned_model.fit(param_train_dataset.image_data_generator.flow(x_train, y_train, batch_size=param_train_dataset.batch_size),\n epochs=param_train_dataset.epochs - init_nb_epoch,\n # epochs=2 - init_nb_epoch,\n verbose=1,\n validation_data=(x_test, y_test),\n callbacks=param_train_dataset.callbacks + call_backs)\n signal.alarm(0) # stop alarm for next evaluation\n finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]\n print(finetuned_score)\n\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n os.remove(paraman[\"output_file_notfinishedprinter\"])\n # except TimeoutError as te:\n except Exception as e:\n logging.error(\"Caught exception: {}\".format(e))\n finetuned_score = None\n finally:\n dct_results = {\n \"finetuned_score\": finetuned_score,\n 
\"before_finetuned_score\": before_finetuned_score,\n \"base_score\": base_score,\n \"palminized_score\": palminized_score,\n }\n fine_tuned_model.save(str(paraman[\"output_file_modelprinter\"]))\n resprinter.add(dct_results)\n\n\nif __name__ == \"__main__\":\n logger.info(\"Command line: \" + \" \".join(sys.argv))\n log_memory_usage(\"Memory at startup\")\n arguments = docopt.docopt(__doc__)\n paraman = ParameterManagerPalminizeFinetune(arguments)\n initialized_results = dict((v, None) for v in lst_results_header)\n resprinter = ResultPrinter(output_file=paraman[\"output_file_resprinter\"])\n resprinter.add(initialized_results)\n resprinter.add(paraman)\n if paraman[\"-v\"] >= 2:\n logger.setLevel(level=logging.DEBUG)\n elif paraman[\"-v\"] >= 1:\n logger.setLevel(level=logging.INFO)\n else:\n logger.setLevel(level=logging.WARNING)\n\n logger.warning(\"Verbosity set to warning\")\n logger.info(\"Verbosity set to info\")\n logger.debug(\"Verbosity set to debug\")\n\n if not os.path.exists(paraman[\"output_file_notfinishedprinter\"]) and \\\n os.path.exists(paraman[\"output_file_resprinter\"]) and \\\n os.path.exists(paraman[\"output_file_modelprinter\"]):\n sys.exit(\"Expe {} already executed. Exit\".format(paraman[\"hash\"]))\n\n has_failed = False\n try:\n main()\n except Exception as e:\n has_failed = True\n raise e\n\n finally:\n failure_dict = {\n \"failure\": has_failed\n }\n\n resprinter.add(failure_dict)\n resprinter.print()", "sub_path": "code/scripts/2020/01/1_2_fine_tune_palminized.py", "file_name": "1_2_fine_tune_palminized.py", "file_ext": "py", "file_size_in_byte": 14703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "copy.deepcopy", "line_number": 67, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 69, "usage_type": "call"}, {"api_name": "skluc.utils.logger.debug", "line_number": 73, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 73, "usage_type": "name"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 76, "usage_type": "call"}, {"api_name": "palmnet.utils.get_sparsity_pattern", "line_number": 77, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 81, "usage_type": "argument"}, {"api_name": "palmnet.layers.sparse_facto_sparse_tensor_deprecated.SparseFactorisationDense", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 90, "usage_type": "argument"}, {"api_name": "palmnet.layers.sparse_facto_sparse_tensor_deprecated.SparseFactorisationConv2D", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.engine.InputLayer", "line_number": 116, "usage_type": "argument"}, {"api_name": "keras.engine.Model", "line_number": 117, "usage_type": "call"}, {"api_name": "skluc.utils.logger.info", "line_number": 148, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 148, "usage_type": "name"}, {"api_name": "skluc.utils.logger.debug", "line_number": 151, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 151, "usage_type": "name"}, {"api_name": "keras.engine.Model", "line_number": 155, "usage_type": "call"}, {"api_name": "palmnet.data.Mnist.get_model_param_training", "line_number": 162, "usage_type": "call"}, {"api_name": 
"palmnet.data.Mnist", "line_number": 162, "usage_type": "name"}, {"api_name": "palmnet.data.Cifar10.get_model_param_training", "line_number": 164, "usage_type": "call"}, {"api_name": "palmnet.data.Cifar10", "line_number": 164, "usage_type": "name"}, {"api_name": "palmnet.data.Cifar100.get_model_param_training", "line_number": 166, "usage_type": "call"}, {"api_name": "palmnet.data.Cifar100", "line_number": 166, "usage_type": "name"}, {"api_name": "palmnet.data.Svhn.get_model_param_training", "line_number": 168, "usage_type": "call"}, {"api_name": "palmnet.data.Svhn", "line_number": 168, "usage_type": "name"}, {"api_name": "palmnet.data.Test.get_model_param_training", "line_number": 170, "usage_type": "call"}, {"api_name": "palmnet.data.Test", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 177, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 178, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 182, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 182, "usage_type": "attribute"}, {"api_name": "palmnet.layers.sparse_facto_sparse_tensor_deprecated.SparseFactorisationConv2D", "line_number": 182, "usage_type": "name"}, {"api_name": "palmnet.layers.sparse_facto_sparse_tensor_deprecated.SparseFactorisationDense", "line_number": 183, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 188, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 227, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 227, "usage_type": "attribute"}, {"api_name": "keras.callbacks.TensorBoard", "line_number": 230, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 230, "usage_type": "attribute"}, {"api_name": "keras.callbacks.callbacks.CSVLogger", "line_number": 232, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 232, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 236, "usage_type": "call"}, {"api_name": "palmnet.utils.timeout_signal_handler", "line_number": 236, "usage_type": "argument"}, {"api_name": "signal.SIGALRM", "line_number": 236, "usage_type": "attribute"}, {"api_name": "signal.alarm", "line_number": 237, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 251, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 254, "usage_type": "call"}, {"api_name": "skluc.utils.logger.info", "line_number": 268, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 268, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 268, "usage_type": "attribute"}, {"api_name": "skluc.utils.log_memory_usage", "line_number": 269, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 270, "usage_type": "call"}, {"api_name": "palmnet.experiments.utils.ParameterManagerPalminizeFinetune", "line_number": 271, "usage_type": "call"}, {"api_name": "palmnet.experiments.utils.ResultPrinter", "line_number": 273, "usage_type": "call"}, {"api_name": "skluc.utils.logger.setLevel", "line_number": 277, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 277, 
"usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 277, "usage_type": "attribute"}, {"api_name": "skluc.utils.logger.setLevel", "line_number": 279, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 279, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 279, "usage_type": "attribute"}, {"api_name": "skluc.utils.logger.setLevel", "line_number": 281, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 281, "usage_type": "name"}, {"api_name": "logging.WARNING", "line_number": 281, "usage_type": "attribute"}, {"api_name": "skluc.utils.logger.warning", "line_number": 283, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 283, "usage_type": "name"}, {"api_name": "skluc.utils.logger.info", "line_number": 284, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 284, "usage_type": "name"}, {"api_name": "skluc.utils.logger.debug", "line_number": 285, "usage_type": "call"}, {"api_name": "skluc.utils.logger", "line_number": 285, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 289, "usage_type": "call"}, {"api_name": "os.path", "line_number": 289, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 290, "usage_type": "call"}]} +{"seq_id": "435988468", "text": "from gi.repository import Gtk\n\n#the answer box\nanswer = Gtk.Entry()\nanswer.set_text(\"0\")\n\n#the first number entry box\nnumber1 = Gtk.Entry()\nnumber1.set_text(\"0\")\n\n#the second number entry box\nnumber2 = Gtk.Entry()\nnumber2.set_text(\"0\")\n\n#labels\nlabel1 = Gtk.Label(\"Number 1: \")\nlabel2= Gtk.Label(\"Number 2: \")\n\n#the two calculating variables\nnum1 = 0\nnum2 = 0\n\n\nclass MainWindow(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self, title = \"Enter text\")\n\n #listbox\n listbox = Gtk.ListBox()\n listbox.set_selection_mode(Gtk.SelectionMode.NONE)\n self.add(listbox)\n\n #buttons\n self.button_add = Gtk.Button(label=\"+\")\n self.button_sub = Gtk.Button(label=\"-\")\n self.button_pro = Gtk.Button(label=\"x\")\n self.button_div = Gtk.Button(label=\"/\")\n\n #row1\n row1 = Gtk.ListBoxRow()\n box1 = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL,spacing = 50)\n row1.add(box1)\n box1.pack_start(label1, True, True, 0)\n box1.pack_start(number1, True, True, 0)\n listbox.add(row1)\n\n #row2\n row2 = Gtk.ListBoxRow()\n box2 = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL, spacing = 50)\n row2.add(box2)\n box2.pack_start(label2, True, True, 0)\n box2.pack_start(number2, True, True, 0)\n listbox.add(row2)\n\n #row3\n row3 = Gtk.ListBoxRow()\n box3 = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL, spacing = 50)\n row3.add(box3)\n box3.pack_start(self.button_add, True, True, 0)\n self.button_add.connect(\"clicked\", self.addition)\n box3.pack_start(self.button_sub, True, True, 0)\n self.button_sub.connect(\"clicked\", self.subtract)\n box3.pack_start(self.button_pro, True, True, 0)\n self.button_pro.connect(\"clicked\", self.multiply)\n box3.pack_start(self.button_div, True, True, 0)\n self.button_div.connect(\"clicked\", self.divide)\n listbox.add(row3)\n\n #row4\n row4 = Gtk.ListBoxRow()\n box4 = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL, spacing = 50)\n 
row4.add(box4)\n box4.pack_start(answer, True, True, 0)\n listbox.add(row4)\n\n #when \"+\" button is clicked\n def addition(self, widget):\n #taking values\n num1 = number1.get_text()\n num2 = number2.get_text()\n #operating\n sum = float(num1)+float(num2)\n str1 = str(sum)\n #printing\n answer.set_text(str1)\n\n #when \"-\" button is clicked\n def subtract(self, widget):\n #taking values\n num1 = number1.get_text()\n num2 = number2.get_text()\n #operating\n diff = float(num1)-float(num2)\n str2 = str(diff)\n #printing\n answer.set_text(str2)\n\n #when \"x\" button is clicked\n def multiply(self, widget):\n #taking values\n num1 = number1.get_text()\n num2 = number2.get_text()\n #operating\n product = float(num1)*float(num2)\n str3 = str(product)\n #printing\n answer.set_text(str3)\n\n #when \"/\" button is clicked\n def divide(self, widget):\n #taking values\n num1 = number1.get_text()\n num2 = number2.get_text()\n #operating\n if float(num2)>0 :\n div = float(num1)/float(num2)\n str4 = str(div)\n #printing\n answer.set_text(str4)\n elif float(num2) == 0 :\n answer.set_text(\"Error\")\n\nwindow = MainWindow()\nwindow.connect(\"delete-event\", Gtk.main_quit)\nwindow.show_all()\nGtk.main()\n", "sub_path": "attachments/4751817814573056-Py_Simple_Calculator_Complete.py", "file_name": "4751817814573056-Py_Simple_Calculator_Complete.py", "file_ext": "py", "file_size_in_byte": 3565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "gi.repository.Gtk.Entry", "line_number": 4, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 4, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 8, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 8, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 12, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 12, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 16, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 16, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 17, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 17, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 24, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 24, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 26, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 26, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 29, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 29, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.SelectionMode", "line_number": 30, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 30, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 34, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 34, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 35, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 35, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 36, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 36, "usage_type": "name"}, {"api_name": 
"gi.repository.Gtk.Button", "line_number": 37, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 37, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBoxRow", "line_number": 40, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 40, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 41, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 41, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 41, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.ListBoxRow", "line_number": 48, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 48, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 49, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 49, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 49, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.ListBoxRow", "line_number": 56, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 56, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 57, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 57, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 57, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.ListBoxRow", "line_number": 70, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 70, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 71, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 71, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 71, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 124, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 124, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main", "line_number": 126, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "552812183", "text": "\"\"\"empty message\n\nRevision ID: af49a3efdfa4\nRevises: bcac9c071e26\nCreate Date: 2017-12-11 22:24:21.040000\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'af49a3efdfa4'\ndown_revision = 'bcac9c071e26'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('categories', sa.Column('father_node', sa.Integer(), nullable=True))\n op.drop_column('categories', 'belong_to')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('categories', sa.Column('belong_to', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.drop_column('categories', 'father_node')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/af49a3efdfa4_.py", "file_name": "af49a3efdfa4_.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.INTEGER", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "574564260", "text": "import json\nimport sys\nimport uuid\nimport base64\nimport hashlib\nimport os\nimport shutil\nimport zipfile\nimport views\n\nfrom cgi import parse_header, parse_multipart\n\n\nif sys.version.startswith('3'):\n from http.server import BaseHTTPRequestHandler, HTTPServer\n from urllib.parse import urlparse, parse_qs, unquote\n _py3_ = True\nelse:\n print(\"This program requires Python 3\")\n exit(1)\n\nif not os.path.exists(\"data.db\"):\n os.system(\"python init_db.py\")\nprint(os.getcwd())\n\n##########Globals################ \n\n_DEBUG_ = True\n_FOLDER_PATHS_ = os.getcwd()\n_APP_ROOT_ = \"www\"\n\n\n#########Py3Compat###############\ndef make_bytes(data):\n\n if _py3_ :\n return bytes(data, \"utf-8\")\n return data \n\ndef make_str(data):\n\n if _py3_:\n return data.decode(\"utf-8\")\n return data\n\n#########Functions###############\n\n\n\nclass S(BaseHTTPRequestHandler):\n\n def _set_content_type(self, path):\n\n extension = os.path.basename(path).rsplit(\".\",1)[-1]\n\n if extension == \"css\":\n return \"text/css\"\n elif extension == \"js\":\n return \"application/javascript\"\n elif extension == \"html\":\n return \"text/html\"\n else:\n return \"text/plain\"\n \n\n def _set_headers(self, method, content_disp=None):\n self.send_response(200)\n if method == \"post\":\n self.send_header('Content-type', 'application/json')\n else:\n self.send_header('Content-type', content_disp)\n \n self.end_headers()\n\n def _set_404(self):\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(make_bytes((\"Page Not Found

Error 404: Page Not Found!
\")))\n \n def _get_args(self):\n ctype, pdict = parse_header(self.headers['content-type'])\n if ctype == 'multipart/form-data':\n postvars = parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers['content-length'])\n postvars = parse_qs(\n make_str(self.rfile.read(length)), \n keep_blank_values=1)\n else:\n postvars = {}\n return postvars\n\n def _parse_path(self, path):\n \n return [x.strip() for x in path.split(\"/\")]\n\n def _parse_request(self, method):\n parsed_path = urlparse(self.path)\n request_id = unquote(parsed_path.path)[1:]\n# path_args = self._parse_path(request_id)\n vars = self._get_args()\n fixed_vars = {}\n for item in vars:\n fixed_vars[item] = vars[item][0]\n\n if request_id.startswith(\"api/\"):\n self._set_headers(method)\n views.router(request_id[4:], self.wfile, method, fixed_vars)\n return\n else:\n self._set_404()\n \n\n def do_GET(self):\n parsed_path = urlparse(self.path)\n request_id = unquote(parsed_path.path)[1:]\n print(request_id)\n if request_id == \"\":\n request_id = \"index.html\"\n if request_id.startswith(\"api/\"):\n self._set_headers(\"get\", self._set_content_type(request_id))\n views.router(request_id[4:], self.wfile, \"get\")\n return\n file_path = os.path.join(_APP_ROOT_, request_id)\n if os.path.exists(file_path):\n self._set_headers(\"get\", self._set_content_type(request_id))\n data = open(file_path, 'r').read()\n self.wfile.write(make_bytes((data)))\n else:\n self._set_404()\n\n def do_POST(self):\n self._parse_request(method=\"post\")\n\n\n def do_HEAD(self):\n self._set_headers(\"head\")\n\ndef run(server_class=HTTPServer, handler_class=S, port=None):\n if port is None:\n port = int(os.environ.get('PORT'))\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n print('Starting httpd...')\n httpd.serve_forever()\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) == 2:\n run(port=int(sys.argv[1]))\n else:\n run()", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4196, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.version.startswith", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 29, "usage_type": "call"}, {"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cgi.parse_header", "line_number": 82, "usage_type": "call"}, {"api_name": "cgi.parse_multipart", "line_number": 84, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 87, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 99, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 100, "usage_type": "call"}, {"api_name": "views.router", "line_number": 109, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 116, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 117, "usage_type": 
"call"}, {"api_name": "views.router", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "http.server.HTTPServer", "line_number": 140, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 142, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 142, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 151, "usage_type": "attribute"}]} +{"seq_id": "123853229", "text": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom functools import wraps\nimport logging\nimport os\nimport random\n\nimport praw\nimport telegram\nfrom telegram.ext import Updater, CommandHandler\n\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger()\n\nreddit = praw.Reddit(user_agent='Banshee')\n\n\ndef command(f):\n @wraps(f)\n def wrapper(bot, update):\n if (datetime.now() - update.message.date).total_seconds() > 60:\n logger.info('Skipping stale command {}'.format(update.message.text))\n return\n logger.info('Received command {}'.format(update.message.text))\n return f(bot, update)\n return wrapper\n\n\n@command\ndef joke(bot, update):\n reddit.get_subreddit('jokes')\n submissions = reddit.get_subreddit('jokes').get_hot(limit=10)\n submission = random.choice(list(submissions))\n\n text = '[{s.title}]({s.permalink})\\n{s.selftext}'.format(s=submission)\n bot.sendMessage(\n update.message.chat_id,\n parse_mode=telegram.ParseMode.MARKDOWN,\n disable_web_page_preview=True,\n text=text\n )\n\n\n@command\ndef ping(bot, update):\n bot.sendMessage(update.message.chat_id, text='pong 💡')\n\n\ndef handle_error(_, update, error):\n logger.warning('Update \"{}\" caused error \"{}\"'.format(update, error))\n\n\ndef main():\n updater = Updater(token=os.getenv('BANSHEE_TOKEN'))\n dispatcher = updater.dispatcher\n\n dispatcher.addHandler(CommandHandler('joke', joke))\n dispatcher.addHandler(CommandHandler('ping', ping))\n\n dispatcher.addErrorHandler(handle_error)\n\n updater.start_polling(poll_interval=2)\n\n # Run the bot until the you presses Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. 
This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "banshee.py", "file_name": "banshee.py", "file_ext": "py", "file_size_in_byte": 1964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "praw.Reddit", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 24, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 38, "usage_type": "call"}, {"api_name": "telegram.ParseMode", "line_number": 43, "usage_type": "attribute"}, {"api_name": "telegram.ext.Updater", "line_number": 59, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 59, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 62, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "222927845", "text": "from src.domain.entities import Book, Client, Rental\r\nfrom src.repository.repositories import BookRepository, ClientRepository, RentalRepository, BookRepositoryException, ClientRepositoryException, RentalRepositoryException\r\nimport datetime\r\nimport pickle\r\n\r\n\r\nclass BookBinaryFileRepositoryException(BookRepositoryException):\r\n def __init__(self, message):\r\n super().__init__(message)\r\n\r\n\r\nclass ClientBinaryFileRepositoryException(ClientRepositoryException):\r\n def __init__(self, message):\r\n super().__init__(message)\r\n\r\n\r\nclass RentalBinaryFileRepositoryException(RentalRepositoryException):\r\n def __init__(self, message):\r\n super().__init__(message)\r\n\r\n\r\nclass BinaryFileBookRepository(BookRepository):\r\n def __init__(self, file_name = 'books.pickle'):\r\n super(BinaryFileBookRepository, self).__init__()\r\n self._file_name = file_name\r\n self._load_binary()\r\n\r\n def add(self, book):\r\n \"\"\"\r\n Adds a new book to the list of books if the book is not in the list yet\r\n :param book: the book that will be added\r\n \"\"\"\r\n super().add(book)\r\n self._save_binary()\r\n\r\n def remove(self, bookID):\r\n \"\"\"\r\n Removes a book with the given id\r\n :param bookID: the given book id\r\n :return: the book that was removed\r\n \"\"\"\r\n book = super().remove(bookID)\r\n self._save_binary()\r\n return book\r\n\r\n def update(self, book):\r\n \"\"\"\r\n Updates a book with a given id with new values\r\n :param book: the new values for the book with the given id\r\n :return: the old title and the old author of the book\r\n \"\"\"\r\n oldTitle, oldAuthor = super().update(book)\r\n self._save_binary()\r\n return oldTitle, oldAuthor\r\n\r\n def _save_binary(self):\r\n \"\"\"\r\n Writes all the book data in the binary file or raise an exception if an error occurs\r\n \"\"\"\r\n file = open(self._file_name, \"wb\")\r\n try:\r\n for book in range(len(self._bookList)):\r\n line = self._bookList[book]['Book id'] + ';' + self._bookList[book]['Title'] + ';' + self._bookList[book]['Author']\r\n pickle.dump(line, file)\r\n pickle.dump('\\n', file)\r\n 
file.close()\r\n except Exception as e:\r\n raise BookBinaryFileRepositoryException('An error occured!' + str(e))\r\n\r\n def _load_binary(self):\r\n \"\"\"\r\n Reads all the book data from the binary file or raise an exception if an error occurs\r\n \"\"\"\r\n try:\r\n line = '\\n'\r\n file = open(self._file_name, \"rb\")\r\n while line == '\\n':\r\n line = pickle.load(file)\r\n line = line.split(';')\r\n bookID = line[0]\r\n bookTitle = line[1]\r\n bookAuthor = line[2]\r\n super().add(Book(bookID, bookTitle, bookAuthor))\r\n #super().add(Book(line[0], line[1], line[2]))\r\n line = pickle.load(file) #newline '\\n'\r\n except EOFError: # (file is empty)\r\n return\r\n except Exception as e: # IOError (file does not exist)\r\n raise BookBinaryFileRepositoryException('An error occured!' + str(e))\r\n\r\n\r\nclass BinaryFileClientRepository(ClientRepository):\r\n def __init__(self, file_name='clients.pickle'):\r\n super().__init__()\r\n self._file_name = file_name\r\n self._load_binary()\r\n\r\n def add(self, client):\r\n \"\"\"\r\n Adds a new client to the list of clients if the client is not in the list yet\r\n :param client: the client that will be added\r\n \"\"\"\r\n super().add(client)\r\n self._save_binary()\r\n\r\n def remove(self, clientID):\r\n \"\"\"\r\n Removes a client with the given id\r\n :param clientID: the given client id\r\n :return: the client that was removed\r\n \"\"\"\r\n client = super().remove(clientID)\r\n self._save_binary()\r\n return client\r\n\r\n def update(self, client):\r\n \"\"\"\r\n Updates a client with a given id with new values\r\n :param client: the new values for the client with the given id\r\n :return: the old name of the client\r\n \"\"\"\r\n oldName = super().update(client)\r\n self._save_binary()\r\n return oldName\r\n\r\n def _save_binary(self):\r\n \"\"\"\r\n Writes all the client data in the binary file or raise an exception if an error occurs\r\n \"\"\"\r\n file = open(self._file_name, \"wb\")\r\n try:\r\n for client in range(len(self._clientList)):\r\n line = self._clientList[client]['Client id'] + ';' + self._clientList[client]['Name']\r\n pickle.dump(line, file)\r\n pickle.dump('\\n', file)\r\n file.close()\r\n except Exception as e:\r\n raise ClientBinaryFileRepositoryException('An error occured!' + str(e))\r\n\r\n def _load_binary(self): #save and load binary could have been simpler if i hadn't used a list of dictionaries\r\n \"\"\"\r\n Reads all the client data from the binary file or raise an exception if an error occurs\r\n \"\"\"\r\n try:\r\n line = '\\n'\r\n file = open(self._file_name, \"rb\")\r\n while line == '\\n':\r\n line = pickle.load(file)\r\n line = line.split(';')\r\n super().add(Client(line[0], line[1]))\r\n line = pickle.load(file) #newline '\\n', after every read line we will read '\\n' and we need to jump over it\r\n except EOFError: # (file is empty)\r\n return\r\n except Exception as e: # IOError (file does not exist)\r\n raise ClientBinaryFileRepositoryException('An error occured!' 
+ str(e))\r\n\r\n\r\nclass BinaryFileRentalRepository(RentalRepository):\r\n def __init__(self, bookList, clientList, file_name='rentals.pickle'):\r\n super().__init__(bookList, clientList)\r\n self._file_name = file_name\r\n self._bookList = bookList\r\n self._clientList = clientList\r\n self._load_binary()\r\n\r\n def add(self, rental):\r\n \"\"\"\r\n Adds a new rental to the list of rentals if the rental is not in the list yet\r\n :param rental: the rental that will be added\r\n \"\"\"\r\n super().add(rental)\r\n self._save_binary()\r\n\r\n def remove(self, rentalID):\r\n \"\"\"\r\n Removes a rental with the given id\r\n :param rentalID: the id of the rental that will be removed\r\n \"\"\"\r\n super().remove(rentalID)\r\n self._save_binary()\r\n\r\n def update_return(self, bookForReturnID, returnedDate):\r\n \"\"\"\r\n Used for returning a book\r\n :param bookForReturnID: the id of the book that will be returned\r\n :param returnedDate: the return date\r\n :return: the old return date ('')\r\n \"\"\"\r\n oldReturnedDate = super().update_return(bookForReturnID, returnedDate)\r\n self._save_binary()\r\n return oldReturnedDate\r\n\r\n def _save_binary(self):\r\n \"\"\"\r\n Writes all the rental data in the binary file or raise an exception if an error occurs\r\n \"\"\"\r\n file = open(self._file_name, \"wb\")\r\n try:\r\n for rental in range(len(self._rentalList)):\r\n line = self._rentalList[rental]['Rental id'] + ';' + self._rentalList[rental]['Book id'] + ';' + self._rentalList[rental]['Client id'] + ';' + \\\r\n str(self._rentalList[rental]['Rented date']) + ';' + str(self._rentalList[rental]['Returned date'])\r\n pickle.dump(line, file)\r\n pickle.dump('\\n', file)\r\n file.close()\r\n except Exception as e:\r\n raise RentalBinaryFileRepositoryException('An error occured!' + str(e))\r\n\r\n def _load_binary(self):\r\n \"\"\"\r\n Reads all the rental data from the binary file or raise an exception if an error occurs\r\n \"\"\"\r\n try:\r\n line = '\\n'\r\n file = open(self._file_name, \"rb\")\r\n while line == '\\n':\r\n line = pickle.load(file)\r\n line = line.split(';')\r\n rentedDate = line[3].split('-')\r\n if line[4] != '':\r\n returnedDate = line[4].split('-')\r\n super().add(Rental(line[0], line[1], line[2], datetime.date(int(rentedDate[0]), int(rentedDate[1]), int(rentedDate[2])),\r\n datetime.date(int(returnedDate[0]), int(returnedDate[1]), int(returnedDate[2]))))\r\n else:\r\n super().add(Rental(line[0], line[1], line[2], datetime.date(int(rentedDate[0]), int(rentedDate[1]), int(rentedDate[2])), ''))\r\n line = pickle.load(file) #newline '\\n'\r\n except EOFError: # (file is empty)\r\n return\r\n except Exception as e: # IOError (file does not exist)\r\n raise RentalBinaryFileRepositoryException('An error occured!' 
+ str(e))\r\n", "sub_path": "a9-pauladam2001/src/repository/binaryFileRepositories.py", "file_name": "binaryFileRepositories.py", "file_ext": "py", "file_size_in_byte": 9005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "src.repository.repositories.BookRepositoryException", "line_number": 7, "usage_type": "name"}, {"api_name": "src.repository.repositories.ClientRepositoryException", "line_number": 12, "usage_type": "name"}, {"api_name": "src.repository.repositories.RentalRepositoryException", "line_number": 17, "usage_type": "name"}, {"api_name": "src.repository.repositories.BookRepository", "line_number": 22, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 64, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 65, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 78, "usage_type": "call"}, {"api_name": "src.domain.entities.Book", "line_number": 83, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 85, "usage_type": "call"}, {"api_name": "src.repository.repositories.ClientRepository", "line_number": 92, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 134, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 135, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 148, "usage_type": "call"}, {"api_name": "src.domain.entities.Client", "line_number": 150, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 151, "usage_type": "call"}, {"api_name": "src.repository.repositories.RentalRepository", "line_number": 158, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 202, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 203, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 216, "usage_type": "call"}, {"api_name": "src.domain.entities.Rental", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 222, "usage_type": "call"}, {"api_name": "src.domain.entities.Rental", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 224, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 225, "usage_type": "call"}]} +{"seq_id": "235986149", "text": "\"\"\"reset 785\n\nRevision ID: b23caf5a5c90\nRevises: \nCreate Date: 2021-02-02 10:17:40.793925\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b23caf5a5c90'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('classroom',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('creator_id', sa.Integer(), nullable=True),\n sa.Column('class_name', sa.String(), nullable=True),\n sa.Column('created_time', sa.DateTime(), nullable=True),\n sa.Column('unique_id', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_classroom_id'), 'classroom', ['id'], unique=False)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(), nullable=False),\n sa.Column('password', sa.String(), nullable=False),\n sa.Column('email', sa.String(), nullable=True),\n sa.Column('first_name', sa.String(), nullable=True),\n sa.Column('last_name', sa.String(), nullable=True),\n sa.Column('date_joined', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('username')\n )\n op.create_index(op.f('ix_user_id'), 'user', ['id'], unique=False)\n op.create_table('token',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('token_value', sa.String(), nullable=True),\n sa.Column('date_issued', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_token_id'), 'token', ['id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_token_id'), table_name='token')\n op.drop_table('token')\n op.drop_index(op.f('ix_user_id'), table_name='user')\n op.drop_table('user')\n op.drop_index(op.f('ix_classroom_id'), table_name='classroom')\n op.drop_table('classroom')\n # ### end Alembic commands ###\n", "sub_path": "alembic/versions/b23caf5a5c90_reset_785.py", "file_name": "b23caf5a5c90_reset_785.py", "file_ext": "py", "file_size_in_byte": 2197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": 
"call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 42, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 48, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 50, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 50, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 50, "usage_type": "call"}, {"api_name": "alembic.op.drop_index", "line_number": 56, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 56, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 56, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 57, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 57, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 58, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 58, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 58, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 59, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 59, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 60, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 60, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 60, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 61, "usage_type": "call"}, {"api_name": "alembic.op", 
"line_number": 61, "usage_type": "name"}]} +{"seq_id": "515446707", "text": "from django.shortcuts import render,redirect\r\nfrom django.views.generic import View\r\nfrom django.urls import reverse\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom tool.message import showMessage\r\nimport re\r\nfrom .models import User,Address\r\nfrom django_redis import get_redis_connection\r\nfrom apps.goods.models import GoodsSKU\r\nfrom apps.order.models import OrderInfo,OrderGoods\r\n# Create your views here.\r\nclass RegisterView(View):\r\n\r\n def get(self, request):\r\n return render(request, 'register.html')\r\n\r\n def post(self, request):\r\n username = request.POST.get('user_name') # None\r\n password = request.POST.get('pwd')\r\n email = request.POST.get('email')\r\n\r\n\r\n if not all([username, password, email]):\r\n return render(request, 'register.html', {'errmsg': '数据不完整'})\r\n\r\n\r\n if not re.match(r'^[a-z0-9][\\w.\\-]*@[a-z0-9\\-]+(\\.[a-z]{2,5}){1,2}$', email):\r\n return render(request, 'register.html', {'errmsg': '邮箱格式不正确'})\r\n\r\n try:\r\n user = User.objects.get(username = username)\r\n except User.DoesNotExist:\r\n user = None\r\n\r\n if user is not None:\r\n return render(request, 'register.html', {'errmsg': '用户名已注册'})\r\n\r\n User.objects.create_user(username, email, password, is_active = 1)\r\n # return redirect(reverse('user:login'))\r\n # return render(request,'message.html',{'second':3,'url':reverse('user:login')})\r\n return showMessage(request,'注册成功', reverse('user:login'))\r\nclass LoginView(View):\r\n def get(self, request):\r\n \"\"\"显示\"\"\"\r\n # 判断用户是否记住用户名\r\n username = request.COOKIES.get('username')\r\n print('username:',username)\r\n checked = 'checked'\r\n if username is None:\r\n # 没有记住用户名\r\n username = ''\r\n checked = ''\r\n\r\n # 使用模板\r\n return render(request, 'login.html', {'username': username, 'checked': checked})\r\n def post(self, request):\r\n \"\"\"登录校验\"\"\"\r\n # 1.接收参数\r\n username = request.POST.get('username')\r\n password = request.POST.get('pwd')\r\n remember = request.POST.get('remember') # None\r\n # 调试查看\r\n # print('username:', username)\r\n # print('password:', password)\r\n # print('remember:', remember)\r\n\r\n # 2.参数校验(后端校验)\r\n if not all([username, password]):\r\n return render(request, 'login.html', {'errmsg': '参数不完整'})\r\n\r\n # 3.业务处理:登录校验\r\n user = authenticate(username=username, password=password)\r\n if user is not None:\r\n # 用户名和密码正确\r\n if user.is_active:\r\n # 用户已激活\r\n # 记住用户的登录状态\r\n login(request, user)\r\n\r\n # 获取用户登录之前访问的url地址,默认跳转到首页\r\n next_url = request.GET.get('next', reverse('goods:index')) # None\r\n # print(next_url)\r\n\r\n # 跳转到next_url\r\n response = redirect(next_url) # HttpResponseRedirect\r\n\r\n # 跳转到首页\r\n # response = redirect(reverse('goods:index')) # HttpResponseRedirect\r\n # 将用户名赋值给index\r\n # 方式1:直接在session里面记录该值,并传递给重定向\r\n # request.session['is_login'] = username\r\n # 方式2: 设置cookie,不安全\r\n # response.set_cookie('is_login', username)\r\n # 方式3:由于采用django自带的authenticate,因此,可以在模板中使用user.is_authenticated\r\n #\r\n\r\n # 判断是否需要记住用户名\r\n if remember == 'on':\r\n # 设置cookie username\r\n response.set_cookie('username', username, max_age=7 * 24 * 3600)\r\n else:\r\n # 删除cookie username\r\n response.delete_cookie('username')\r\n # response.set_cookie('name', username)\r\n # 跳转到首页\r\n return response\r\n else:\r\n # 用户未激活\r\n return render(request, 'login.html', {'errmsg': '用户未激活'})\r\n else:\r\n # 用户名或密码错误\r\n return render(request, 'login.html', {'errmsg': 
'用户名或密码错误'})\r\n\r\nclass LogoutView(View):\r\n def get(self, request):\r\n \"\"\"退出\"\"\"\r\n # 清除用户登录状态,内置的logout函数会自动清除当前session\r\n logout(request)\r\n\r\n # 跳转到登录\r\n return redirect(reverse('user:login'))\r\nclass UserView(View):\r\n def get(self, request):\r\n \"\"\"显示\"\"\"\r\n # 获取登录用户\r\n user = request.user\r\n\r\n # 获取用户的默认收货地址\r\n address = Address.objects.get_default_address(user)\r\n\r\n # 获取用户的最近浏览商品的信息\r\n # 若采用redis第三包交互时\r\n # from redis import StrictRedis\r\n # conn = StrictRedis(host='172.16.179.142', port=6379, db=5)\r\n\r\n # 返回StrictRedis类的对象\r\n # 若采用django-redis包时\r\n conn = get_redis_connection('default')\r\n # 拼接key\r\n history_key = 'history_%d' % user.id\r\n\r\n # lrange(key, start, stop) 返回是列表\r\n # 获取用户最新浏览的5个商品的id\r\n sku_ids = conn.lrange(history_key, 0, 4) # [1, 3, 5, 2]\r\n\r\n skus = []\r\n for sku_id in sku_ids:\r\n # 根据商品的id查询商品的信息\r\n sku = GoodsSKU.objects.get(id=sku_id)\r\n # 追加到skus列表中\r\n skus.append(sku)\r\n\r\n # 组织模板上下文\r\n context = {\r\n 'address': address,\r\n 'skus': skus,\r\n 'page': 'user'\r\n }\r\n\r\n # 使用模板\r\n return render(request, 'user_center_info.html', context)\r\n\r\nclass OrderView(View):\r\n def get(self, request, page):\r\n \"\"\"显示\"\"\"\r\n # 获取登录用户\r\n user = request.user\r\n # 查询所有订单\r\n info_msg = 1 # 若有订单则为1\r\n try:\r\n order_infos = OrderInfo.objects.filter(user=user).order_by('-create_time')\r\n except OrderInfo.DoesNotExist :\r\n info_msg = 0\r\n\r\n if len(order_infos) == 0:\r\n info_msg = 0\r\n context = {\r\n 'page': 'order',\r\n 'info_msg': info_msg,\r\n }\r\n if info_msg == 1:\r\n\r\n for order_info in order_infos:\r\n order_goods = OrderGoods.objects.filter(order=order_info)\r\n for order_good in order_goods:\r\n # 商品小计\r\n amount = order_good.price * order_good.count\r\n order_good.amount = amount\r\n order_info.order_goods = order_goods\r\n order_info.status_title = OrderInfo.ORDER_STATUS[order_info.order_status]\r\n # order_info.status = order_info.ORDER_STATUS_CHOICES[order_info.order_status-1][1]\r\n\r\n # 分页操作\r\n from django.core.paginator import Paginator\r\n paginator = Paginator(order_infos, 3)\r\n\r\n # 处理页码\r\n page = int(page)\r\n\r\n if page > paginator.num_pages:\r\n # 默认获取第1页的内容\r\n page = 1\r\n\r\n # 获取第page页内容, 返回Page类的实例对象\r\n order_infos_page = paginator.page(page)\r\n\r\n # 页码处理\r\n # 如果分��之后页码超过5页,最多在页面上只显示5个页码:当前页前2页,当前页,当前页后2页\r\n # 1) 分页页码小于5页,显示全部页码\r\n # 2)当前页属于1-3页,显示1-5页\r\n # 3) 当前页属于后3页,显示后5页\r\n # 4) 其他请求,显示当前页前2页,当前页,当前页后2页\r\n num_pages = paginator.num_pages\r\n if num_pages < 5:\r\n # 1-num_pages\r\n pages = range(1, num_pages + 1)\r\n elif page <= 3:\r\n pages = range(1, 6)\r\n elif num_pages - page <= 2:\r\n # num_pages-4, num_pages\r\n pages = range(num_pages - 4, num_pages + 1)\r\n else:\r\n # page-2, page+2\r\n pages = range(page - 2, page + 3)\r\n\r\n context = {\r\n 'page': 'order',\r\n 'order_infos': order_infos,\r\n 'info_msg': info_msg,\r\n 'pages' : pages,\r\n 'order_infos_page': order_infos_page\r\n }\r\n return render(request, 'user_center_order.html', context)\r\nclass AddressView(View):\r\n def get(self, request):\r\n \"\"\"显示\"\"\"\r\n # 获取登录用户user\r\n user = request.user\r\n # try:\r\n # address = Address.objects.get(user=user, is_default=True)\r\n # except Address.DoesNotExist:\r\n # address = None\r\n\r\n default_address = Address.objects.get_default_address(user)\r\n\r\n all_address = Address.objects.get_all_address(user)\r\n\r\n # 组织模板上下文\r\n context = {\r\n 'address': default_address,\r\n 'have_address': all_address,\r\n 'page': 'address'\r\n }\r\n\r\n 
# 使用模板\r\n return render(request, 'user_center_site.html', context)\r\n\r\n def post(self, request):\r\n \"\"\"地址添加\"\"\"\r\n # 接收参数\r\n receiver = request.POST.get('receiver')\r\n addr = request.POST.get('direction')\r\n zip_code = request.POST.get('mail_code')\r\n phone = request.POST.get('phone_number')\r\n\r\n # 参数校验\r\n if not all([receiver, addr, phone]):\r\n return render(request, 'user_center_site.html', {'errmsg': '数据不完整'})\r\n\r\n # 校验手机号\r\n\r\n # 业务处理:添加收货地址\r\n # 如果用户已经有默认地址,新添加的地址作为非默认地址,否则作为默认地址\r\n # 获取登录用户user\r\n user = request.user\r\n # try:\r\n # address = Address.objects.get(user=user, is_default=True)\r\n # except Address.DoesNotExist:\r\n # address = None\r\n\r\n address = Address.objects.get_default_address(user)\r\n\r\n is_default = True\r\n if address is not None:\r\n is_default = False\r\n\r\n # 添加收货地址\r\n Address.objects.create(user=user,\r\n receiver=receiver,\r\n addr=addr,\r\n zip_code=zip_code,\r\n phone=phone,\r\n is_default=is_default)\r\n\r\n # 返回应答,刷新地址页面\r\n return redirect(reverse('user:address'))", "sub_path": "shop/apps/user/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.views.generic.View", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "re.match", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 31, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 31, "usage_type": "name"}, {"api_name": "models.User.DoesNotExist", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "models.User.objects.create_user", "line_number": 38, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 38, "usage_type": "name"}, {"api_name": "tool.message.showMessage", "line_number": 41, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 41, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 42, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 78, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 114, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 121, "usage_type": 
"call"}, {"api_name": "django.urls.reverse", "line_number": 121, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 122, "usage_type": "name"}, {"api_name": "models.Address.objects.get_default_address", "line_number": 129, "usage_type": "call"}, {"api_name": "models.Address.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.Address", "line_number": 129, "usage_type": "name"}, {"api_name": "django_redis.get_redis_connection", "line_number": 138, "usage_type": "call"}, {"api_name": "apps.goods.models.GoodsSKU.objects.get", "line_number": 149, "usage_type": "call"}, {"api_name": "apps.goods.models.GoodsSKU.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "apps.goods.models.GoodsSKU", "line_number": 149, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 161, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 163, "usage_type": "name"}, {"api_name": "apps.order.models.OrderInfo.objects.filter", "line_number": 171, "usage_type": "call"}, {"api_name": "apps.order.models.OrderInfo.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "apps.order.models.OrderInfo", "line_number": 171, "usage_type": "name"}, {"api_name": "apps.order.models.OrderInfo.DoesNotExist", "line_number": 172, "usage_type": "attribute"}, {"api_name": "apps.order.models.OrderInfo", "line_number": 172, "usage_type": "name"}, {"api_name": "apps.order.models.OrderGoods.objects.filter", "line_number": 184, "usage_type": "call"}, {"api_name": "apps.order.models.OrderGoods.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "apps.order.models.OrderGoods", "line_number": 184, "usage_type": "name"}, {"api_name": "apps.order.models.OrderInfo.ORDER_STATUS", "line_number": 190, "usage_type": "attribute"}, {"api_name": "apps.order.models.OrderInfo", "line_number": 190, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 195, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 233, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 234, "usage_type": "name"}, {"api_name": "models.Address.objects.get_default_address", "line_number": 244, "usage_type": "call"}, {"api_name": "models.Address.objects", "line_number": 244, "usage_type": "attribute"}, {"api_name": "models.Address", "line_number": 244, "usage_type": "name"}, {"api_name": "models.Address.objects.get_all_address", "line_number": 246, "usage_type": "call"}, {"api_name": "models.Address.objects", "line_number": 246, "usage_type": "attribute"}, {"api_name": "models.Address", "line_number": 246, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 256, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 268, "usage_type": "call"}, {"api_name": "models.Address.objects.get_default_address", "line_number": 281, "usage_type": "call"}, {"api_name": "models.Address.objects", "line_number": 281, "usage_type": "attribute"}, {"api_name": "models.Address", "line_number": 281, "usage_type": "name"}, {"api_name": "models.Address.objects.create", "line_number": 288, "usage_type": "call"}, {"api_name": "models.Address.objects", "line_number": 288, "usage_type": "attribute"}, {"api_name": "models.Address", "line_number": 288, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 296, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 296, 
"usage_type": "call"}]} +{"seq_id": "122817024", "text": "import functools\nfrom typing import Any, List, Optional, Sequence\n\nfrom fvcore.common.registry import Registry as _Registry\nfrom tabulate import tabulate\n\nfrom meerkat.dataframe import DataFrame\n\n\nclass Registry(_Registry):\n \"\"\"Extension of fvcore's registry that supports aliases.\"\"\"\n\n _ALIAS_KEYWORDS = (\"_aliases\", \"_ALIASES\")\n\n def __init__(self, name: str):\n super().__init__(name=name)\n\n self._metadata_map = {}\n\n def get(self, name: str, **kwargs) -> Any:\n ret = self._obj_map.get(name)\n if ret is None:\n raise KeyError(\n \"No object named '{}' found in '{}' registry!\".format(name, self._name)\n )\n\n return ret(**kwargs)()\n\n def get_obj(self, name: str) -> type:\n return self._obj_map[name]\n\n def _get_aliases(self, obj_func_or_class):\n for kw in self._ALIAS_KEYWORDS:\n if hasattr(obj_func_or_class, kw):\n return getattr(obj_func_or_class, kw)\n return []\n\n def register(\n self, obj: object = None, aliases: Sequence[str] = None\n ) -> Optional[object]:\n if obj is None:\n # used as a decorator\n def deco(func_or_class: object, aliases=None) -> object:\n name = func_or_class.__name__ # pyre-ignore\n self._do_register(name, func_or_class)\n if aliases is None:\n aliases = self._get_aliases(func_or_class)\n if not isinstance(aliases, (list, tuple, set)):\n aliases = [aliases]\n for alias in aliases:\n self._do_register(alias, func_or_class)\n return func_or_class\n\n kwargs = {\"aliases\": aliases}\n if any(v is not None for v in kwargs.values()):\n return functools.partial(deco, **kwargs)\n else:\n return deco\n\n name = obj.__name__ # pyre-ignore\n self._do_register(name, obj)\n if aliases is None:\n aliases = self._get_aliases(obj)\n for alias in aliases:\n self._do_register(alias, obj)\n\n def _do_register(self, name: str, obj: Any, **kwargs) -> None:\n self._metadata_map[name] = {\"name\": name, \"description\": obj.__doc__, **kwargs}\n return super()._do_register(name, obj)\n\n @property\n def names(self) -> List[str]:\n return list(self._obj_map.keys())\n\n @property\n def catalog(self) -> DataFrame:\n rows = []\n for name, builder in self:\n rows.append(builder.info.__dict__)\n return DataFrame(rows)\n\n def __repr__(self) -> str:\n table = tabulate(self._metadata_map.values(), tablefmt=\"fancy_grid\")\n return \"Registry of {}:\\n\".format(self._name) + table\n", "sub_path": "meerkat/tools/registry.py", "file_name": "registry.py", "file_ext": "py", "file_size_in_byte": 2768, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "fvcore.common.registry.Registry", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 39, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 56, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 72, "usage_type": "name"}, {"api_name": "meerkat.dataframe.DataFrame", "line_number": 80, "usage_type": "call"}, {"api_name": "meerkat.dataframe.DataFrame", "line_number": 76, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "78959496", "text": "#\n# CV is a framework for continuous verification.\n#\n# Copyright (c) 2018-2019 ISP RAS (http://www.ispras.ru)\n# 
Ivannikov Institute for System Programming of the Russian Academy of Sciences\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport os\nimport sys\nimport traceback\n\nfrom components import COMPONENT_QUALIFIER, DEFAULT_TOOL_PATH, CIF, CLADE_BASE_FILE, CLADE_WORK_DIR, JSON_EXTENSION\nfrom components.builder import Builder\nfrom components.component import Component\n\nDEFAULT_CALLGRAPH_FILE = \"callgraph.json\"\nTAG_CACHE = \"cached call graph\"\n\n\nclass Qualifier(Component):\n def __init__(self, builder: Builder, entrypoints_files: list):\n super(Qualifier, self).__init__(COMPONENT_QUALIFIER, builder.config)\n self.install_dir = builder.install_dir\n self.source_dir = builder.source_dir\n self.builder = builder\n\n os.chdir(self.source_dir)\n\n path_cif = self.get_tool_path(DEFAULT_TOOL_PATH[CIF])\n self.logger.debug(\"Using CIF found in directory '{}'\".format(path_cif))\n os.environ[\"PATH\"] += os.pathsep + path_cif\n\n cached_result = self.component_config.get(TAG_CACHE, None)\n if cached_result and os.path.exists(cached_result):\n self.result = cached_result\n with open(self.result, \"r\", errors='ignore') as fh:\n self.content = json.load(fh)\n else:\n self.logger.debug(\"Using Clade tool to obtain function call tree\")\n try:\n # noinspection PyUnresolvedReferences\n from clade import Clade\n c = Clade(CLADE_WORK_DIR, CLADE_BASE_FILE)\n c.parse_all()\n self.content = c.get_callgraph()\n except Exception:\n error_msg = \"Clade has failed: {}\".format(traceback.format_exc())\n sys.exit(error_msg)\n self.logger.info(\"Clade successfully obtained call graph\")\n\n self.logger.debug(\"Reading files with description of entry points\")\n self.entrypoints = set()\n for file in entrypoints_files:\n if os.path.isfile(file) and file.endswith(JSON_EXTENSION):\n with open(file, errors='ignore') as data_file:\n data = json.load(data_file)\n identifier = os.path.basename(file)[:-len(JSON_EXTENSION)]\n self.logger.debug(\"Description {} contains {} entry points\".\n format(identifier, len(data.get(\"entrypoints\", {}))))\n for name, etc in data.get(\"entrypoints\", {}).items():\n self.entrypoints.add(name)\n\n os.chdir(self.work_dir)\n\n def __find_function_calls(self, target_func, result):\n for name, values in self.content.items():\n for func, etc in values.items():\n if func == target_func:\n for op, args in etc.items():\n if op == \"called_in\":\n for source_file, attrs in args.items():\n for key, vals in attrs.items():\n if key not in result:\n result.add(key)\n self.__find_function_calls(key, result)\n\n def find_functions(self, target_functions):\n result = set(target_functions)\n for func in target_functions:\n self.__find_function_calls(func, result)\n res = result.intersection(self.entrypoints)\n if res:\n self.logger.info(\"Specified commits relate with the following entry points: {}\".format(\", \".join(res)))\n else:\n self.logger.info(\"Could not find any related entry points for specified commits\")\n self.logger.info(\"Checking all subsystems, which include 
modifications\")\n return res\n\n def analyse_commits(self, commits):\n specific_functions = set()\n specific_sources = set()\n os.chdir(self.source_dir)\n for commit in commits:\n self.logger.debug(\"Checking commit '{}' in the source directory\".format(commit))\n self.builder.check_commit(commit)\n specific_sources = specific_sources.union(self.builder.get_changed_files())\n specific_functions = specific_functions.union(self.builder.get_changed_functions())\n os.chdir(self.work_dir)\n self.logger.debug(\"Modified files: '{}'\".format(specific_sources))\n self.logger.debug(\"Modified functions: '{}'\".format(specific_functions))\n\n return specific_sources, specific_functions\n\n def stop(self):\n del self.content\n return self.get_component_full_stats()\n", "sub_path": "scripts/components/qualifier.py", "file_name": "qualifier.py", "file_ext": "py", "file_size_in_byte": 5190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "components.component.Component", "line_number": 33, "usage_type": "name"}, {"api_name": "components.builder.Builder", "line_number": 34, "usage_type": "name"}, {"api_name": "components.COMPONENT_QUALIFIER", "line_number": 35, "usage_type": "argument"}, {"api_name": "os.chdir", "line_number": 40, "usage_type": "call"}, {"api_name": "components.DEFAULT_TOOL_PATH", "line_number": 42, "usage_type": "name"}, {"api_name": "components.CIF", "line_number": 42, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 50, "usage_type": "call"}, {"api_name": "clade.Clade", "line_number": 56, "usage_type": "call"}, {"api_name": "components.CLADE_WORK_DIR", "line_number": 56, "usage_type": "argument"}, {"api_name": "components.CLADE_BASE_FILE", "line_number": 56, "usage_type": "argument"}, {"api_name": "traceback.format_exc", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "components.JSON_EXTENSION", "line_number": 67, "usage_type": "argument"}, {"api_name": "json.load", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "components.JSON_EXTENSION", "line_number": 70, "usage_type": "argument"}, {"api_name": "os.chdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 105, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "250262401", "text": "from __future__ import unicode_literals\nimport os\nfrom flask import Flask, request, abort\nfrom linebot import LineBotApi, WebhookHandler\nfrom linebot.exceptions import InvalidSignatureError,LineBotApiError\nfrom linebot.models import MessageEvent, TextMessage, TextSendMessage,ImageSendMessage\nimport requests\nimport configparser\n\nimport random\n\napp = Flask(__name__)\n\n# LINE 聊天機器人的基本資料\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nline_bot_api = LineBotApi(config.get('line-bot', 
'channel_access_token'))\nhandler = WebhookHandler(config.get('line-bot', 'channel_secret'))\n\n\n# 接收 LINE 的資訊\n@app.route(\"/callback\",methods=['POST',\"GET\"])\ndef callback():\n if request.method == 'GET':\n print(\"receive get request\")\n with open('id.txt', 'r') as f:\n user_id = f.read()\n f.close()\n\n sensor_signal = request.args.get(key='SENSOR')\n predict_signal = request.args.get(key=\"RESULT\")##此處程式碼為辨識影像結果\n print(predict_signal)\n if (sensor_signal==\"ON\"):\n # line_bot_api.broadcast(TextSendMessage(text='SENSOR IS ON SIGNAL'))#廣播通知全部人\n # line_bot_api.broadcast(TextSendMessage(text='This is a broadcast message'))#廣播通知全部人\n line_bot_api.push_message(user_id, TextSendMessage(text='Message from Desktop send to specific id'))#向特定人傳送訊息\n################################# 此處程式碼為辨識影像結果\n if (predict_signal == \"0 khduh\"):\n line_bot_api.push_message(user_id, TextSendMessage(text='Welcome home'))\n elif (predict_signal == \"1 mask\"):\n line_bot_api.push_message(user_id, TextSendMessage(text='Warning'))\n line_bot_api.push_message(user_id,\n ImageSendMessage(original_content_url=\"https://c8fae789e773.ngrok.io/photo_page#\",\n preview_image_url=\"https://c8fae789e773.ngrok.io/photo_page#\"))\n else:\n line_bot_api.push_message(user_id, TextSendMessage(text='No predicting'))\n pass\n#################################此處程式碼為辨識影像結果\n\n return \"OK\"\n elif request.method == 'POST':\n signature = request.headers['X-Line-Signature']\n\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n try:\n print(body, signature)\n handler.handle(body, signature)\n\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n\n# 學你說話\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=event.message.text))\n if event.message.text == \"setreply\":\n var_id = (event.source.user_id)\n with open('id.txt', 'w') as f:\n f.write(str(var_id))\n f.close()\n elif (event.message.text == \"photo\"):\n pass\n data = {\n \"name\": \"Jason\",\n \"photo\": \"ON\"\n }\n print(data.keys())\n # \"message from desktop\"\n r = requests.get('https://c8fae789e773.ngrok.io', params=data)\n r.close()\n else:\n pass\nif __name__ == \"__main__\":\n app.run(debug=True, host='127.0.0.1', port=5000)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 15, "usage_type": "call"}, {"api_name": "linebot.LineBotApi", "line_number": 18, "usage_type": "call"}, {"api_name": "linebot.WebhookHandler", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 37, 
"usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 40, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 42, "usage_type": "call"}, {"api_name": "linebot.models.ImageSendMessage", "line_number": 44, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "linebot.exceptions.InvalidSignatureError", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 63, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 87, "usage_type": "call"}, {"api_name": "linebot.models.MessageEvent", "line_number": 69, "usage_type": "argument"}, {"api_name": "linebot.models.TextMessage", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "297748540", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 6 10:11:09 2021\r\n\r\n@author: Mariano Devita\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\n#### Primeros pasos\r\n\r\n- instalar BeautifulSoup\r\n\r\nPara eso, escribir en anaconda prompt pip install beautifulsoup4\r\n\r\n- instalar requests\r\n\r\nPara eso, escribir en anaconda prompt pip install requests\r\n\r\n- instalar lxml parser\r\n\r\nPara eso, escribir en anaconda prompt pip install lxml\r\n\r\nTener en claro que el objetivo es pasar de una tabla en HTML a un DataFrame\r\nque se puede exportar a un csv.\r\n\r\n\"\"\"\r\n\r\n# Llamo requests y BeautifulSoup que los instalé previamente.\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n#Importo pandas para ir visualizando lo que estoy haciendo y también me \r\n#va a servir después para exportar a csv.\r\nimport pandas as pd\r\n\r\n\r\n#Defino un string llamado url y luego uso requests.get que cuando le digo \r\n#cuál es la URL, va al servidor en donde está alojada la página\r\n#y trata de compiar el HTML.\r\nurl = 'https://www.zipcodestogo.com/Maryland/'\r\n\r\n#La sintaxis básica es page = requests.get(url). Al ejecutar page, debería\r\n#dar como output , pero daba . Si daba\r\n#, la página no está más. Aparentemente, \r\n#significa que el servidor no da permiso para copiar el código. Para eso\r\n#está User-Agent. Por defualt el User-Agent de python es \r\n#python-requests/2.21.0, que es probable que haya sido\r\n#bloqueado por la compañía que hace el hosting. Al cambiar User-Agent,\r\n#page da \r\npage = requests.get(url, headers = {\"User-Agent\": \"XY\"})\r\n#page\r\n\r\n#Hasta ahora sólo he copiado la página en una variable que llamé page.\r\n#Lo que quiero ahora es sacar de esa variable el texto del HTML y guardarlo\r\n#en una variable llamada soup. Si corro soup, me da el HTML completo de la\r\n#página.\r\nsoup = BeautifulSoup(page.text, \"lxml\")\r\n#soup\r\n\r\n#Hasta acá, definí la página (url), la copié (page) y la pegué (soup).\r\n\r\n#Ahora quiero sacar la información de soup. 
Antes que nada, en el navegador\r\n#inspecciono la página para saber exactamente cuál es y cómo es la tabla.\r\n#En este caso la tabla que quiero tiene como class a inner_table.\r\n#Entonces puedo identificarla con esa class.\r\n\r\n#Para eso, soup.find va a encontrar en soup, lo que le pida, que es una table\r\n#con class = \"inner_table\".\r\ntable_data = soup.find(\"table\", {\"class\" : \"inner_table\"})\r\n\r\n#Con eso, \"descartamos\" toda la parte del código que no es tabla (en realidad,\r\n#sigue estando en soup, pero sólo nos queremos quedar con la tabla).\r\n\r\n#Teniendo en cuenta que el objetivo es armar una base de datos, voy a definir\r\n#el encabezado de esa base con el encabezado de la tabla en la página.\r\n\r\n#Como la tabla no tiene header, es decir, no tiene la etiqueta theader,\r\n#sino que la primera fila lo es, no puedo definir al encabezado buscando las\r\n#etiquetas soup.find_all(\"th\"). Pero en esta página, la primera fila tiene\r\n#inline styling con style = \"background-color: #F5F5F5;\" entonces puedo sacar\r\n#esa first row igual que antes con soup.find para poder armar el encabezado.\r\nfirst_row = soup.find(\"tr\", {\"style\" : \"background-color: #F5F5F5;\"})\r\n\r\n#Hasta acá, definí la página (url), la copié (page), la pegué (soup), le pedí\r\n#que se quede con el pedazo de código de HTML donde está la tabla (table_data)\r\n#y le pedí que se quede con el pedazo de código de la primera fila de la\r\n# tabla (first_row).\r\n\r\n#Una vez que tengo el pedazo de código, vuelvo a usar .text, para que se quede\r\n#con el texto de la primera fila y descarte las etiquetas y las otras partes\r\n#del código HTML.\r\ntitle = first_row.text\r\n#title\r\n\r\n#El resultado de title es una string con los encabezados de la base que hay\r\n#que separar. Lo mejor es imprimir title para ver cómo es el string.\r\nzip_code = title[1:9]\r\ncity = title[10:14]\r\ncounty = title[15:21]\r\nzip_code_map = title[22:len(title)-1]\r\n\r\n#En una variable aparte defino los encabezados como una lista que contiene los\r\n#strings del nombre de cada encabezado.\r\nheaders = [zip_code, city, county, zip_code_map]\r\n\r\n#Y ahora puedo armar la base con la lista que creé antes como encabezado.\r\ndf = pd.DataFrame(columns = headers)\r\n\r\n#Ahora quiero lo datos de la tabla. Entoces, para cada i entre todas las\r\n#filas de la tabla (tag \"tr\"), quiero que me encuentres todas las\r\n#columnas(tag \"td\"), que descartes todo lo que es código y me dejes\r\n#el texto de la fila. Luego, añadí esas filas al dataframe.\r\n\r\n#Empieza a iterar desde 2 porque la primera fila es el header. Si lo pongo a \r\n#iterar desde 1, la primera fila que me va a exportar es el header y la voy a\r\n#tener que borrar.\r\nfor i in table_data.find_all('tr')[2:]:\r\n row_data = i.find_all('td')\r\n row = [tr.text for tr in row_data]\r\n df.loc[len(df)] = row\r\n\r\n#En caso que iteremos con index desde 1, se puede usar df = df.iloc[1: , :]\r\n#para tirar la primera observación que es el header.\r\n\r\n#Como la columna Zip Code Map no me interesa, y de hecho lo único que baja es\r\n#View Map o algo así, descarto esa columna.\r\ndel df[\"Zip Code Map\"]\r\n\r\n#Exporta la tabla como csv. 
Para exportar con separador ;, sep = \";\".\r\ndf.to_csv(\"zipcodes.csv\", index = False)\r\n", "sub_path": "zipcodes.py", "file_name": "zipcodes.py", "file_ext": "py", "file_size_in_byte": 5193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 51, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "641570623", "text": "import numpy as np\nimport tensorflow as tf\nfrom utils import retrieve_data, show_weights, DATA_SIZE_X, DATA_SIZE_Y, NUM_EPOCHS\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport time\n\ndef main():\n (train_x, train_y), (test_x, test_y) = retrieve_data()\n\n start_time = time.time()\n\n # Build and evaluate all three models for the data\n slnn = single_layer_neural_network(train_x, train_y, test_x, test_y)\n mlff = multi_layer_feed_forward(train_x, train_y, test_x, test_y)\n cnn = convolutional_neural_network(train_x, train_y, test_x, test_y)\n\n # Print the metrics of each model on the data\n print(f\"Achieved {slnn}% Accuracy with Single-Layer Neural Network.\")\n print(f\"Achieved {mlff[1] * 100.0}% Accuracy with Multi-Layer Feed Forward Neural Network.\")\n print(f\"Achieved {cnn[1] * 100.0}% Accuracy with Convolutional Neural Network.\")\n print(f\"Finished building and training all Neural Networks in {time.time() - start_time}s.\")\n\n\ndef multi_layer_feed_forward(train_x, train_y, test_x, test_y):\n \"\"\"\n Method to build, train and evaluate a multi-layer feed forward neural network.\n \"\"\"\n\n # Build the model, one layer to flatten the inputs and then two Dense feed-forward layers with ReLU activation. 
\n model = Sequential([\n Flatten(input_shape=(DATA_SIZE_X, DATA_SIZE_Y)),\n Dense(128, activation='relu'),\n Dense(64, activation='relu'),\n Dense(10, activation='softmax')\n ])\n\n # Compile the model with the ada optimizer, using categorical cross entropy loss, and accuracy as the metric\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n \n # Fit the model to the training data using a preset number of epochs\n model.fit(train_x, train_y, epochs=NUM_EPOCHS)\n\n # Return the results of evaluating the model on the test data\n return model.evaluate(test_x, test_y)\n\ndef convolutional_neural_network(train_x, train_y, test_x, test_y):\n \"\"\"\n Method to build, train and evaluate a Convolutional Neural Network on the dataset.\n \"\"\"\n\n # Reshape inputs\n train_x = train_x.reshape(train_x.shape[0], DATA_SIZE_X, DATA_SIZE_Y, 1)\n test_x = test_x.reshape(test_x.shape[0], DATA_SIZE_X, DATA_SIZE_Y, 1)\n\n # Normalize the inputs\n train_x = tf.keras.utils.normalize(train_x)\n test_x = tf.keras.utils.normalize(test_x)\n\n # Create the model for the CNN, this uses two alternating layers of Convolution and Pooling followed by a single Dense layer\n model = Sequential([\n Conv2D(32, (3, 3), strides=1, input_shape=(DATA_SIZE_X, DATA_SIZE_Y, 1)),\n MaxPooling2D((2, 2), strides=2),\n Conv2D(64, (3, 3), strides=1),\n MaxPooling2D((2, 2), strides=2),\n Flatten(),\n Dense(128, activation='relu'),\n Dense(10, activation='softmax')\n ])\n\n # Compile the model using the adam optimizer, categorical cross entropy loss and using accuracy as the metric.\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n # Fit the model with a preset number of epochs\n model.fit(train_x, train_y, epochs=NUM_EPOCHS)\n\n # Return the results of evaluating the model on the test data\n return model.evaluate(test_x, test_y)\n\ndef single_layer_neural_network(train_x, train_y, test_x, test_y):\n \"\"\"\n Method to build, train and evaluate a single-layer neural network.\n Makes use of gradient descent to update weights incrementaly as well as the bias.\n Furthermore, shows that to within a certain degree the universal approximation theory holds. 
\n The function to determine digits can be represented in only a single layer neural network.\n \"\"\"\n\n # Reshape the input\n images = train_x[0:train_x.shape[0]].reshape(train_x.shape[0],28*28)\n images = images.T\n\n # Reshape the test inputs, each image will be a single vector the length of the dimensions multiplied together\n images_test = test_x.reshape(test_x.shape[0], 28*28)\n images_test = images_test.T\n\n # Step-size\n alpha = 0.01\n\n # Weight matrix\n W = np.zeros((10, 28*28))\n\n # Bias matrix\n B = np.zeros((10,1))\n\n # Performs 1000 iterations of gradient descent\n for i in tqdm(range(1000)):\n W -= alpha * ((1/images.shape[1])*(((W.dot(images) + B) - train_y.T).dot(images.T))) # Computes update to W based on Gradient of loss wrt the weights\n B -= alpha * np.sum(((W.dot(images) + B) - train_y.T), axis=1, keepdims=True) * (np.divide(1,images.shape[1])) # Computes update to B using Gradient of the loss wrt bias which is just the sum of the error\n \n \n # Compute the percentage of images from the test set classified correctly from the results on the test set.\n test_result = (W.dot(images_test) + B)\n count = 0\n\n for i in range(0, test_result.shape[1]):\n if(np.argmax(test_result[:,i]) == np.argmax(test_y[i])):\n count += 1\n show_weights(W)\n\n # Return the percentage of the images classified correctly\n return count/test_result.shape[1]*100\n\n\nif __name__ ==\"__main__\":\n main()", "sub_path": "neural_network.py", "file_name": "neural_network.py", "file_ext": "py", "file_size_in_byte": 5224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "utils.retrieve_data", "line_number": 11, "usage_type": "call"}, {"api_name": "time.time", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.DATA_SIZE_X", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.DATA_SIZE_Y", "line_number": 34, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.NUM_EPOCHS", "line_number": 44, "usage_type": "name"}, {"api_name": "utils.DATA_SIZE_X", "line_number": 55, "usage_type": "argument"}, {"api_name": "utils.DATA_SIZE_Y", "line_number": 55, "usage_type": "argument"}, {"api_name": "utils.DATA_SIZE_X", "line_number": 56, "usage_type": "argument"}, {"api_name": "utils.DATA_SIZE_Y", "line_number": 56, "usage_type": "argument"}, {"api_name": "tensorflow.keras.utils.normalize", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.utils.normalize", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.DATA_SIZE_X", "line_number": 64, "usage_type": "name"}, {"api_name": "utils.DATA_SIZE_Y", "line_number": 64, "usage_type": "name"}, {"api_name": 
"tensorflow.keras.layers.MaxPooling2D", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.NUM_EPOCHS", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.show_weights", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "447056169", "text": "import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom capsnet import CapsNet\nimport time\nimport os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\" # set to run on CPU only\n\n\nmnist = tf.keras.datasets.mnist # stores the mnist database in a variable. these are handwritten numbers\n(x_train, y_train), (x_test, y_test) = mnist.load_data() # separates data set into testing and training sets\n\n# normalize\nx_train = (x_train / 255).astype(\"float32\")\nx_test = (x_test / 255).astype(\"float32\")\ny_train = y_train.astype(\"int32\")\ny_test = y_test.astype(\"int32\")\n\ncapsNet = CapsNet()\nreconstructor = tf.keras.models.Sequential()\nreconstructor.add(tf.keras.layers.Flatten())\nreconstructor.add(tf.keras.layers.Dense(512, activation='relu'))\nreconstructor.add(tf.keras.layers.Dense(1024, activation='relu'))\nreconstructor.add(tf.keras.layers.Dense(784, activation='sigmoid'))\n\n# load most recent CapsNet\ncheckpoint_path = \"./MNIST_checkpoints\"\nckpt = tf.train.Checkpoint(capsNet=capsNet, reconstructor=reconstructor)\nckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)\n# if a checkpoint exists, restore the latest checkpoint.\nif ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n\ndef process_image(img):\n img = img.reshape([1, 28, 28, 1])\n digit_caps = capsNet(img)[0] # (10, 16)\n magnitudes = tf.math.sqrt(tf.reduce_sum(digit_caps ** 2, axis=-1))\n values, indices = tf.math.top_k(magnitudes, k=2)\n predicted_number, second_number = indices\n predicted_number_vector = digit_caps[predicted_number]\n predicted_number_chance, second_number_chance = values\n reconstructed_image = get_reconstructed_image(predicted_number_vector, predicted_number)\n\n return predicted_number.numpy(), predicted_number_chance.numpy(), second_number.numpy(), \\\n second_number_chance.numpy(), predicted_number_vector.numpy(), reconstructed_image\n\n\ndef get_reconstructed_image(vector, predicted_number):\n v = np.zeros([1, 160])\n v[0, predicted_number * 16: (predicted_number + 1) * 16] = vector\n return reconstructor.predict(v).reshape([28,28])\n\n\nif __name__ == \"__main__\":\n img = x_test[256]\n predicted_number, predicted_number_chance, predicted_number_vector, reconstructed_image = process_image(img)\n print(predicted_number, predicted_number_chance)\n 
plt.imshow(reconstructed_image)\n plt.show()\n plt.imshow(img)\n plt.show()\n\n# reconstructed_image = create_reconstruction_input_capsules(x_test[:100], y_test[:100])\n# reconstructed_image = reconstructor.predict(reconstructed_image)\n# reconstructed_image = reconstructed_image.reshape([-1, 28, 28])\n#\n# index = 1\n# plt.imshow(reconstructed_image[index])\n# plt.show()\n#\n# plt.imshow(x_test[index])\n# plt.show()\n\n\n\n", "sub_path": "MNIST/reconstruction.py", "file_name": "reconstruction.py", "file_ext": "py", "file_size_in_byte": 2815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tensorflow.keras", "line_number": 10, "usage_type": "attribute"}, {"api_name": "capsnet.CapsNet", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Checkpoint", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.train.CheckpointManager", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.math.sqrt", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.math.top_k", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "169140941", "text": "import numpy as np\nimport cv2\n\ncanvas = np.zeros((300, 300, 3), dtype = 'uint8') # 500 rows, 700 columns\n(centerX, centerY) = (canvas.shape[1] // 2, canvas.shape[0] // 2) # centerX, center Y is x, y coordinate \nwhite = (255, 255, 255)\n\nfor r in range(0, 175, 25):\n cv2.circle(canvas, (centerX, centerY), r, white)\n\ncv2.imshow('circle', canvas)\n\nfor i in range(0, 25):\n radius = np.random.randint(5, high = 
200)\n color = np.random.randint(0, high = 256, size = (3,)).tolist() # list color 3-dimensional\n pt = np.random.randint(0, high = 300, size = (2,)) \n cv2.circle(canvas, tuple(pt), radius, color, -1)\n\ncv2.imshow('many circle', canvas)\n\ncv2.waitKey(0)\n", "sub_path": "Day2/drawCircle.py", "file_name": "drawCircle.py", "file_ext": "py", "file_size_in_byte": 662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.zeros", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "116945761", "text": "import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport logging\n\nfrom bnnbench.models.mlp import MLP\nfrom bnnbench.models.auxiliary_funcs import evaluate_rmse_ll\nfrom bnnbench.utils.normalization import zero_mean_unit_var_normalization, zero_mean_unit_var_denormalization\nfrom bnnbench.config import globalConfig\nfrom functools import partial\nfrom collections import OrderedDict, namedtuple\nfrom ConfigSpace import ConfigurationSpace, Configuration, UniformFloatHyperparameter, UniformIntegerHyperparameter\nfrom scipy.stats import norm\n\n# TODO: Switch to globalConfig, if needed\n\nlogger = logging.getLogger(__name__)\n\n\nclass MCBatchNorm(MLP):\n r\"\"\"\n Extends the MLP model by adding a Batch Normalization layer after each fully connected layer, and generates the\n predictive mean as well as variance as model output.\n \"\"\"\n # Add any new parameters needed exclusively by this model here\n __modelParamsDefaultDict = {\n \"learn_affines\": True,\n \"running_stats\": True,\n \"bn_momentum\": 0.1,\n \"precision\": 0.1\n }\n __modelParams = namedtuple(\"mcbatchnormModelParams\", __modelParamsDefaultDict.keys(),\n defaults=__modelParamsDefaultDict.values())\n\n # Combine the parameters used by this model with those of the MLP Model\n modelParamsContainer = namedtuple(\n \"allModelParams\",\n tuple(__modelParams._fields_defaults.keys()) + tuple(MLP.modelParamsContainer._fields_defaults.keys()),\n defaults=tuple(__modelParams._fields_defaults.values()) +\n tuple(MLP.modelParamsContainer._fields_defaults.values())\n )\n\n # Create a record of all default parameter values used to run this model, including the Base Model parameters\n _default_model_params = modelParamsContainer()\n\n def __init__(self,\n learn_affines=_default_model_params.learn_affines,\n running_stats=_default_model_params.running_stats,\n bn_momentum=_default_model_params.bn_momentum,\n precision=_default_model_params.precision, **kwargs):\n r\"\"\"\n Bayesian Optimizer that uses a Multi-Layer Perceptron Neural Network with MC-BatchNorm.\n\n Parameters\n ----------\n learn_affines: bool\n Whether or not to make the affine transformation 
parameters of batch normalization learnable. True by\n default.\n running_stats: bool\n Toggle tracking running stats across batches in BatchNorm layers. True by default.\n bn_momentum: float\n Momentum value used by regular Batch Normalization for tracking running mean and std of batches. Set to 0\n to use simple mean and std instead of exponential. Default is 0.1.\n kwargs: dict\n Other model parameters for MLP.\n \"\"\"\n super(MCBatchNorm, self).__init__(**kwargs)\n self.learn_affines = learn_affines\n self.running_stats = running_stats\n self.bn_momentum = bn_momentum\n self.precision = precision\n logger.info(\"Intialized MC-BatchNorm model.\")\n logger.debug(\"Intialized MC-BatchNorm model parameters:\\n%s\" % str(self.model_params))\n\n def _generate_network(self):\n logger.debug(\"Generating NN for MCBatchNorm using parameters:\\nTrack running stats: %s\"\n \"\\nMomentum: %s\\nlearn affines: %s\" % (self.running_stats, self.bn_momentum, self.learn_affines)\n )\n\n input_dims = self.input_dims\n output_dims = self.output_dims\n n_units = self.hidden_layer_sizes\n layers = []\n self.batchnorm_layers = []\n\n layer_gen = MLP.mlplayergen(\n layer_size=n_units,\n input_dims=input_dims,\n output_dims=None # Don't generate the output layer yet\n )\n\n bnlayer = partial(\n nn.BatchNorm1d,\n eps=1e-5,\n momentum=self.bn_momentum,\n affine=self.learn_affines,\n track_running_stats=self.running_stats\n )\n\n for layer_idx, fclayer in enumerate(layer_gen, start=1):\n layers.append((f\"FC{layer_idx}\", fclayer))\n self.batchnorm_layers.append(bnlayer(num_features=fclayer.out_features))\n layers.append((f\"BatchNorm{layer_idx}\", self.batchnorm_layers[-1]))\n # layers.append((f\"Tanh{layer_idx}\", nn.Tanh()))\n layers.append((f\"ReLU{layer_idx}\", nn.ReLU()))\n\n layers.append((\"Output\", nn.Linear(n_units[-1], output_dims)))\n self.network = nn.Sequential(OrderedDict(layers))\n\n logger.info(\"Generated network for MC-BatchNorm.\")\n # print(f\"Modules in MCBatchNorm are {[name for name, _ in self.network.named_children()]}\")\n\n # Pre-training procedures same as for MLP, uses a common weight decay parameter for all layers.\n\n def fit(self, X, y):\n \"\"\"\n Fits this model to the given data and returns the corresponding optimum hyperparameter configuration, final\n validation loss and hyperparameter fitting history (returns None if self.optimize_hypers is False).\n Generates a validation set and generates num_confs hyperparameter configurations that are validated against\n validation loss on small training samples to choose the optimal configuration for full network training.\n Note: Completely overrides the fit() method of MLP on account of limitations in the ConfigSpace package.\n\n :param X: Features.\n :param y: Regression targets.\n :return: tuple (optimal configuration, final validation loss, history)\n \"\"\"\n\n # TODO: Update modelParams property to synchronize it with ConfigSpace, thus allowing MLP.fit() to be re-used as\n # well as extending the functionality of the model to generic hyperparameter optimizers.\n\n logger.info(\"Fitting MC-BatchNorm model to the given data.\")\n\n history = None\n\n if self.optimize_hypers:\n logger.debug(\"Performing internal hyper-parameter optimization of MC-BatchNorm Model.\")\n from sklearn.model_selection import train_test_split\n from math import log10, floor\n cs = ConfigurationSpace(name=\"PyBNN MC-BatchNorm Benchmark\", seed=self.rng.randint(0, 1_000_000_000))\n # TODO: Compare UniformFloat vs Categorical (the way Gal has implemented 
it)\n\n inv_var_y = 1. / np.var(y) # Assume y is 1-D\n tau_range_lower = int(floor(log10(inv_var_y * 0.5))) - 1\n tau_range_upper = int(floor(log10(inv_var_y * 2))) + 1\n cs.add_hyperparameter(UniformIntegerHyperparameter(name=\"batch_size\", lower=5, upper=10))\n cs.add_hyperparameter(UniformIntegerHyperparameter(name=\"weight_decay\", lower=-15, upper=-1))\n # cs.add_hyperparameter(UniformIntegerHyperparameter(name=\"num_epochs\", lower=5, upper=20))\n cs.add_hyperparameter(UniformFloatHyperparameter(name=\"precision\", lower=10 ** tau_range_lower,\n upper=10 ** tau_range_upper))\n confs = cs.sample_configuration(self.num_confs)\n logger.debug(\"Generated %d random configurations.\" % self.num_confs)\n\n Xtrain, Xval, ytrain, yval = train_test_split(X, y, train_size=0.8, shuffle=True, random_state=self.rng)\n logger.debug(\"Generated validation set.\")\n\n optim = None\n history = []\n old_tblog_flag = globalConfig.tblog\n globalConfig.tblog = False # Disable Tensorboard logging if it was on since it's not needed here.\n\n for idx, conf in enumerate(confs):\n logger.debug(\"Training configuration #%d\" % (idx + 1))\n logger.debug(\"Sampled configuration %s\" % conf)\n\n new_model = MCBatchNorm()\n new_model.model_params = self.model_params._replace(**{\n \"batch_size\": 2 ** conf.get(\"batch_size\"),\n \"weight_decay\": 10 ** conf.get(\"weight_decay\"),\n # \"num_epochs\": 100 * conf.get(\"num_epochs\"),\n \"num_epochs\": self.num_epochs // 10,\n \"precision\": conf.get(\"precision\")\n })\n new_model.preprocess_training_data(Xtrain, ytrain)\n new_model.train_network()\n logger.debug(\"Finished training sample network.\")\n\n # Set validation loss to mean NLL\n valid_loss = -new_model.evaluate(X_test=Xval, y_test=yval, nsamples=500)[\"LogLikelihood\"]\n logger.debug(\"Generated validation loss %f\" % valid_loss)\n\n res = (valid_loss, conf)\n\n if optim is None or valid_loss < optim[0]:\n optim = res\n logger.debug(\"Updated validation loss %f, optimum configuration to %s\" % optim)\n\n history.append(res)\n del new_model # Conserve memory\n\n logger.info(\"Training final model using optimal configuration %s\\n\" % optim[1])\n globalConfig.tblog = old_tblog_flag\n\n self.model_params = self.model_params._replace(**{\n \"batch_size\": 2 ** optim[1].get(\"batch_size\"),\n \"weight_decay\": 10 ** optim[1].get(\"weight_decay\"),\n # \"num_epochs\": 100 * optim[1].get(\"num_epochs\"),\n \"precision\": optim[1].get(\"precision\")\n })\n\n self.preprocess_training_data(X, y)\n self.train_network()\n\n # TODO: Integrate saving model parameters file here?\n # if globalConfig.save_model:\n # self.save_network()\n\n # return results, history\n return history\n\n def _predict_mc(self, X_test, nsamples=500):\n r\"\"\"\n Performs nsamples forward passes on the given data and returns the results.\n\n Parameters\n ----------\n X_test: np.ndarray (N, D)\n N input test points\n\n nsamples: int\n Number of samples to generate for each test point\n\n Returns\n ----------\n np.array(N,)\n predictive mean\n np.array(N,)\n predictive variance\n\n \"\"\"\n\n logger.debug(f\"Running predict_mc on input with shape {X_test.shape}, using {nsamples} samples.\")\n # Normalize inputs\n if self.normalize_input:\n X_, _, _ = zero_mean_unit_var_normalization(X_test, self.X_mean, self.X_std)\n else:\n X_ = X_test\n\n # Sample a number of predictions for each given point\n # Generate mean and variance for each given point from sampled predictions\n\n X_ = torch.Tensor(X_)\n Yt_hat = []\n\n # We want to generate 'nsamples' 
minibatches\n for ctr in range(nsamples * self.batch_size // self.X.shape[0]):\n for batch_inputs, _ in self.iterate_minibatches(self.X, self.y, shuffle=True, as_tensor=True):\n # Reset all previous running statistics for all BatchNorm layers\n [layer.reset_running_stats() for layer in self.batchnorm_layers]\n\n # Perform a forward pass on one mini-batch in training mode in order to update running statistics with\n # only one mini-batch's mean and variance\n self.network.train()\n _ = self.network(batch_inputs)\n\n # Switch to evaluation mode and perform a forward pass on the points to be evaluated, which will use\n # the running statistics to perform batch normalization\n self.network.eval()\n Yt_hat.append(self.network(X_).data.cpu().numpy())\n\n logger.debug(\"Generated outputs list of length %d\" % (len(Yt_hat)))\n\n if self.normalize_output:\n from functools import partial\n denorm = partial(zero_mean_unit_var_denormalization, mean=self.y_mean, std=self.y_std)\n Yt_hat = np.array(list(map(denorm, Yt_hat)))\n else:\n Yt_hat = np.array(Yt_hat)\n\n logger.debug(\"Generated final outputs array of shape %s\" % str(Yt_hat.shape))\n\n return Yt_hat\n\n def predict(self, X_test, nsamples=500):\n \"\"\"\n Given a set of input data features and the number of samples, returns the corresponding predictive means and\n variances.\n :param X_test: Union[numpy.ndarray, torch.Tensor]\n The input feature points of shape [N, d] where N is the number of data points and d is the number of\n features.\n :param nsamples: int\n The number of stochastic forward passes to sample on. Default 500.\n :return: means, variances\n Two NumPy arrays of shape [N, 1].\n \"\"\"\n mc_pred = self._predict_mc(X_test=X_test, nsamples=nsamples)\n mean = np.mean(mc_pred, axis=0)\n var = (1 / self.precision) + np.var(mc_pred, axis=0)\n if mean.ndim == 1:\n mean = mean[:, np.newaxis]\n if var.ndim == 1:\n var = var[:, np.newaxis]\n return mean, var\n\n def evaluate(self, X_test, y_test, nsamples=500):\n \"\"\"\n Evaluates the trained model on the given test data, returning the results of the analysis as the RMSE and\n Log-Likelihood of the MC-BatchNorm prediction.\n :param X_test: (N, d)\n Array of input features.\n :param y_test: (N, 1)\n Array of expected output values.\n :param nsamples: int\n Number of stochastic forward passes to use for generating the model predictions.\n :return: mc_rmse, log_likelihood\n \"\"\"\n\n return evaluate_rmse_ll(model_obj=self, X_test=X_test, y_test=y_test, nsamples=nsamples)\n", "sub_path": "bnnbench/models/mcbatchnorm.py", "file_name": "mcbatchnorm.py", "file_ext": "py", "file_size_in_byte": 13629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "bnnbench.models.mlp.MLP", "line_number": 21, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 37, "usage_type": "call"}, {"api_name": "bnnbench.models.mlp.MLP.modelParamsContainer._fields_defaults.keys", "line_number": 39, "usage_type": "call"}, {"api_name": "bnnbench.models.mlp.MLP.modelParamsContainer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "bnnbench.models.mlp.MLP", "line_number": 39, "usage_type": "name"}, {"api_name": "bnnbench.models.mlp.MLP.modelParamsContainer._fields_defaults.values", "line_number": 41, "usage_type": "call"}, {"api_name": 
"bnnbench.models.mlp.MLP.modelParamsContainer", "line_number": 41, "usage_type": "attribute"}, {"api_name": "bnnbench.models.mlp.MLP", "line_number": 41, "usage_type": "name"}, {"api_name": "bnnbench.models.mlp.MLP.mlplayergen", "line_number": 87, "usage_type": "call"}, {"api_name": "bnnbench.models.mlp.MLP", "line_number": 87, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 109, "usage_type": "call"}, {"api_name": "ConfigSpace.ConfigurationSpace", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 143, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 144, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 144, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 145, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 145, "usage_type": "call"}, {"api_name": "ConfigSpace.UniformIntegerHyperparameter", "line_number": 146, "usage_type": "call"}, {"api_name": "ConfigSpace.UniformIntegerHyperparameter", "line_number": 147, "usage_type": "call"}, {"api_name": "ConfigSpace.UniformFloatHyperparameter", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 154, "usage_type": "call"}, {"api_name": "bnnbench.config.globalConfig.tblog", "line_number": 159, "usage_type": "attribute"}, {"api_name": "bnnbench.config.globalConfig", "line_number": 159, "usage_type": "name"}, {"api_name": "bnnbench.config.globalConfig.tblog", "line_number": 160, "usage_type": "attribute"}, {"api_name": "bnnbench.config.globalConfig", "line_number": 160, "usage_type": "name"}, {"api_name": "{'train_test_split': 'sklearn.model_selection.train_test_split', 'log10': 'math.log10', 'floor': 'math.floor'}", "line_number": 166, "usage_type": "call"}, {"api_name": "bnnbench.config.globalConfig.tblog", "line_number": 192, "usage_type": "attribute"}, {"api_name": "bnnbench.config.globalConfig", "line_number": 192, "usage_type": "name"}, {"api_name": "bnnbench.utils.normalization.zero_mean_unit_var_normalization", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 242, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 265, "usage_type": "call"}, {"api_name": "bnnbench.utils.normalization.zero_mean_unit_var_denormalization", "line_number": 265, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 290, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 292, "usage_type": "attribute"}, {"api_name": 
"bnnbench.models.auxiliary_funcs.evaluate_rmse_ll", "line_number": 308, "usage_type": "call"}]} +{"seq_id": "178330408", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport asyncio\nimport os\nfrom azure.eventhub.aio import EventHubConsumerClient\nfrom influxdb import InfluxDBClient\nfrom collections.abc import Iterable\n\nimport json\nimport logging\nimport time\nimport pynmea2\nfrom datetime import datetime\n\nFORMAT = '%(asctime)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\nDB_NAME = os.getenv('INFLUXDB_DATABASE', 'mydb')\n\nlogging.info(\"Using database: '{0}'\".format(DB_NAME))\n\ninfluxdb_client = InfluxDBClient(os.getenv('INFLUXDB_HOSTNAME', 'influxdb'),\n os.getenv('INFLUXDB_PORT', '8086'),\n os.getenv('INFLUXDB_USER'),\n os.getenv('INFLUXDB_PASSWORD'),\n DB_NAME)\n\n\ndef connect_influxdb():\n while True:\n try:\n dbs = influxdb_client.get_list_database()\n if DB_NAME not in dbs:\n influxdb_client.create_database(DB_NAME)\n except Exception:\n logger.exception(\"Error connecting to InfluxDB. Retrying in 30sec\")\n time.sleep(30)\n continue\n else:\n logging.info(\"connected to influxdb\")\n break\n\n\ndef write_influxdb(payload):\n while True:\n try:\n influxdb_client.write_points(payload)\n except Exception:\n logger.exception(\"Error writing to InfluxDB. Retrying in 30sec\")\n time.sleep(30)\n continue\n else:\n break\n\ndef decode_gps(nmea_str):\n res = {}\n msg = pynmea2.parse(nmea_str)\n\n res['timestamp'] = str(msg.timestamp)\n res['lat'] = msg.lat\n res['latitude'] = msg.latitude\n res['lat_dir'] = msg.lat_dir\n res['lon'] = msg.lon\n res['longitude'] = msg.longitude\n res['lon_dir'] = msg.lon_dir\n res['gps_qual'] = msg.gps_qual\n res['num_sats'] = msg.num_sats\n res['horizontal_dil'] = msg.horizontal_dil\n res['altitude'] = msg.altitude\n res['altitude_units'] = msg.altitude_units\n res['geo_sep'] = msg.geo_sep\n res['geo_sep_units'] = msg.geo_sep_units\n res['age_gps_data'] = msg.age_gps_data\n res['ref_station_id'] = msg.ref_station_id\n\n return res\n\ndef add_field_value(fields, data):\n switcher = {\n 'float': float,\n 'int': int,\n 'str': str,\n 'nmea': decode_gps\n }\n \n if 'GPS' in data:\n add_field_value(fields, data['GPS'])\n return\n elif 'RSRP' in data:\n add_field_value(fields, data['RSRP'])\n return\n\n func = switcher.get(data['type'], lambda: str)\n res = func(data['value'])\n\n if isinstance(res, dict):\n for field in res:\n fields[field] = res[field]\n else:\n fields['value'] = res \n\ndef convert_to_influx_format(message):\n name = message.annotations[b'iothub-connection-device-id'].decode('ASCII') \n try:\n for jsonline in message.get_data():\n json_input = json.loads(jsonline)\n except json.decoder.JSONDecodeError:\n return\n\n if 'version' not in json_input:\n logging.warning('Ignoring event in unknown format')\n return\n\n if json_input['version'] != '0.0.3':\n logging.warning('Ignoring event wrong version')\n return\n\n if 'time' in json_input:\n time = datetime.fromtimestamp(int(json_input['time']))\n else:\n time = datetime.utcfromtimestamp(float(message.annotations[b'iothub-enqueuedtime'])/1000.)\n\n measurement = json_input['measurement']\n data = json_input['fields']\n\n tags = {'device': name}\n if isinstance(message.application_properties, Iterable):\n for tag in message.application_properties:\n tags[tag.decode('ASCII')] = message.application_properties[tag].decode('ASCII')\n\n fields = {}\n\n add_field_value(fields, data)\n\n if fields == 
{}:\n logging.warning('Ignoring event as it contains no readable fields')\n return\n\n json_body = [\n {\n 'measurement': measurement, \n 'tags': tags,\n 'time': time, \n 'fields': fields\n }\n ]\n\n return json_body\n\n\nasync def on_event(partition_context, event):\n # Put your code here.\n # If the operation is i/o intensive, async will have better performance.\n #print(\"Received event from partition: {}.\".format(partition_context.partition_id))\n logging.info(\"Event received: '{0}'\".format(event.message))\n\n try:\n payload = convert_to_influx_format(event.message)\n\n if payload is not None:\n logging.info(\"Write points: {0}\".format(payload))\n write_influxdb(payload)\n await partition_context.update_checkpoint(event)\n except:\n logging.exception(\"Failed to handle event\")\n\nasync def on_partition_initialize(partition_context):\n # Put your code here.\n logging.info(\"Partition: {} has been initialized.\".format(\n partition_context.partition_id))\n\n\nasync def on_partition_close(partition_context, reason):\n # Put your code here.\n logging.info(\"Partition: {} has been closed, reason for closing: {}.\".format(\n partition_context.partition_id,\n reason\n ))\n\n\nasync def on_error(partition_context, error):\n # Put your code here. partition_context can be None in the on_error callback.\n if partition_context:\n logging.error(\"An exception: {} occurred during receiving from Partition: {}.\".format(\n partition_context.partition_id,\n error\n ))\n else:\n logging.error(\n \"An exception: {} occurred during the load balance process.\".format(error))\n\n\nasync def main():\n connect_influxdb()\n\n client = EventHubConsumerClient.from_connection_string(\n conn_str=os.getenv('IOTHUB_CONNECTION_STRING'),\n consumer_group=\"$default\",\n eventhub_name=os.getenv('IOTHUB_EVENTHUB_NAME')\n )\n\n async with client:\n await client.receive(\n on_event=on_event,\n on_error=on_error,\n on_partition_close=on_partition_close,\n on_partition_initialize=on_partition_initialize,\n # \"-1\" is from the beginning of the partition. 
@latest is only new\n starting_position=\"@latest\",\n )\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n", "sub_path": "readIotHubAmqpClient.py", "file_name": "readIotHubAmqpClient.py", "file_ext": "py", "file_size_in_byte": 6376, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.basicConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 22, "usage_type": "call"}, {"api_name": "influxdb.InfluxDBClient", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "pynmea2.parse", "line_number": 59, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 108, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 109, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 121, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 129, "usage_type": "argument"}, {"api_name": "logging.warning", "line_number": 138, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 157, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 163, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 167, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 171, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 177, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 186, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 191, "usage_type": "call"}, {"api_name": "azure.eventhub.aio.EventHubConsumerClient.from_connection_string", "line_number": 198, "usage_type": "call"}, {"api_name": "azure.eventhub.aio.EventHubConsumerClient", "line_number": 198, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 199, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 201, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "507067918", "text": "# $Id: pyinit.py,v 1.2 2012/02/15 16:22:52 samn Exp $\n\nfrom neuron import h\nimport os\nimport sys\nimport datetime\nimport shutil\nimport pickle\nfrom math import sqrt, pi\nimport numpy\nimport types\n\nh(\"objref p\")\nh(\"p = new PythonObject()\")\n\ntry:\n import pylab\n from pylab import plot, arange, figure\n my_pylab_loaded = True\nexcept ImportError:\n 
print(\"Pylab not imported\")\n my_pylab_loaded = False\n\ndef htype (obj): st=obj.hname(); sv=st.split('['); return sv[0]\ndef secname (obj): obj.push(); print(h.secname()) ; h.pop_section()\ndef psection (obj): obj.push(); print(h.psection()) ; h.pop_section()\n\nallsecs=None #global list containing all NEURON sections, initialized via mkallsecs\n\n# still need to generate a full allsecs\ndef mkallsecs ():\n \"\"\" mkallsecs - make the global allsecs variable, containing\n all the NEURON sections.\n \"\"\"\n global allsecs\n allsecs=h.SectionList() # no .clear() command\n roots=h.SectionList()\n roots.allroots()\n for s in roots:\n s.push()\n allsecs.wholetree()\n return allsecs\n\n#forall syntax - c gets executed, allsecs has Sections\ndef forall (c):\n \"\"\" NEURON forall syntax - iterates through all the sections available\n note that there's a dummy loop variable called s used in this function,\n so any command that needs to access a section should be via s.\n example: forall('print s.name()') , will print all the section names.\n Also note that this function uses a global list, 'allsecs', which may\n need to get re-initialized when new sections are created, via the mkallsecs\n function above.\n \"\"\"\n global allsecs\n if (type(allsecs)==type(None)): mkallsecs()\n for s in allsecs: exec(c)\n\n#forsec syntax - executes command for each section who's name\n# contains secname as a substring\ndef forsec (secref=\"soma\",command=\"\"):\n \"\"\" NEURON forsec syntax - iterates over all sections which have a substring\n in their names matching secref argument. command is executed if match found.\n this function also utilizes the allsecs global variable.\n \"\"\"\n global allsecs\n if (type(allsecs)==type(None)): mkallsecs()\n if (type(secref)==types.StringTypes[0]):\n for s in allsecs:\n if s.name().count(secref) > 0:\n exec(command)\n else:\n for s in secref: exec(command)\n", "sub_path": "pyinit.py", "file_name": "pyinit.py", "file_ext": "py", "file_size_in_byte": 2364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "neuron.h", "line_number": 13, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 14, "usage_type": "call"}, {"api_name": "neuron.h.secname", "line_number": 25, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 25, "usage_type": "name"}, {"api_name": "neuron.h.pop_section", "line_number": 25, "usage_type": "call"}, {"api_name": "neuron.h.psection", "line_number": 26, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 26, "usage_type": "name"}, {"api_name": "neuron.h.pop_section", "line_number": 26, "usage_type": "call"}, {"api_name": "neuron.h.SectionList", "line_number": 36, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 36, "usage_type": "name"}, {"api_name": "neuron.h.SectionList", "line_number": 37, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 37, "usage_type": "name"}, {"api_name": "types.StringTypes", "line_number": 67, "usage_type": "attribute"}]} +{"seq_id": "74513723", "text": "from elasticsearch_dsl import Q\n\n\ndef chain_search(search_obj,\n query=None,\n query_type_or_q='match',\n source_kwargs=None,\n sort_args=None,\n sub_searching_attr='',\n sub_query=None,\n init_res_num=300):\n\n if query is None:\n query = {}\n if sort_args is None:\n sort_args = {}\n if source_kwargs is None:\n source_kwargs = {}\n if sub_query is None:\n sub_query = {}\n\n main_search = search_obj.query(query_type_or_q, **query). 
\\\n source(**source_kwargs). \\\n sort(sort_args)\n\n if init_res_num:\n main_search = main_search[:init_res_num]\n\n if sub_searching_attr:\n main_search = main_search.execute()\n sub_search_res = []\n for hit in main_search.hits:\n ss_attr_value = getattr(hit, sub_searching_attr)\n if (not sub_query) and ss_attr_value.isdigit():\n cur_sub_query = {'filter': Q('term', **{sub_searching_attr: ss_attr_value})}\n else:\n cur_sub_query = sub_query\n\n if cur_sub_query:\n sub_search_res += chain_search(query=cur_sub_query,\n search_obj=search_obj,\n query_type_or_q='constant_score',\n init_res_num=None)\n\n return sub_search_res\n\n else:\n return main_search\n", "sub_path": "mesta/helpers_and_misc/tools/chain_search.py", "file_name": "chain_search.py", "file_ext": "py", "file_size_in_byte": 1543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "elasticsearch_dsl.Q", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "346159536", "text": "import asyncio\r\nimport json\r\n\r\ndef order_read():\r\n\twith open(\"orders.json\") as json_file:\r\n\t\tdata = json.load(json_file)\r\n\t\torder_info = data\r\n\t\torderlegtype = order_info[\"orderLegCollection\"]\r\n\r\n\t\t# quantity = order_info[\"quantity\"]\r\n\t\tprice = order_info[\"price\"]\r\n\t\tsymbol = orderlegtype[0][\"instrument\"][\"underlyingSymbol\"]\r\n\t\t\r\n\t\t# Order instruction\r\n\t\tinstruction = orderlegtype[0][\"instruction\"]\r\n\t\tif instruction == \"BUY_TO_OPEN\":\r\n\t\t\tinstruction = \"BTO\"\r\n\t\telif instruction == \"SELL_TO_CLOSE\":\r\n\t\t\tinstruction = \"STC\"\r\n\r\n\t\traw_description = orderlegtype[0][\"instrument\"][\"description\"]\r\n\t\tdescription = raw_description.split()\r\n\t\t\r\n\t\t# Strike Price of contract\r\n\t\traw_strike = description[4]\r\n\t\tif str(raw_strike).endswith(\".0\"):\r\n\t\t\tstrike = raw_strike.split(\".\")\r\n\t\t\tstrike = strike[0]\r\n\t\telse:\r\n\t\t\tstrike = raw_strike\r\n\t\t\r\n\t\t# Option Call or Put\r\n\t\toption_type = description[5]\r\n\t\tif option_type == \"Call\":\r\n\t\t\toption_type = \"C\"\r\n\t\telif option_type == \"Put\":\r\n\t\t\toption_type = \"P\"\r\n\t\t\r\n\t\t# Expiration of Contract\r\n\t\texp_date = description[1] + description[2] + description[3]\r\n\t\t\r\n\t\t# Check to see if order was filled\r\n\t\tstatus = order_info[\"status\"]\r\n\t\t\r\n\t\t# Timestamp\r\n\t\traw_datetime = order_info[\"enteredTime\"].split(\"T\")\r\n\t\traw_time = raw_datetime[1].split(\":\")\r\n\t\thour = (int(raw_time[0]) - 8)\r\n\t\tif hour < 12:\r\n\t\t\ttime = str(str(hour) + ':' + raw_time[1] + \"am\" + \" PST\")\r\n\t\telif hour >= 12:\r\n\t\t\ttime = str(str(hour) + ':' + raw_time[1] + \"pm\" + \" PST\")\r\n\r\n\r\n\t\tif status == \"FILLED\":\r\n\t\t\t# For debugging only\r\n\t\t\t'''\r\n\t\t\tprint(f\"SYMBOL: {symbol}\")\r\n\t\t\tprint(f\"PRICE: {price}\")\r\n\t\t\tprint(instruction)\r\n\t\t\tprint(f\"{strike} STRIKE PRICE\")\r\n\t\t\tprint(f\"{option_type}\")\r\n\t\t\tprint(f\"EXP: {exp_date}\")\r\n\t\t\tprint(time)\r\n\t\t\t'''\r\n\r\n\t\t\t# So you know what was sent out\r\n\t\t\torder_template = str(instruction + \" \" + symbol + \" \" + exp_date + \" \" + strike + option_type + \" \" + \" @\" + str(price) + \" \" + time)\r\n\t\t\tprint(order_template)\r\n\r\n#order_read()", "sub_path": "TDAmeritrade Other Modules/orderreader.py", "file_name": "orderreader.py", "file_ext": "py", "file_size_in_byte": 1953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "10", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "303555166", "text": "\"\"\"Module containing methods for parsing GEDCOM files.\n\"\"\"\n\n\nfrom __future__ import print_function, absolute_import, division\n\n__all__ = ['GedcomReader', 'ParserError', 'CodecError', 'IntegrityError',\n 'guess_codec']\n\nimport codecs\nimport collections\nimport io\nimport logging\nimport re\n\nfrom .detail.io import check_bom, guess_lineno\nfrom . import model\n\n_log = logging.getLogger(__name__)\n\n# records are bytes, regex is for bytes too\n_re_gedcom_line = re.compile(br\"\"\"\n ^\n [ ]*(?P\\d+) # integer level number\n (?:[ ]*(?P@[A-Z-a-z0-9][^@]*@))? # optional @xref@\n [ ]*(?P[A-Z-a-z0-9_]+) # tag name\n (?:[ ](?P.*))? # optional value\n $\n\"\"\", re.X)\n\n# tuple class for gedcom_line grammar:\n# level: int\n# xref_id: str, possibly empty or None\n# tag: str, required, non-empty\n# value: bytes, possibly empty or None\n# offset: int\ngedcom_line = collections.namedtuple(\"gedcom_line\",\n \"level xref_id tag value offset\")\n\n\nclass ParserError(Exception):\n \"\"\"Class for exceptions raised for parsing errors.\n \"\"\"\n pass\n\n\nclass IntegrityError(Exception):\n \"\"\"Class for exceptions raised for structural errors, e.g. when record\n level nesting is inconsistent.\n \"\"\"\n pass\n\n\nclass CodecError(ParserError):\n \"\"\"Class for exceptions raised for codec-related errors.\n \"\"\"\n pass\n\n\ndef guess_codec(file, errors=\"strict\", require_char=False):\n \"\"\"Look at file contents and guess its correct encoding.\n\n File must be open in binary mode and positioned at offset 0. If BOM\n record is present then it is assumed to be UTF-8 or UTF-16 encoded\n file. GEDCOM header is searched for CHAR record and encoding name\n is extracted from it, if BOM record is present then CHAR record\n must match BOM-defined encoding.\n\n :param file: File object, must be open in binary mode.\n :param str errors: Controls error handling behavior during string\n decoding, accepts same values as standard `codecs.decode` method.\n :param bool require_char: If True then exception is thrown if CHAR\n record is not found in a header, if False and CHAR is not in the\n header then codec determined from BOM or \"gedcom\" is returned.\n :returns: Tuple (codec_name, bom_size)\n :raises: :py:class:`CodecError` when codec name in file is unknown or\n when codec name in file contradicts codec determined from BOM.\n :raises: :py:class:`UnicodeDecodeError` when codec fails to decode\n input lines and `errors` is set to \"strict\" (default).\n \"\"\"\n\n # mapping of gedcom character set specifiers to Python encoding names\n gedcom_char_to_codec = {\n 'ansel': 'gedcom',\n }\n\n # check BOM first\n bom_codec = check_bom(file)\n bom_size = file.tell()\n codec = bom_codec or 'gedcom'\n\n # scan header until CHAR or end of header\n while True:\n\n # this stops at '\\n'\n line = file.readline()\n if not line:\n raise IOError(\"Unexpected EOF while reading GEDCOM header\")\n\n # do not decode bytes to strings here, reason is that some\n # stupid apps split CONC record at byte level (in middle of\n # of multi-byte characters). 
This implies that we can only\n # work with encodings that have ASCII as single-byte subset.\n\n line = line.lstrip().rstrip(b\"\\r\\n\")\n words = line.split()\n\n if len(words) >= 2 and words[0] == b\"0\" and words[1] != b\"HEAD\":\n # past header but have not seen CHAR\n if require_char:\n raise CodecError(\"GEDCOM header does not have CHAR record\")\n else:\n break\n elif len(words) >= 3 and words[0] == b\"1\" and words[1] == b\"CHAR\":\n try:\n encoding = words[2].decode(codec, errors)\n encoding = gedcom_char_to_codec.get(encoding.lower(),\n encoding.lower())\n new_codec = codecs.lookup(encoding).name\n except LookupError:\n raise CodecError(\"Unknown codec name {0}\".format(encoding))\n if bom_codec is None:\n codec = new_codec\n elif new_codec != bom_codec:\n raise CodecError(\"CHAR codec {0} is different from BOM \"\n \"codec {1}\".format(new_codec, bom_codec))\n break\n\n return codec, bom_size\n\n\nclass GedcomReader(object):\n \"\"\"Main interface for reading GEDCOM files.\n\n :param file: File name or file object open in binary mode, file must\n be seekable.\n :param str encoding: If None (default) then file is analyzed using\n `guess_codec()` method to determine correct codec. Otherwise\n file is open using specified codec.\n :param str errors: Controls error handling behavior during string\n decoding, accepts same values as standard `codecs.decode` method.\n :param bool require_char: If True then exception is thrown if CHAR\n record is not found in a header, if False and CHAR is not in the\n header then codec determined from BOM or \"gedcom\" is used.\n \"\"\"\n\n def __init__(self, file, encoding=None, errors=\"strict\",\n require_char=False):\n self._encoding = encoding\n self._errors = errors\n self._bom_size = 0\n self._index0 = None # list of level=0 record positions\n self._xref0 = None # maps xref_id to level=0 record position\n self._header = None\n self._dialect = None\n\n # open the file\n if hasattr(file, 'read'):\n # assume it is a file already\n if hasattr(file, 'seekable'):\n # check that it supports seek()\n if not file.seekable():\n raise IOError(\"Input file does not support seek.\")\n self._file = file\n else:\n raw = io.FileIO(file)\n self._file = io.BufferedReader(raw)\n\n # check codec and BOM\n try:\n encoding, self._bom_size = guess_codec(self._file,\n errors=self._errors,\n require_char=require_char)\n except Exception:\n self._file.close()\n raise\n self._file.seek(self._bom_size)\n if not self._encoding:\n self._encoding = encoding\n\n @property\n def index0(self):\n \"\"\"List of level=0 record positions and tag names.\n \"\"\"\n if self._index0 is None:\n self._init_index()\n return self._index0\n\n @property\n def xref0(self):\n \"\"\"Dictionary which maps xref_id to level=0 record position and\n tag name.\n \"\"\"\n if self._xref0 is None:\n self._init_index()\n return self._xref0\n\n @property\n def header(self):\n \"\"\"Header record.\n \"\"\"\n if self._index0 is None:\n self._init_index()\n return self._header\n\n def _init_index(self):\n _log.debug(\"in _init_index\")\n self._index0 = []\n self._xref0 = {}\n # scan whole file for level=0 records\n for gline in self.gedcom_lines(self._bom_size):\n _log.debug(\" _init_index gline: %s\", gline)\n if gline.level == 0:\n self._index0.append((gline.offset, gline.tag))\n if gline.xref_id:\n self._xref0[gline.xref_id] = (gline.offset, gline.tag)\n _log.debug(\" _init_index gline: done proc\")\n if self._index0 and self._index0[0][1] == 'HEAD':\n self._header = self.read_record(self._index0[0][0])\n 
_log.debug(\"_init_index done\")\n\n @property\n def dialect(self):\n \"\"\"File dialect as one of model.DIALECT_* constants\n \"\"\"\n if self._dialect is None:\n self._dialect = model.DIALECT_DEFAULT\n if self.header:\n source = self.header.sub_tag(\"SOUR\")\n if source:\n if source.value == \"MYHERITAGE\":\n self._dialect = model.DIALECT_MYHERITAGE\n elif source.value == \"ALTREE\":\n self._dialect = model.DIALECT_ALTREE\n elif source.value == \"ANCESTRIS\":\n self._dialect = model.DIALECT_ANCESTRIS\n return self._dialect\n\n @dialect.setter\n def dialect(self, value):\n self._dialect = value\n\n def gedcom_lines(self, offset):\n \"\"\"Generator method for *gedcom lines*.\n\n GEDCOM line grammar is defined in Chapter 1 of GEDCOM standard, it\n consists of the level number, optional reference ID, tag name, and\n optional value separated by spaces. Chaper 1 is pure grammar level,\n it does not assign any semantics to tags or levels. Consequently\n this method does not perform any operations on the lines other than\n returning the lines in their order in file.\n\n This method iterates over all lines in input file and converts each\n line into :py:class:`gedcom_line` class.\n\n :param int offset: Position in the file to start reading.\n :returns: Iterator for gedcom_lines.\n :raises: :py:class:`ParserError` when lines have incorrect syntax.\n \"\"\"\n\n self._file.seek(offset)\n\n prev_gline = None\n while True:\n\n offset = self._file.tell()\n line = self._file.readline() # stops at \\n\n if not line:\n break\n line = line.lstrip().rstrip(b\"\\r\\n\")\n\n match = _re_gedcom_line.match(line)\n if not match:\n self._file.seek(offset)\n lineno = guess_lineno(self._file)\n line = line.decode(self._encoding, \"ignore\")\n raise ParserError(\"Invalid syntax at line \"\n \"{0}: `{1}'\".format(lineno, line))\n\n level = int(match.group('level'))\n xref_id = match.group('xref')\n if xref_id:\n xref_id = xref_id.decode(self._encoding, self._errors)\n tag = match.group('tag').decode(self._encoding, self._errors)\n\n # simple structural integrity check\n if prev_gline is not None:\n if level - prev_gline.level > 1:\n # nested levels should be incremental (+1)\n self._file.seek(offset)\n lineno = guess_lineno(self._file)\n line = line.decode(self._encoding, \"ignore\")\n raise IntegrityError(\"Structural integrity - \"\n \"illegal level nesting at line \"\n \"{0}: `{1}'\".format(lineno, line))\n if tag in (\"CONT\", \"CONC\"):\n # CONT/CONC level must be +1 from preceding non-CONT/CONC\n # record or the same as preceding CONT/CONC record\n if ((prev_gline.tag in (\"CONT\", \"CONC\") and\n level != prev_gline.level) or\n (prev_gline.tag not in (\"CONT\", \"CONC\") and\n level - prev_gline.level != 1)):\n self._file.seek(offset)\n lineno = guess_lineno(self._file)\n line = line.decode(self._encoding, \"ignore\")\n raise IntegrityError(\"Structural integrity - illegal \"\n \"CONC/CONT nesting at line \"\n \"{0}: `{1}'\".format(lineno, line))\n\n gline = gedcom_line(level=level,\n xref_id=xref_id,\n tag=tag,\n value=match.group('value'),\n offset=offset)\n yield gline\n\n prev_gline = gline\n\n def records0(self, tag=None):\n \"\"\"Iterator over all level=0 records.\n\n :param str tag: If ``None`` is given (default) then return all level=0\n records, otherwise return level=0 records with the given tag.\n \"\"\"\n _log.debug(\"in records0\")\n for offset, xtag in self.index0:\n _log.debug(\" records0: offset: %s; xtag: %s\", offset, xtag)\n if tag is None or tag == xtag:\n yield self.read_record(offset)\n\n 
def read_record(self, offset):\n \"\"\"Read next complete record from a file starting at given position.\n\n Reads the record at given position and all its sub-records. Stops\n reading at EOF or next record with the same or higher (smaller) level\n number. File position after return from this method is not specified,\n re-position file if you want to read other records.\n\n :param int offset: Position in file to start reading from.\n :return: :py:class:`model.Record` instance or None if offset points\n past EOF.\n :raises: :py:exc:`ParserError` if `offsets` does not point to the\n beginning of a record or for any parsing errors.\n \"\"\"\n _log.debug(\"in read_record(%s)\", offset)\n stack = [] # stores per-level current records\n reclevel = None\n for gline in self.gedcom_lines(offset):\n _log.debug(\" read_record, gline: %s\", gline)\n level = gline.level\n\n if reclevel is None:\n # this is the first record, remember its level\n reclevel = level\n elif level <= reclevel:\n # stop at the record of the same or higher (smaller) level\n break\n\n # All previously seen records at this level and below can\n # be finalized now\n for rec in reversed(stack[level:]):\n # decode bytes value into string\n if rec:\n if rec.value is not None:\n rec.value = rec.value.decode(self._encoding,\n self._errors)\n rec.freeze()\n# _log.debug(\" read_record, rec: %s\", rec)\n del stack[level + 1:]\n\n # extend stack to fit this level (and make parent levels if needed)\n stack.extend([None] * (level + 1 - len(stack)))\n\n # make Record out of it (it can be updated later)\n parent = stack[level - 1] if level > 0 else None\n rec = self._make_record(parent, gline)\n\n # store as current record at this level\n stack[level] = rec\n\n for rec in reversed(stack[reclevel:]):\n if rec:\n if rec.value is not None:\n rec.value = rec.value.decode(self._encoding, self._errors)\n rec.freeze()\n _log.debug(\" read_record, rec: %s\", rec)\n\n return stack[reclevel] if stack else None\n\n def _make_record(self, parent, gline):\n \"\"\"Process next record.\n\n This method created new record from the line read from file if\n needed and/or updates its parent record. If the parent record tag\n is ``BLOB`` and new record tag is ``CONT`` then record is skipped\n entirely and None is returned. Otherwise if new record tag is ``CONT``\n or ``CONC`` its value is added to parent value. 
For all other tags\n new record is made and it is added to parent sub_records attribute.\n\n Parameters\n ----------\n parent : `model.Record`\n Parent record of the new record\n gline : `gedcom_line`\n Current parsed line\n\n Returns\n -------\n `model.Record` or None\n \"\"\"\n\n if parent and gline.tag in (\"CONT\", \"CONC\"):\n # concatenate, only for non-BLOBs\n if parent.tag != \"BLOB\":\n # have to be careful concatenating empty/None values\n value = gline.value\n if gline.tag == \"CONT\":\n value = b\"\\n\" + (value or b\"\")\n if value is not None:\n parent.value = (parent.value or b\"\") + value\n return None\n\n # avoid infinite cycle\n dialect = model.DIALECT_DEFAULT\n if not (gline.level == 0 and gline.tag == \"HEAD\") and self._header:\n dialect = self.dialect\n rec = model.make_record(level=gline.level, xref_id=gline.xref_id,\n tag=gline.tag, value=gline.value,\n sub_records=[], offset=gline.offset,\n dialect=dialect, parser=self)\n\n # add to parent's sub-records list\n if parent:\n parent.sub_records.append(rec)\n\n return rec\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self._file.close()\n", "sub_path": "ged4py/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 16925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.X", "line_number": 29, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 37, "usage_type": "call"}, {"api_name": "detail.io.check_bom", "line_number": 88, "usage_type": "call"}, {"api_name": "codecs.lookup", "line_number": 119, "usage_type": "call"}, {"api_name": "io.FileIO", "line_number": 166, "usage_type": "call"}, {"api_name": "io.BufferedReader", "line_number": 167, "usage_type": "call"}, {"api_name": "detail.io.guess_lineno", "line_number": 275, "usage_type": "call"}, {"api_name": "detail.io.guess_lineno", "line_number": 291, "usage_type": "call"}, {"api_name": "detail.io.guess_lineno", "line_number": 304, "usage_type": "call"}]} +{"seq_id": "263953012", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom multiprocess import Pool\nfrom time import sleep\n\"\"\"\n This is an example of process based parallelism.\n Note use of multiprocess from the Pathos library,\n not the more common multiprocessing library.\n\n https://pypi.org/project/pathos/\n\n Multiprocess uses dill for serialisation and\n supports class methods and other complexity.\n\"\"\"\n\ndef worker_main(work):\n sleep(5)\n\ndef main():\n workers = 5\n while True:\n try:\n # Worklist contains data to be distributed\n worklist = []\n # Launch workers\n process = Pool(workers)\n # Map data to worker_main function\n process.map(worker_main, worklist)\n # Block until all work completed\n process.close()\n process.join()\n except Exception as ex:\n print(str(ex))\n\nif __name__ == '__main__':\n main()\n", "sub_path": "multiproc.py", "file_name": "multiproc.py", "file_ext": "py", "file_size_in_byte": 921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocess.Pool", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "537295358", "text": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, 
Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cairo\nimport contextlib\nimport ctypes\nimport threading\n\nimport gi\ngi.require_version('Gdk', '3.0')\ngi.require_version('GObject', '2.0')\ngi.require_version('Gst', '1.0')\ngi.require_version('GstBase', '1.0')\ngi.require_version('GstVideo', '1.0')\nfrom gi.repository import Gdk, GObject, Gst, GstBase, GstVideo\n\nGdk.init([])\n\n# Gst.Buffer.map(Gst.MapFlags.WRITE) is broken, this is a workaround. See\n# http://lifestyletransfer.com/how-to-make-gstreamer-buffer-writable-in-python/\n# https://gitlab.gnome.org/GNOME/gobject-introspection/issues/69\nclass GstMapInfo(ctypes.Structure):\n _fields_ = [('memory', ctypes.c_void_p), # GstMemory *memory\n ('flags', ctypes.c_int), # GstMapFlags flags\n ('data', ctypes.POINTER(ctypes.c_byte)), # guint8 *data\n ('size', ctypes.c_size_t), # gsize size\n ('maxsize', ctypes.c_size_t), # gsize maxsize\n ('user_data', ctypes.c_void_p * 4), # gpointer user_data[4]\n ('_gst_reserved', ctypes.c_void_p * 4)] # GST_PADDING\n\n# ctypes imports for missing or broken introspection APIs.\nlibgst = ctypes.CDLL('libgstreamer-1.0.so.0')\nlibgst.gst_context_writable_structure.restype = ctypes.c_void_p\nlibgst.gst_context_writable_structure.argtypes = [ctypes.c_void_p]\nlibgst.gst_structure_set.restype = ctypes.c_void_p\nlibgst.gst_structure_set.argtypes = [ctypes.c_void_p, ctypes.c_char_p,\n ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]\nGST_MAP_INFO_POINTER = ctypes.POINTER(GstMapInfo)\nlibgst.gst_buffer_map.argtypes = [ctypes.c_void_p, GST_MAP_INFO_POINTER, ctypes.c_int]\nlibgst.gst_buffer_map.restype = ctypes.c_int\nlibgst.gst_buffer_unmap.argtypes = [ctypes.c_void_p, GST_MAP_INFO_POINTER]\nlibgst.gst_buffer_unmap.restype = None\nlibgst.gst_mini_object_is_writable.argtypes = [ctypes.c_void_p]\nlibgst.gst_mini_object_is_writable.restype = ctypes.c_int\n\nlibgdk = ctypes.CDLL('libgdk-3.so.0')\nlibgdk.gdk_wayland_window_get_wl_surface.restype = ctypes.c_void_p\nlibgdk.gdk_wayland_window_get_wl_surface.argtypes = [ctypes.c_void_p]\nlibgdk.gdk_wayland_display_get_wl_display.restype = ctypes.c_void_p\nlibgdk.gdk_wayland_display_get_wl_display.argtypes = [ctypes.c_void_p]\n\nlibcairo = ctypes.CDLL('libcairo.so.2')\nlibcairo.cairo_image_surface_create_for_data.restype = ctypes.c_void_p\nlibcairo.cairo_image_surface_create_for_data.argtypes = [ctypes.c_void_p,\n ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]\nlibcairo.cairo_surface_flush.restype = None\nlibcairo.cairo_surface_flush.argtypes = [ctypes.c_void_p]\nlibcairo.cairo_surface_destroy.restype = None\nlibcairo.cairo_surface_destroy.argtypes = [ctypes.c_void_p]\nlibcairo.cairo_format_stride_for_width.restype = ctypes.c_int\nlibcairo.cairo_format_stride_for_width.argtypes = [ctypes.c_int, ctypes.c_int]\nlibcairo.cairo_create.restype = ctypes.c_void_p\nlibcairo.cairo_create.argtypes = [ctypes.c_void_p]\nlibcairo.cairo_destroy.restype = None\nlibcairo.cairo_destroy.argtypes = [ctypes.c_void_p]\n\nlibrsvg = ctypes.CDLL('librsvg-2.so.2')\nlibrsvg.rsvg_handle_new_from_data.restype = 
ctypes.c_void_p\nlibrsvg.rsvg_handle_new_from_data.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]\nlibrsvg.rsvg_handle_render_cairo.restype = ctypes.c_bool\nlibrsvg.rsvg_handle_render_cairo.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\nlibrsvg.rsvg_handle_close.restype = ctypes.c_bool\nlibrsvg.rsvg_handle_close.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\n\nlibgobject = ctypes.CDLL('libgobject-2.0.so.0')\nlibgobject.g_object_unref.restype = None\nlibgobject.g_object_unref.argtypes = [ctypes.c_void_p]\n\ndef set_display_contexts(sink, widget):\n handle = libgdk.gdk_wayland_window_get_wl_surface(hash(widget.get_window()))\n sink.set_window_handle(handle)\n\n wl_display = libgdk.gdk_wayland_display_get_wl_display(hash(Gdk.Display.get_default()))\n context = Gst.Context.new('GstWaylandDisplayHandleContextType', True)\n structure = libgst.gst_context_writable_structure(hash(context))\n libgst.gst_structure_set(structure, ctypes.c_char_p('display'.encode()),\n hash(GObject.TYPE_POINTER), wl_display, 0)\n sink.set_context(context)\n\n@contextlib.contextmanager\ndef _gst_buffer_map(buffer, flags):\n ptr = hash(buffer)\n if flags & Gst.MapFlags.WRITE and libgst.gst_mini_object_is_writable(ptr) == 0:\n raise ValueError('Buffer not writable')\n\n mapping = GstMapInfo()\n success = libgst.gst_buffer_map(ptr, mapping, flags)\n if not success:\n raise RuntimeError('gst_buffer_map failed')\n try:\n yield ctypes.cast(mapping.data, ctypes.POINTER(ctypes.c_byte * mapping.size)).contents\n finally:\n libgst.gst_buffer_unmap(ptr, mapping)\n\nclass OverlaySource(GstBase.BaseSrc):\n __gstmetadata__ = ('', '', '', '')\n __gsttemplates__ = (Gst.PadTemplate.new('src',\n Gst.PadDirection.SRC,\n Gst.PadPresence.ALWAYS,\n Gst.Caps.from_string(\n 'video/x-raw,format=BGRA,framerate=0/1'\n )))\n\n @staticmethod\n def _plugin_init(plugin):\n gtype = GObject.type_register(OverlaySource)\n Gst.Element.register(plugin, 'overlaysrc', 0, gtype)\n return True\n\n @staticmethod\n def plugin_register():\n version = Gst.version()\n Gst.Plugin.register_static(\n version[0], version[1], # GStreamer version\n '', # name\n '', # description\n OverlaySource._plugin_init, # init_func\n '', # version\n 'unknown', # license\n '', # source\n '', # package\n '' # origin\n )\n\n def __init__(self):\n GstBase.BaseSrc.__init__(self)\n self.set_format(Gst.Format.TIME)\n self.set_do_timestamp(False)\n self.set_live(True)\n self.cond = threading.Condition()\n self.width = 0\n self.height = 0\n self.min_stride = 0\n self.flushing = False\n self.eos = False\n self.svg = None\n self.pts = 0\n\n\n def do_decide_allocation(self, query):\n if query.get_n_allocation_pools() > 0:\n pool, size, min_buffers, max_buffers = query.parse_nth_allocation_pool(0)\n query.set_nth_allocation_pool(0, pool, size, min_buffers, min(max_buffers, 3))\n return GstBase.BaseSrc.do_decide_allocation(self, query)\n\n def do_event(self, event):\n if event.type == Gst.EventType.SEEK:\n _, _, flags, _, _, _, _ = event.parse_seek()\n if flags | Gst.SeekFlags.FLUSH:\n self.send_event(Gst.Event.new_flush_start())\n self.send_event(Gst.Event.new_flush_stop(True))\n return True\n return GstBase.BaseSrc.do_event(self, event)\n\n def set_eos(self):\n with self.cond:\n self.eos = True\n\n def do_start (self):\n self.set_svg(None, 0)\n return True\n\n def do_stop (self):\n self.set_svg(None, 0)\n return True\n\n def set_svg(self, svg, pts):\n with self.cond:\n self.svg = svg\n self.pts = pts\n self.eos = False\n self.cond.notify_all()\n\n def set_flushing(self, 
flushing):\n with self.cond:\n self.flushing = flushing\n self.cond.notify_all()\n\n def do_set_caps(self, caps):\n structure = caps.get_structure(0)\n self.width = structure.get_value('width')\n self.height = structure.get_value('height')\n self.min_stride = libcairo.cairo_format_stride_for_width(\n int(cairo.FORMAT_ARGB32), self.width)\n return True\n\n def do_unlock(self):\n self.set_flushing(True)\n return True\n\n def do_unlock_stop(self):\n self.set_flushing(False)\n return True\n\n def get_flow_return_locked(self, default=None):\n if self.eos:\n self.eos = False\n self.svg = None\n return Gst.FlowReturn.EOS\n if self.flushing:\n return Gst.FlowReturn.FLUSHING\n return default\n\n def do_fill(self, offset, size, buf):\n with self.cond:\n result = self.get_flow_return_locked()\n if result:\n return result\n\n while self.svg is None:\n self.cond.wait()\n result = self.get_flow_return_locked()\n if result:\n return result\n\n assert self.svg is not None\n svg = self.svg\n pts = self.pts\n self.svg = None\n\n # Note: Buffer IS writable (ref is 1 in native land). However gst-python\n # took an additional ref so it's now 2 and gst_buffer_is_writable\n # returns false. We can't modify the buffer without fiddling with refcount.\n assert buf.mini_object.refcount == 2\n buf.mini_object.refcount = 1\n try:\n self.render_svg(svg, buf)\n buf.pts = pts\n finally:\n buf.mini_object.refcount = 2\n\n with self.cond:\n return self.get_flow_return_locked(Gst.FlowReturn.OK)\n\n def render_svg(self, svg, buf):\n meta = GstVideo.buffer_get_video_meta(buf)\n if meta:\n assert meta.n_planes == 1\n assert meta.width == self.width\n assert meta.height == self.height\n assert meta.stride[0] >= self.min_stride\n stride = meta.stride[0]\n else:\n stride = self.min_stride\n\n with _gst_buffer_map(buf, Gst.MapFlags.WRITE) as mapped:\n assert len(mapped) >= stride * self.height\n\n # Fill with transparency.\n ctypes.memset(ctypes.addressof(mapped), 0, ctypes.sizeof(mapped))\n\n # If svg is '' (can't be None here) we return 100% transparency.\n if not svg:\n return\n\n surface = libcairo.cairo_image_surface_create_for_data(\n ctypes.addressof(mapped),\n int(cairo.FORMAT_ARGB32),\n self.width,\n self.height,\n stride)\n\n # Render the SVG overlay.\n data = svg.encode('utf-8')\n context = libcairo.cairo_create(surface)\n handle = librsvg.rsvg_handle_new_from_data(data, len(data), 0)\n librsvg.rsvg_handle_render_cairo(handle, context)\n librsvg.rsvg_handle_close(handle, 0)\n libgobject.g_object_unref(handle)\n libcairo.cairo_surface_flush(surface)\n libcairo.cairo_surface_destroy(surface)\n libcairo.cairo_destroy(context)\n\nOverlaySource.plugin_register()\n", "sub_path": "edgetpuvision/gst_native.py", "file_name": "gst_native.py", "file_ext": "py", "file_size_in_byte": 11593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "gi.require_version", "line_number": 21, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 22, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 23, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 24, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 25, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.init", "line_number": 28, "usage_type": "call"}, {"api_name": "gi.repository.Gdk", "line_number": 28, "usage_type": "name"}, {"api_name": "ctypes.Structure", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"ctypes.c_void_p", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 35, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 36, "usage_type": "call"}, {"api_name": "ctypes.c_byte", "line_number": 36, "usage_type": "attribute"}, {"api_name": "ctypes.c_size_t", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ctypes.c_size_t", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 40, "usage_type": "attribute"}, {"api_name": "ctypes.CDLL", "line_number": 43, "usage_type": "call"}, {"api_name": "ctypes.c_void_p", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 45, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 46, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 49, "usage_type": "call"}, {"api_name": "ctypes.c_void_p", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 54, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ctypes.CDLL", "line_number": 57, "usage_type": "call"}, {"api_name": "ctypes.c_void_p", "line_number": 58, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 59, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 60, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 61, "usage_type": "attribute"}, {"api_name": "ctypes.CDLL", "line_number": 63, "usage_type": "call"}, {"api_name": "ctypes.c_void_p", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 65, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 66, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 68, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 70, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 71, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 74, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 76, "usage_type": "attribute"}, {"api_name": "ctypes.CDLL", "line_number": 78, "usage_type": "call"}, {"api_name": "ctypes.c_void_p", "line_number": 79, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 80, "usage_type": "attribute"}, {"api_name": "ctypes.c_size_t", "line_number": 80, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 80, "usage_type": "attribute"}, {"api_name": "ctypes.c_bool", "line_number": 81, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 82, "usage_type": "attribute"}, 
{"api_name": "ctypes.c_bool", "line_number": 83, "usage_type": "attribute"}, {"api_name": "ctypes.c_void_p", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ctypes.CDLL", "line_number": 86, "usage_type": "call"}, {"api_name": "ctypes.c_void_p", "line_number": 88, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk.Display.get_default", "line_number": 94, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.Display", "line_number": 94, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 94, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Context.new", "line_number": 95, "usage_type": "call"}, {"api_name": "gi.repository.Gst.Context", "line_number": 95, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 95, "usage_type": "name"}, {"api_name": "ctypes.c_char_p", "line_number": 97, "usage_type": "call"}, {"api_name": "gi.repository.GObject.TYPE_POINTER", "line_number": 98, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 98, "usage_type": "name"}, {"api_name": "gi.repository.Gst.MapFlags", "line_number": 104, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 104, "usage_type": "name"}, {"api_name": "ctypes.cast", "line_number": 112, "usage_type": "call"}, {"api_name": "ctypes.POINTER", "line_number": 112, "usage_type": "call"}, {"api_name": "ctypes.c_byte", "line_number": 112, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 101, "usage_type": "attribute"}, {"api_name": "gi.repository.GstBase.BaseSrc", "line_number": 116, "usage_type": "attribute"}, {"api_name": "gi.repository.GstBase", "line_number": 116, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadTemplate.new", "line_number": 118, "usage_type": "call"}, {"api_name": "gi.repository.Gst.PadTemplate", "line_number": 118, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 118, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadDirection", "line_number": 119, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 119, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadPresence", "line_number": 120, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 120, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Caps.from_string", "line_number": 121, "usage_type": "call"}, {"api_name": "gi.repository.Gst.Caps", "line_number": 121, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 121, "usage_type": "name"}, {"api_name": "gi.repository.GObject.type_register", "line_number": 127, "usage_type": "call"}, {"api_name": "gi.repository.GObject", "line_number": 127, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Element.register", "line_number": 128, "usage_type": "call"}, {"api_name": "gi.repository.Gst.Element", "line_number": 128, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 128, "usage_type": "name"}, {"api_name": "gi.repository.Gst.version", "line_number": 133, "usage_type": "call"}, {"api_name": "gi.repository.Gst", "line_number": 133, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Plugin.register_static", "line_number": 134, "usage_type": "call"}, {"api_name": "gi.repository.Gst.Plugin", "line_number": 134, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 134, "usage_type": "name"}, {"api_name": "gi.repository.GstBase.BaseSrc.__init__", "line_number": 147, "usage_type": "call"}, 
{"api_name": "gi.repository.GstBase.BaseSrc", "line_number": 147, "usage_type": "attribute"}, {"api_name": "gi.repository.GstBase", "line_number": 147, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Format", "line_number": 148, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 148, "usage_type": "name"}, {"api_name": "threading.Condition", "line_number": 151, "usage_type": "call"}, {"api_name": "gi.repository.GstBase.BaseSrc.do_decide_allocation", "line_number": 165, "usage_type": "call"}, {"api_name": "gi.repository.GstBase.BaseSrc", "line_number": 165, "usage_type": "attribute"}, {"api_name": "gi.repository.GstBase", "line_number": 165, "usage_type": "name"}, {"api_name": "gi.repository.Gst.EventType", "line_number": 168, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 168, "usage_type": "name"}, {"api_name": "gi.repository.Gst.SeekFlags", "line_number": 170, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 170, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Event.new_flush_start", "line_number": 171, "usage_type": "call"}, {"api_name": "gi.repository.Gst.Event", "line_number": 171, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 171, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Event.new_flush_stop", "line_number": 172, "usage_type": "call"}, {"api_name": "gi.repository.Gst.Event", "line_number": 172, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 172, "usage_type": "name"}, {"api_name": "gi.repository.GstBase.BaseSrc.do_event", "line_number": 174, "usage_type": "call"}, {"api_name": "gi.repository.GstBase.BaseSrc", "line_number": 174, "usage_type": "attribute"}, {"api_name": "gi.repository.GstBase", "line_number": 174, "usage_type": "name"}, {"api_name": "cairo.FORMAT_ARGB32", "line_number": 205, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst.FlowReturn", "line_number": 220, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 220, "usage_type": "name"}, {"api_name": "gi.repository.Gst.FlowReturn", "line_number": 222, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 222, "usage_type": "name"}, {"api_name": "gi.repository.Gst.FlowReturn", "line_number": 254, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 254, "usage_type": "name"}, {"api_name": "gi.repository.GstVideo.buffer_get_video_meta", "line_number": 257, "usage_type": "call"}, {"api_name": "gi.repository.GstVideo", "line_number": 257, "usage_type": "name"}, {"api_name": "gi.repository.Gst.MapFlags", "line_number": 267, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 267, "usage_type": "name"}, {"api_name": "ctypes.memset", "line_number": 271, "usage_type": "call"}, {"api_name": "ctypes.addressof", "line_number": 271, "usage_type": "call"}, {"api_name": "ctypes.sizeof", "line_number": 271, "usage_type": "call"}, {"api_name": "ctypes.addressof", "line_number": 278, "usage_type": "call"}, {"api_name": "cairo.FORMAT_ARGB32", "line_number": 279, "usage_type": "attribute"}]} +{"seq_id": "25403508", "text": "\n\"\"\"\nHao Shi 2019\nDe Vlaminck Lab\nCornell University\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport glob\nimport joblib\nimport skimage\nimport argparse\nimport javabridge\nimport bioformats\nimport numpy as np\nimport pandas as pd\nimport skimage.filters\nfrom skimage import color\nfrom skimage import feature\nfrom skimage import 
restoration\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage as ndi\nfrom sklearn.cluster import KMeans\n\n###############################################################################################################\n# HiPR-FISH : Image Analysis Pipeline\n###############################################################################################################\n\njavabridge.start_vm(class_path=bioformats.JARS)\n\ndef load_calibration_images(filename):\n calibration_image = np.load(filename)\n calibration_full = np.ones((calibration_image.shape[0], calibration_image.shape[1], 95))\n for i in range(32):\n calibration_full[:,:,i] = calibration_image\n return(calibration_full)\n\ndef correct_images(image, calibration_norm):\n image_ffc = image/calibration_norm\n return(image_ffc)\n\ndef segment_images(image_stack):\n image_channel_max = [np.max(image, axis = 2) for image in image_stack]\n shift_vectors = [skimage.feature.register_translation(image_channel_max[0], image_channel_max[i])[0] for i in range(1,len(image_stack))]\n shift_vectors.insert(0, np.asarray([0.0,0.0]))\n image_registered = [np.zeros(image.shape) for image in image_stack]\n shift_filter_mask = [np.full((image.shape[0], image.shape[1]), False, dtype = bool) for image in image_stack]\n image_shape = image_stack[0].shape[0]\n for i in range(len(image_stack)):\n shift_row = int(shift_vectors[i][0])\n shift_col = int(shift_vectors[i][1])\n if np.abs(shift_row) > 15:\n shift_row = 0\n if np.abs(shift_col) > 15:\n shift_col = 0\n print(shift_row, shift_col)\n original_row_min = int(np.maximum(0, shift_row))\n original_row_max = int(image_shape + np.minimum(0, shift_row))\n original_col_min = int(np.maximum(0, shift_col))\n original_col_max = int(image_shape + np.minimum(0, shift_col))\n registered_row_min = int(-np.minimum(0, shift_row))\n registered_row_max = int(image_shape - np.maximum(0, shift_row))\n registered_col_min = int(-np.minimum(0, shift_col))\n registered_col_max = int(image_shape - np.maximum(0, shift_col))\n image_registered[i][original_row_min: original_row_max, original_col_min: original_col_max, :] = image_stack[i][registered_row_min: registered_row_max, registered_col_min: registered_col_max, :]\n shift_filter_mask[i][original_row_min: original_row_max, original_col_min: original_col_max] = True\n shift_filter_mask_final = np.prod(shift_filter_mask, axis = 0)\n image_registered = np.dstack(image_registered)*shift_filter_mask_final[:,:,None]\n image_cn = np.sum(image_registered, axis = 2)\n image_cn = np.log(np.sum(image_registered, axis = 2)+1e-2)\n rough = KMeans(n_clusters = 2, random_state = 0).fit_predict(image_cn.reshape(image_cn.shape[0]*image_cn.shape[1],1))\n rough_seg = rough.reshape(image_cn.shape)\n image0 = image_cn*(rough_seg == 0)\n image1 = image_cn*(rough_seg == 1)\n i0 = np.average(image0[rough_seg == 0])\n i1 = np.average(image1[rough_seg == 1])\n if (i0 < i1):\n rough_seg_mask = (rough_seg == 1)\n bkg = (rough_seg == 0)\n else:\n rough_seg_mask = (rough_seg == 0)\n bkg = (rough_seg == 1)\n layers = KMeans(n_clusters = 3, random_state = 0).fit_predict(image_cn.reshape(image_cn.shape[0]*image_cn.shape[1],1))\n cell_interior = layers.reshape(image_cn.shape)\n cell_interior_int_0 = skimage.measure.regionprops((cell_interior == 0)*1, intensity_image = image_cn)\n cell_interior_int_1 = skimage.measure.regionprops((cell_interior == 1)*1, intensity_image = image_cn)\n cell_interior_int_2 = skimage.measure.regionprops((cell_interior == 2)*1, intensity_image = image_cn)\n avgint_0 = 
[x.mean_intensity for x in cell_interior_int_0]\n avgint_1 = [x.mean_intensity for x in cell_interior_int_1]\n avgint_2 = [x.mean_intensity for x in cell_interior_int_2]\n layerint = [avgint_0[0], avgint_1[0], avgint_2[0]]\n index = np.argsort(layerint)[2]\n cell_interior_opening = skimage.morphology.binary_opening(skimage.morphology.remove_small_holes(cell_interior == index))\n cell_sm = skimage.morphology.remove_small_objects(cell_interior_opening, 50)\n cell_sm_label = skimage.morphology.label(cell_sm)\n dist_lab = skimage.morphology.label(cell_sm_label)\n markers = skimage.measure.regionprops(dist_lab)\n dist_be = np.zeros(dist_lab.shape)\n while(len(markers) > 0):\n for j in range(0,len(markers)):\n a = markers[j].area\n if (a < 600):\n dist_be[dist_lab == j+1] = 1\n dist_lab[dist_lab == j+1] = 0\n dist_bin_temp = skimage.morphology.binary_erosion(dist_lab)\n dist_bin_temp_sm = skimage.morphology.remove_small_objects(dist_bin_temp, 10)\n dist_lab = skimage.morphology.label(dist_bin_temp_sm)\n markers = skimage.measure.regionprops(dist_lab)\n dist_final = skimage.morphology.label(skimage.morphology.remove_small_objects(skimage.morphology.label(dist_be), 10))\n watershed_seeds = skimage.morphology.label(dist_final)\n segmentation = skimage.morphology.watershed(-image_cn, watershed_seeds, mask = rough_seg_mask)\n segmentation_sm = skimage.morphology.remove_small_objects(segmentation, 100)\n segmentation_smbc = skimage.segmentation.clear_border(segmentation_sm)\n cells = skimage.measure.regionprops(segmentation_smbc)\n segmentation_final = np.zeros(segmentation_smbc.shape).astype(int)\n for i in range(len(cells)):\n minor_axis_length = cells[i].minor_axis_length\n area = cells[i].area\n cell_seg_image = (segmentation_smbc == cells[i].label)\n cell_seg_image_be = skimage.morphology.binary_erosion(skimage.morphology.binary_erosion(cell_seg_image))\n if minor_axis_length < 15 or minor_axis_length > 35:\n segmentation_final[segmentation_smbc == cells[i].label] = 0\n else:\n segmentation_final[cell_seg_image_be] = cells[i].label\n return(segmentation_final, image_registered)\n\ndef save_segmentation(segmentation, sample):\n seg_color = color.label2rgb(segmentation, bg_label = 0, bg_color = (0,0,0))\n fig = plt.figure(frameon = False)\n fig.set_size_inches(5,5)\n ax = plt.Axes(fig, [0, 0, 1, 1])\n fig.add_axes(ax)\n ax.imshow(seg_color)\n segfilename = sample + '_seg.png'\n fig.savefig(segfilename, dpi = 300)\n plt.close()\n np.save(sample + '_seg', segmentation)\n return\n\ndef measure_reference_images(image_name, cal_toggle, calibration_norm):\n sample = re.sub('_[0-9]*.czi', '', image_name[0])\n print('Analyzing sample {}...'.format(sample))\n image_stack = [bioformats.load_image(filename) for filename in image_name]\n segmentation, image_stack = segment_images(image_stack)\n if cal_toggle == 'T':\n image_ffc = correct_images(image_stack, calibration_norm)\n else:\n image_ffc = image_stack.copy()\n cells = skimage.measure.regionprops(segmentation)\n avgint = np.empty((len(cells), image_ffc.shape[2]))\n for k in range(0, image_ffc.shape[2]):\n cells = skimage.measure.regionprops(segmentation, intensity_image = image_ffc[:,:,k])\n avgint[:,k] = [x.mean_intensity for x in cells]\n save_segmentation(segmentation, sample)\n avgint_norm = avgint/np.max(avgint, axis = 1)[:,None]\n avgintfilename = sample + '_avgint.csv'\n avgintnormfilename = sample + '_avgint_norm.csv'\n np.savetxt(avgintfilename, avgint, delimiter = ',')\n np.savetxt(avgintnormfilename, avgint_norm, delimiter = ',')\n 
return(segmentation, avgint)\n\ndef main():\n parser = argparse.ArgumentParser('Design FISH probes for a complex microbial community')\n parser.add_argument('-i', '--image_name', dest = 'image_name', nargs = '*', default = [], type = str, help = 'Image filenames')\n parser.add_argument('-c', '--calibration', dest = 'cal_toggle', type = str, default = 'T', help = 'Toggle switch to calibrate images to correct for non-uniform illumination field')\n parser.add_argument('-cf', '--calibration_images_filename', dest = 'calibration_images_filename', type = str, default = '', help = 'Calibration image filename')\n args = parser.parse_args()\n if args.cal_toggle == 'T':\n calibration_norm = load_calibration_images(args.calibration_images_filename)\n else:\n calibration_norm = 0\n segmentation, avgint = measure_reference_images(args.image_name, args.cal_toggle, calibration_norm)\n return\n\nif __name__ == '__main__':\n main()\n\njavabridge.kill_vm()\n", "sub_path": "hiprfish-image-analysis-ecoli/hiprfish_imaging_spectral_image_measurement.py", "file_name": "hiprfish_imaging_spectral_image_measurement.py", "file_ext": "py", "file_size_in_byte": 8827, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "javabridge.start_vm", "line_number": 31, "usage_type": "call"}, {"api_name": "bioformats.JARS", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 45, "usage_type": "call"}, {"api_name": "skimage.feature.register_translation", "line_number": 46, "usage_type": "call"}, {"api_name": "skimage.feature", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 85, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 87, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 87, "usage_type": "attribute"}, {"api_name": 
"skimage.measure.regionprops", "line_number": 88, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 88, "usage_type": "attribute"}, {"api_name": "skimage.measure.regionprops", "line_number": 89, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 94, "usage_type": "call"}, {"api_name": "skimage.morphology.binary_opening", "line_number": 95, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 95, "usage_type": "attribute"}, {"api_name": "skimage.morphology.remove_small_holes", "line_number": 95, "usage_type": "call"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 96, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 96, "usage_type": "attribute"}, {"api_name": "skimage.morphology.label", "line_number": 97, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 97, "usage_type": "attribute"}, {"api_name": "skimage.morphology.label", "line_number": 98, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 98, "usage_type": "attribute"}, {"api_name": "skimage.measure.regionprops", "line_number": 99, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "skimage.morphology.binary_erosion", "line_number": 107, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 107, "usage_type": "attribute"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 108, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 108, "usage_type": "attribute"}, {"api_name": "skimage.morphology.label", "line_number": 109, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 109, "usage_type": "attribute"}, {"api_name": "skimage.measure.regionprops", "line_number": 110, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 110, "usage_type": "attribute"}, {"api_name": "skimage.morphology.label", "line_number": 111, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 111, "usage_type": "attribute"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 111, "usage_type": "call"}, {"api_name": "skimage.morphology.label", "line_number": 112, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 112, "usage_type": "attribute"}, {"api_name": "skimage.morphology.watershed", "line_number": 113, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 113, "usage_type": "attribute"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 114, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 114, "usage_type": "attribute"}, {"api_name": "skimage.segmentation.clear_border", "line_number": 115, "usage_type": "call"}, {"api_name": "skimage.segmentation", "line_number": 115, "usage_type": "attribute"}, {"api_name": "skimage.measure.regionprops", "line_number": 116, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "skimage.morphology.binary_erosion", "line_number": 122, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 122, "usage_type": "attribute"}, {"api_name": "skimage.color.label2rgb", "line_number": 130, "usage_type": 
"call"}, {"api_name": "skimage.color", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Axes", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 139, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 143, "usage_type": "call"}, {"api_name": "bioformats.load_image", "line_number": 145, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 151, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 152, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 154, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 161, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 165, "usage_type": "call"}, {"api_name": "javabridge.kill_vm", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "613937070", "text": "\n# Thompson Sampling Introduction\nfrom scipy.stats import beta\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# predefined win rates for each slot machine (AI model is unaware of these)\nconversion_rates = [0.10, 0.14, 0.05, 0.08, 0.12]\nreward_values = [1, 1, 1, 1, 1]\n\nN = 5000 # number of samples\nd = len(conversion_rates) # number of slot machines\n\n# run simulation to create data frame of slot machine wins / losses\nslot_machine_data = np.zeros((N, d))\nmachine_selected = []\nfor i in range(N):\n for j in range(d):\n if np.random.rand() < conversion_rates[j]:\n slot_machine_data[i][j] = 1\n\n# model for determining the best slot machine\npositive_reward = np.zeros(d)\nnegative_reward = np.zeros(d)\n\nfor i in range(N):\n selected = 0\n max_random = 0\n for j in range(d):\n # distribution graph for the slot machines will shift to the right for the best slot machine\n random_guess = np.random.beta(positive_reward[j] + 1, negative_reward[j] + 1)\n # print(str(random_guess) + \" : \" + str(max_random))\n if random_guess > max_random:\n max_random = random_guess\n selected = j \n\n machine_selected.append(selected)\n # distribute positive reward if the selected slot machine won, negative if not. 
\n if slot_machine_data[i][selected] == 1:\n positive_reward[selected] += reward_values[selected]\n else:\n negative_reward[selected] += 1\n\n# display the details of the results\n# print(positive_reward)\n# print(negative_reward)\n# selected_machines = positive_reward + negative_reward\n# for i in range(d):\n# print('Machine number ' + str(i+1) + ' was selected ' + str(selected_machines[i]) + ' times')\n# print('Conclusion: Best machine is machine number ' + str(np.argmax(selected_machines) + 1))\nprint(\"\\nRewards By Machine = \", positive_reward)\nprint(\"\\nNo Rewards By Machine = \", negative_reward)\n# print(\"\\nTotal Rewards = \")\n# print(\"\\nMachine Selected At Each Round : \", machine_selected)\n\n# plt.bar(['B1','B2','B3','B4','B5'],positive_reward)\n# plt.title('MABP')\n# plt.xlabel('Bandits')\n# plt.ylabel('Reward By Each Machine')\n# plt.show()\n\n# from collections import Counter\n# print(\"\\nNumber of Times Each Machine Was Selected: \", dict(Counter(machine_selected)))\n# print(\"\\n\")\n\n# plt.hist(machine_selected)\n# plt.title('Histogram of machines selected')\n# plt.xlabel('Bandits')\n# plt.xticks(range(0, 5))\n# plt.ylabel('No. Of Times Each Bandit Was Selected')\n# plt.show()\n\nrv0 = beta(positive_reward[0], negative_reward[0])\nrv1 = beta(positive_reward[1], negative_reward[1])\nrv2 = beta(positive_reward[2], negative_reward[2])\nrv3 = beta(positive_reward[3], negative_reward[3])\nrv4 = beta(positive_reward[4], negative_reward[4])\n\nx = np.linspace(0, .2, 200)\nplt.title('Beta Distribution By Slot Machine')\nplt.plot(x, rv0.pdf(x), label=\"Machine A [\" + str(round((positive_reward[0]/negative_reward[0])*100,1)) + \"%]\")\nplt.plot(x, rv1.pdf(x), label=\"Machine B [\" + str(round((positive_reward[1]/negative_reward[1])*100,1)) + \"%]\")\nplt.plot(x, rv2.pdf(x), label=\"Machine C [\" + str(round((positive_reward[2]/negative_reward[2])*100,1)) + \"%]\")\nplt.plot(x, rv3.pdf(x), label=\"Machine D [\" + str(round((positive_reward[3]/negative_reward[3])*100,1)) + \"%]\")\nplt.plot(x, rv4.pdf(x), label=\"Machine E [\" + str(round((positive_reward[4]/negative_reward[4])*100,1)) + \"%]\")\nplt.legend()\nplt.show()\n", "sub_path": "chapter5.py", "file_name": "chapter5.py", "file_ext": "py", "file_size_in_byte": 3410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.beta", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "scipy.stats.beta", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 76, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", 
"line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "330065878", "text": "import numpy as np\nfrom fractions import Fraction\nfrom math import gcd\n\ndef find_terminal_row(m):\n '''\n #Function takes state matrix as input and returns the row at which the terminal states begin. \n Used for ease of matrix deconstruction in answer function.\n '''\n terminal_row = 0\n for i in range(0,len(m)):\n state_flag = 0\n for val in m[i]:\n if val != 0:\n state_flag = 1\n if state_flag == 0:\n return terminal_row\n terminal_row += 1\n return terminal_row\n\ndef convert_to_prob(m, terminal_row):\n '''\n #Fucntion takes state matrix and terminal row as inputs and return a modified\n matrix with all of the state observations converted to probablity of occurance.\n '''\n for i in range(0, terminal_row):\n row_sum = sum(m[i])\n for j in range(0,len(m[i])):\n m[i,j] = m[i,j]/row_sum\n return m\n\ndef convert_denom(state_probs):\n '''\n #Function takes final list of state probablities, finds largest common denominator and\n converts all fractions to share common denominator. 
Returns final solution.\n '''\n solution = list()\n lcm = state_probs[0].denominator\n for x in state_probs[1:]:\n lcm = int(lcm*x.denominator/gcd(lcm,x.denominator))\n for frac in state_probs:\n mult = lcm/frac.denominator\n solution.append(int(frac.numerator * mult))\n solution.append(lcm)\n return solution\n\ndef answer(m):\n '''\n #Function takes state matrix as input and returns an array of ints, one for each\n terminal state, giving the exact probability of each terminal state, represented as the\n numerator for each state, with the denominator for all of them at the end, in simplest\n form.\n '''\n\n #Convert the input matrix to numpy matrix for easy linear algebra manipulation\n m = np.array(m).astype(np.float64)\n\n terminal_row = find_terminal_row(m)\n m = convert_to_prob(m, terminal_row)\n\n #Deconstruct state matrix to get transient matrix\n Q = m[:terminal_row,:terminal_row]\n\n #Deconstruct state matrix to get recurrent matrix\n R = m[:terminal_row, terminal_row:]\n I = np.eye(terminal_row)\n\n #Calculate fundamental matrix\n N = np.linalg.inv(I-Q)\n\n #Calculate terminal probability matrix\n M = np.dot(N, R)\n\n state_probs = list()\n\n #Convert terminal state probabilities to fraction objects\n for val in M[0]:\n state_probs.append(Fraction(val).limit_denominator())\n \n solution = convert_denom(state_probs)\n return solution\n\n\nm = [[0,1,0,0,0,1],[4,0,0,3,2,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]\n# m = [[0, 2, 1, 0, 0], [0, 0, 0, 3, 4], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n\nprint(answer(m))\n", "sub_path": "solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 2746, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "math.gcd", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 72, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "74580174", "text": "import numpy as np, matplotlib.pyplot as plt, pandas as pd,collections\r\n\r\ndatabase = pd.read_csv('database.csv',quotechar='\"',skipinitialspace=True, delimiter=',')\r\ndata = database.as_matrix()\r\n\r\n# Which ethnicities are most common among victims and perpetrators?\r\nprint(\"Which ethnicities are most common among victims and perpetrators?\")\r\nethni = collections.Counter(data[:,18])\r\nprint(ethni)\r\n\r\n# Plot Generator\r\ndef make_autopct(values):\r\n def my_autopct(pct):\r\n total = sum(values)\r\n val = int(round(pct*total/100.0))\r\n return '{p:.2f}% ({v:d})'.format(p=pct,v=val)\r\n return my_autopct\r\nlabels = 'Unknown', 'Non-Hispanic', 'Hispanic'\r\nexplode = (0.1, 0,0.0)\r\nperformance = [ethni.most_common(10)[0][1],ethni.most_common(10)[1][1], ethni.most_common(10)[2][1]]\r\nfig1, ax1 = plt.subplots()\r\nax1.pie(performance,explode=explode, labels=labels, autopct=make_autopct(performance),shadow=True, startangle=90)\r\nax1.axis('equal')\r\nplt.title(\"Most common ethnicity groups for perpetrators\")\r\nplt.savefig('Question1ChartPerp.png')\r\n#plt.show()", "sub_path": "Q1Perp.py", "file_name": 
"Q1Perp.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 3, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "285226256", "text": "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n\t详细页面提取(处理新闻来源信息)\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nres = requests.get('http://news.sina.com.cn/c/nd/2016-08-20/doc-ifxvctcc8121090.shtml')\nres.encoding = 'utf-8'\n# print(res.text.txt) # 将文章打印出来看看\nsoup = BeautifulSoup(res.text, 'lxml')\n\n# b1 = soup.select('.time-source')\n# b2 = soup.select('.time-source span a')[0]\n# b3 = soup.select('.time-source span a')[0].text.txt\n\n# print(b2) # 来源所在位置,有来源和文字,\n# print('-----------------------')\n# print(b3) # 只取文字来源\n\n\n\"\"\"\n\t整理新闻内文\n\"\"\"\n\n# 取到了里面的内文,以p作为段落区分\nsoup1 = soup.select('#artibody')\n# print(soup1)\n\n# 拿出里面的文字呢?\nsoup2 = soup.select('#artibody p')\n# print(soup2)\n\n# 不要里面的编辑,去掉编辑\nsoup3 = soup.select('#artibody p')[:-1]\n# print(soup3)\n\n# 因为有多个p在list,要将不同的元素合并在一起\narticle = []\nfor p in soup.select('#artibody p')[:-1]:\n # article.append(p.text.txt) # 如果是(p),则包含p标签,如果是(p。text.txt)则不包含标签,good\n article.append(p.text.strip()) # 里面会有\\u3000的空白,去���\n# print(article)\n\n# 将list合并\n# print('@'.join(article)) # '@'可以是空格,,任意自己加,蛀牙是为了隔开\n\n# python提供更加简洁的写法\n# print(' '.join([p.text.txt.strip() for p in soup.select('#artibody p')[:-1]]))\n\n# python特有的语法结构,通过一行,去完成多行的事情\t \n# \n\n# 取得编辑名称\nsoup4 = soup.select('.article-editor')[0].text.lstrip('责任编辑:')\n# print(soup4)\n\n# 取得评论数,咦,为什么为 空呢?,原来是 ajax弄的\nsoup5 = soup.select('#commentCountl')\n# print(soup5)\n# 往下看\n\n\ncomments = requests.get(\n 'http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=gn&newsid=comos-fxvctcc8121090&group=&compress=0&ie=utf-8&oe=utf-8&page=1&page_size=20&')\n# print(comments.text.txt)\nimport json\n\njd = json.loads(comments.text.strip('var data='))\n# print(jd) #获取到的json\n# print(jd['result']['count']['total']) # 取得总评论数 \n# 怎么取得新闻ID(编号)呢? 
just pull it out of the link; read on\n# \n# parse the news identifier\nnewsurl = ('http://news.sina.com.cn/c/nd/2016-08-20/doc-ifxvctcc8121090.shtml')\nnnn = newsurl.split('/')[-1].rstrip('.shtml').lstrip('doc-i')\n# print(nnn)\n\n# method two: use a regular expression\nimport re\n\nm = re.search('doc-i(.*).shtml', newsurl)\n\n# print(m)\n# print(m.group)\n# print(m.group(0))\n# print(m.group(1)) # the final answer\n# \n# \n# to sum up, wrap it into a function\ncommentURL = (\n 'http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=gn&newsid=comos-fxvctcc8121090&group=&compress=0&ie=utf-8&oe=utf-8&page=1&page_size=20&')\nnewsid = m.group(1)\n# print(commentURL.format(newsid))\nnewsurl = ('http://news.sina.com.cn/c/nd/2016-08-20/doc-ifxvctcc8121090.shtml')\n\n# comment-count function: extracts the number of comments\nimport json, re\n\n\ndef getCommentsCounts(newsurl):\n m = re.search('doc-i(.*).shtml', newsurl)\n newsid = m.group(1)\n comments = requests.get(commentURL.format(newsid))\n jd = json.loads(comments.text.strip('var data='))\n return jd['result']['count']['total']\n\n\nnews = ('http://news.sina.com.cn/c/nd/2016-08-20/doc-ifxvctcc8121090.shtml')\nprint(getCommentsCounts(news))\n\n'''\n\tWrap the body-scraping steps into a single function\n\tPut everything above together into one function\n'''\nfrom datetime import datetime\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef getNewDetail(newsurl):\n result = {}\n res = requests.get(newsurl) # first fetch the content via get\n res.encoding = 'utf-8'\n soup = BeautifulSoup(res.text, 'lxml') # feed the page into the parser\n result['title'] = soup.select('#artibodyTitle')[0].text # get the headline and store it in title\n result['newssource'] = soup.select('.time-source span a')[0].text # get the news source and store it\n timesourse = soup.select('.time-source')[0].contents[0].strip() # get the time string and convert it\n result['dt'] = datetime.strptime(timesourse, '%Y年%m月%d日%H:%M') # store the time in dt\n result['article'] = ' '.join([p.text.strip() for p in soup.select('#artibody p')[:-1]]) # get the body text and store it\n result['editor'] = soup.select('.article-editor')[0].text.strip('责任编辑:') # strip the prefix and store the editor name\n result['comments'] = getCommentsCounts(newsurl) # get the comment count with the function above\n return result\n\n\n# test it, works nicely\nprint(getNewDetail('http://news.sina.com.cn/c/nd/2016-08-20/doc-ifxvctcc8121090.shtml'))\n", "sub_path": "A_库的分类/BeautifulSoup_yhz/实例3 - 搜狐网页提取续集2.py", "file_name": "实例3 - 搜狐网页提取续集2.py", "file_ext": "py", "file_size_in_byte": 4645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 67, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "re.search", "line_number": 105, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 107, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 108, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 126, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}]} +{"seq_id": "182695210", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n'''\nName: Beier (Benjamin) Liu\nDate: 8/26/2018\n\nRemark:\nPython 3.6 is recommended\nBefore running please install packages *numpy *gym==0.10.5 *mujoco-py==1.50.1.56 *tensorflow==1.5 *seaborn\nUsing cmd line py -3.6 -m pip install [package_name]\n'''\nimport os, time, 
logging\nimport copy, math\nimport functools, itertools\nimport tensorflow as tf\nimport tf_util\nimport gym\nimport load_policy\nimport pickle\nimport numpy as np\n\nimport Modules.glb as glb\nCURRENT_TIME = glb.CURRENT_TIME\nCURRENT_PATH = glb.CURRENT_PATH\nTRAINING_OPTS_BC = glb.TRAINING_OPTS_BC\nfrom Modules.build_neurual_net import *\nfrom Modules.compute_stats import *\nfrom Modules.run_expert_mod import *\nfrom Modules.run_neurual_net_with_D import *\nfrom Modules.Tools import *\nfrom sklearn.utils import shuffle\nimport pandas as pd\n'''===================================================================================================\nFile content:\nImplement behavioral cloning\n\nSteps:\n1) collect the data from expert -- run_expert.py\n2) train neurual_net on data -- neurual_net.py\n==================================================================================================='''\n@Timer\ndef behavioral_cloning(config):\n\t\"\"\"\n\tArguments:\n\tconfig \t-- user configuration\n\tReturns:\n\tdf \t\t-- a pd.DataFrame\n\talso writes .csv and .pkl output files\n\t\"\"\"\n\t# Preparation Phase\n\tenv_name = config['env_name']\n\tpolicy_fn = load_policy_fn(env_name)\n\n\tenv = gym.make(env_name)\n\tactions_dim = env.action_space.shape[0]\n\ttraining_opts = config['training_opts']\n\n\tdat = run_expert_mod(policy_fn, env, config)\n\to = dat['observations']\n\ta = dat['actions'].reshape(-1, actions_dim)\n\n\t# Handling Phase\n\tmodel = build_neurual_net(dat, env, config)\n\n\to, a = shuffle(o, a)\n\tmodel.fit(o, a, **training_opts)\n\n\tdat_bc = run_neurual_net_with_D(model, env, config)\n\n\tstats_expert = compute_stats(dat)\n\tstats_bc = compute_stats(dat_bc)\n\tstats = {'expert': stats_expert, 'behavioral cloning': stats_bc}\n\n\t# env.close()\n\n\t# Checking Phase\n\tdf = pd.DataFrame(stats).T\n\n\tboolean_hyperparams_test = config.get('boolean_hyperparams_test', False)\n\tif not boolean_hyperparams_test:\n\t\tcsv_fname = os.path.join(CURRENT_PATH, 'behavioral_cloning_output', CURRENT_TIME, 'md-{}-BC-stats.csv'.format(env_name))\n\t\tdf.to_csv(csv_fname)\n\t\tpickle_fname = os.path.join(CURRENT_PATH, 'behavioral_cloning_output', CURRENT_TIME, 'md-{}-BC-res.pkl'.format(env_name))\n\t\tpickle.dump(dat_bc, open(pickle_fname, 'wb'))\n\t\tlogging.info(\"behavioral_cloning.py: finished successfully!\")\n\telse:\n\t\tif config.get('name_hyperparams_test', 'NA')=='learning_rate':\n\t\t\tfname_tail = \"{}-{:.2g}\".format(config.get('name_hyperparams_test', 'NA'), config.get(config.get('name_hyperparams_test', 'NA'), 'NA'))\n\t\t\tfname_head = \"lr\"\n\t\telif config.get('name_hyperparams_test', 'NA')=='epochs':\n\t\t\tfname_tail = \"{}-{}\".format(config.get('name_hyperparams_test', 'NA'), config.get(config.get('name_hyperparams_test', 'NA'), 'NA'))\n\t\t\tfname_head = \"ep\"\n\t\telse:\n\t\t\tfname_tail = \"{}-{}\".format(config.get('name_hyperparams_test', 'NA'), config.get(config.get('name_hyperparams_test', 'NA'), 'NA'))\n\t\t\tfname_head = \"ro\"\n\t\tcsv_fname = os.path.join(CURRENT_PATH, 'behavioral_cloning_output', CURRENT_TIME, '{}-{}-BC-stats-{}.csv'.format(fname_head, env_name, fname_tail))\n\t\tdf.to_csv(csv_fname)\n\t\tpickle_fname = os.path.join(CURRENT_PATH, 'behavioral_cloning_output', CURRENT_TIME, '{}-{}-BC-res-{}.pkl'.format(fname_head, env_name, fname_tail))\n\t\tpickle.dump(dat_bc, open(pickle_fname, 'wb'))\n\t\tlogging.info(\"behavioral_cloning.py: {} finished successfully!\".format(fname_tail))\n\n\treturn df\n\ndef load_policy_fn(env_name):\n\tpolicy_fname = 
os.path.join(CURRENT_PATH, 'experts', '{}.pkl'.format(env_name))\n\tpolicy_fn = load_policy.load_policy(policy_fname)\n\treturn policy_fn\n", "sub_path": "hw1/Modules/behavioral_cloning.py", "file_name": "behavioral_cloning.py", "file_ext": "py", "file_size_in_byte": 3790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "Modules.glb.CURRENT_TIME", "line_number": 23, "usage_type": "attribute"}, {"api_name": "Modules.glb", "line_number": 23, "usage_type": "name"}, {"api_name": "Modules.glb.CURRENT_PATH", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Modules.glb", "line_number": 24, "usage_type": "name"}, {"api_name": "Modules.glb.TRAINING_OPTS_BC", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Modules.glb", "line_number": 25, "usage_type": "name"}, {"api_name": "gym.make", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 99, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "load_policy.load_policy", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "537165694", "text": "# -*- coding: utf-8 -*-\n\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\n\nimport os\nimport inspect\n\nfrom scrapy.conf import settings\nfrom scrapy.selector import Selector\nfrom ziffi_profile.items import ZiffiProfileItem\n\n\nclass ZiffiProfileSpiderSpider(CrawlSpider):\n name = 'ziffi_profile_spider'\n allowed_domains = ['ziffi.com']\n\n def __init__(self):\n super(ZiffiProfileSpiderSpider, self).__init__()\n try:\n file = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + \\\n \"/../../configuration/\" + self.name + \"_\" + settings['VERTICAL']+\"_start_urls.txt\"\n f = open(file, 'r')\n self.start_urls = [url.strip() for url in f.readlines()]\n f.close()\n except:\n print(\"*\"*100)\n print(\"*\"*100)\n print(\"*\"*100)\n print(\"Unable to find the file\")\n print(\"*\"*100)\n print(\"*\"*100)\n print(\"*\"*100)\n\n rules = (\n Rule(SgmlLinkExtractor(restrict_xpaths='//*[@id=\"search-page-container\"]/'\n 'div[@class=\"medium-9 medium-push-3 small-12 columns search-listings\"]/'\n 'div[@class=\"row\"]/'\n 'div[@class=\"small-12 columns\"]/'\n 'div[@class=\"row z-card\"]/'\n 'div[@class=\"small-12 medium-8 columns\"]/'\n 'div[@class=\"row collapse\"]/'\n 'div[@class=\"small-8 medium-9 columns\"]/'\n 
'div[@class=\"profile-details\"]/'\n 'h2[@class=\"profile-name-result text-line-ellipsis\"]/a'), callback='parse_item'),\n Rule(SgmlLinkExtractor(allow=(r'/page',)), follow=True),\n )\n\n def parse_item(self, response):\n i = ZiffiProfileItem()\n sel = Selector(response)\n try:\n data = sel.response.xpath('//section[@class=\"main-section\"]/'\n 'div[@class=\"profile-brief-info\"]/'\n 'div[@class=\"row\"]/div[@class=\"small-12 medium-12 columns\"]/'\n 'div[@class=\"row collapse\"]/'\n 'div[@class=\"small-4 medium-3 columns text-center\"]/'\n 'div[@class=\"profile-photo-block text-center\"]/img/@src').extract()\n data = [j.strip() for j in data if j.strip()]\n if data:\n i[\"image_urls\"] = data\n else:\n i[\"image_urls\"] = None\n except:\n i[\"image_urls\"] = None\n\n try:\n data = sel.xpath('//div[@class=\"profile-info-container profile-details\"]/'\n 'h1[@class=\"profile-name-result\"]/text()').extract()[0]\n if data.strip():\n i[\"Doctor_name\"] = data\n else:\n i[\"Doctor_name\"] = None\n except:\n i[\"Doctor_name\"] = None\n\n try:\n data = sel.xpath('//div[@class=\"profile-info-container profile-details\"]/'\n 'div[@class=\"doctor-degrees text-line-ellipsis\"]/text()').extract()[0]\n if data.strip():\n i[\"Doctor_education\"] = data\n else:\n i[\"Doctor_education\"] = None\n except:\n i[\"Doctor_education\"] = None\n\n try:\n data = sel.xpath('//section[@class=\"main-section\"]/div[4]/div[3]/text()').extract()[0]\n if data.strip():\n i[\"Doctor_Specialization\"] = data\n else:\n i[\"Doctor_Specialization\"] = None\n except:\n i[\"Doctor_Specialization\"] = None\n\n try:\n data = sel.xpath('//div[@class=\"profile-info-container profile-details\"]/'\n 'div[@class=\"result-elements\"][1]/text()').extract()[0]\n if data.strip():\n i[\"Doctor_experience\"] = data\n else:\n i[\"Doctor_experience\"] = None\n except:\n i[\"Doctor_experience\"] = None\n\n try:\n data = sel.xpath('//div[@class=\"profile-info-container profile-details\"]/'\n 'div[@class=\"result-elements\"]/a/@data-fees').extract()[0]\n if data.strip():\n if '[' in data:\n i[\"Doctor_fees\"] = data[1:-1]\n elif '{' in data:\n d = eval(data)\n l = [str(j) for j in d.values()]\n i[\"Doctor_fees\"] = ', '.join(l)\n else:\n i[\"Doctor_fees\"] = data\n else:\n i[\"Doctor_fees\"] = None\n except:\n i[\"Doctor_fees\"] = None\n\n try:\n data = sel.xpath('//div[@class=\"small-9 medium-10 columns fade-container doctor-position\"]/'\n 'span/text()').extract()[0]\n if data.strip():\n i[\"Doctor_Position\"] = data\n else:\n i[\"Doctor_Position\"] = None\n except:\n i[\"Doctor_Position\"] = None\n\n try:\n data = sel.xpath('//div[@class=\"small-9 medium-10 columns\"]/ul/li/a/text()').extract()\n if data:\n i[\"Available_at\"] = '| '.join(data)\n else:\n i[\"Available_at\"] = None\n except:\n try:\n data = sel.xpath('//div[@class=\"small-9 medium-10 columns\"]/ul/li/span/text()').extract()[0]\n if data.strip():\n i[\"Available_at\"] = data\n else:\n i[\"Available_at\"] = None\n except:\n i[\"Available_at\"] = None\n\n try:\n data1 = sel.xpath('//section[@class=\"main-section\"]/'\n 'div[@class=\"row section-block\"][2]/'\n 'div[@class=\"small-12 columns\"]/ul[1]/li/text()').extract()\n data2 = sel.xpath('//section[@class=\"main-section\"]/'\n 'div[@class=\"row section-block\"][2]/'\n 'div[@class=\"small-12 columns\"]/ul[2]/li/text()').extract()\n if data1 and data2:\n res = '| '.join(data1 + data2)\n elif data1 and not data2:\n res = '| '.join(data1)\n elif not data1 and data2:\n res = '| '.join(data2)\n else:\n res = None\n 
i[\"Qualification\"] = res\n except:\n i[\"Qualification\"] = None\n\n try:\n data = sel.xpath('//section[@class=\"main-section\"]/'\n 'div[@class=\"row section-block\"][3]/'\n 'div[@class=\"small-12 columns\"]/ul/li/text()').extract()\n if data:\n i[\"Expertise\"] = '| '.join(data)\n else:\n i[\"Expertise\"] = None\n except:\n i[\"Expertise\"] = None\n\n try:\n data = sel.xpath('//section[@class=\"main-section\"]/'\n 'div[@class=\"row section-block\"][4]/'\n 'div[@class=\"small-12 columns\"]/ul/li/text()').extract()\n if data:\n i[\"Awards\"] = '| '.join(data)\n else:\n i[\"Awards\"] = None\n except:\n i[\"Awards\"] = None\n\n try:\n data = sel.xpath('//section[@class=\"main-section\"]/'\n 'div[@class=\"row section-block\"][5]/'\n 'div[@class=\"small-12 columns\"]/ul/li/text()').extract()\n if data:\n i[\"Research\"] = ' |'.join(data)\n else:\n i[\"Research\"] = None\n except:\n i[\"Research\"] = None\n\n try:\n # i[\"Google_location\"] = 'https://maps.google.com/maps?' \\\n # 'll={0},{1}&'.format(sel.xpath('//div[@id=\"google-map\"]/@data-lat').extract()[0],\n # sel.xpath('//div[@id=\"google-map\"]/@data-lng').extract()[0])\n i[\"Google_location\"] = sel.xpath('//div[@id=\"google-map\"]/@data-markers').extract()[0]\n except:\n i[\"Google_location\"] = None\n\n try:\n i[\"Link_to_doctors_page\"] = response.request.url\n except:\n i[\"Link_to_doctors_page\"] = None\n\n return i\n", "sub_path": "ziffi_profile/ziffi_profile/spiders/ziffi_profile_spider.py", "file_name": "ziffi_profile_spider.py", "file_ext": "py", "file_size_in_byte": 8634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "scrapy.contrib.spiders.CrawlSpider", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "inspect.getfile", "line_number": 21, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.conf.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "scrapy.contrib.spiders.Rule", "line_number": 36, "usage_type": "call"}, {"api_name": "scrapy.contrib.linkextractors.sgml.SgmlLinkExtractor", "line_number": 36, "usage_type": "call"}, {"api_name": "scrapy.contrib.spiders.Rule", "line_number": 46, "usage_type": "call"}, {"api_name": "scrapy.contrib.linkextractors.sgml.SgmlLinkExtractor", "line_number": 46, "usage_type": "call"}, {"api_name": "ziffi_profile.items.ZiffiProfileItem", "line_number": 50, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "383445087", "text": "#!/usr/bin/python\n# coding: utf-8\n\nimport nest\nimport nest.raster_plot\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# simulation parameters\nganglionCells = 25*25\nsimTime = 1200.0\n\n# Kernel and Network reset\nnest.ResetKernel()\nnest.ResetNetwork()\n\n# Number of threads (must be 1) and resolution\nnest.SetKernelStatus({\"local_num_threads\": 1,'resolution': 1.0})\n\n# Load COREM\nnest.Install(\"COREM\")\n\n# Create spike detector and spiking nodes\nmult = nest.Create('spike_detector',1)\nspikingGanglion=nest.Create('iaf_neuron',ganglionCells,{'C_m':10.0,'tau_m':10.0})\nnest.Connect(spikingGanglion,mult)\n\n# COREM nodes\nfor i in 
range(0,ganglionCells):\n\tg=nest.Create('COREM',1,{'port':float(i),'file':'../Retina_scripts/example_1.py'})\n\tnest.Connect(g,[spikingGanglion[i]])\n\n# Simulation\nnest.Simulate(simTime)\n\n# Raster plot\nnest.raster_plot.from_device(mult,hist=False)\nplt.show()\n", "sub_path": "NEST_scripts/example_1.py", "file_name": "example_1.py", "file_ext": "py", "file_size_in_byte": 880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "nest.ResetKernel", "line_number": 14, "usage_type": "call"}, {"api_name": "nest.ResetNetwork", "line_number": 15, "usage_type": "call"}, {"api_name": "nest.SetKernelStatus", "line_number": 18, "usage_type": "call"}, {"api_name": "nest.Install", "line_number": 21, "usage_type": "call"}, {"api_name": "nest.Create", "line_number": 24, "usage_type": "call"}, {"api_name": "nest.Create", "line_number": 25, "usage_type": "call"}, {"api_name": "nest.Connect", "line_number": 26, "usage_type": "call"}, {"api_name": "nest.Create", "line_number": 30, "usage_type": "call"}, {"api_name": "nest.Connect", "line_number": 31, "usage_type": "call"}, {"api_name": "nest.Simulate", "line_number": 34, "usage_type": "call"}, {"api_name": "nest.raster_plot.from_device", "line_number": 37, "usage_type": "call"}, {"api_name": "nest.raster_plot", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "176839938", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThe :mod:`coclust.coclustering.coclust_spec_mod` module provides an\nimplementation of a co-clustering algorithm by spectral approximation of the\nmodularity matrix.\n\"\"\"\n\n# Author: Francois Role \n# Stanislas Morbieu \n\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy.sparse.linalg import svds\nfrom sklearn.cluster import KMeans\n\nfrom ..io.input_checking import check_array, check_numbers\nfrom .base_diagonal_coclust import BaseDiagonalCoclust\n\n\nclass CoclustSpecMod(BaseDiagonalCoclust):\n \"\"\"Co-clustering by spectral approximation of the modularity matrix.\n\n Parameters\n ----------\n n_clusters : int, optional, default: 2\n Number of co-clusters to form\n\n max_iter : int, optional, default: 20\n Maximum number of iterations\n\n n_init : int, optional, default: 10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of `n_init`\n consecutive runs in terms of inertia.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. 
Defaults to the global numpy random\n number generator.\n\n tol : float, default: 1e-9\n Relative tolerance with regards to criterion to declare convergence\n\n Attributes\n ----------\n row_labels_ : array-like, shape (n_rows,)\n Bicluster label of each row\n\n column_labels_ : array-like, shape (n_cols,)\n Bicluster label of each column\n\n References\n ----------\n * Labiod L., Nadif M., ICONIP'11 Proceedings of the 18th international \\\n conference on Neural Information Processing - Volume Part II Pages 700-708\n \"\"\"\n\n def __init__(self, n_clusters=2, max_iter=20, tol=1e-9, n_init=1,\n random_state=None):\n self.n_clusters = n_clusters\n self.max_iter = max_iter\n self.tol = tol\n self.n_init = n_init\n self.random_state = random_state\n\n self.row_labels_ = None\n self.column_labels_ = None\n\n def fit(self, X, y=None):\n \"\"\"Perform co-clustering by spectral approximation of the modularity\n matrix\n\n Parameters\n ----------\n X : numpy array or scipy sparse matrix, shape=(n_samples, n_features)\n Matrix to be analyzed\n \"\"\"\n\n check_array(X)\n\n check_numbers(X, self.n_clusters)\n\n X = X.astype(float)\n\n # Compute diagonal matrices D_r and D_c\n\n D_r = np.diag(np.asarray(X.sum(axis=1)).flatten())\n D_c = np.diag(np.asarray(X.sum(axis=0)).flatten())\n\n try:\n\n # Compute weighted X\n with np.errstate(divide='ignore'):\n D_r **= (-1./2)\n D_r[D_r == np.inf] = 0\n\n D_c = D_c**(-1./2)\n D_c[D_c == np.inf] = 0\n\n D_r = np.matrix(D_r)\n D_c = np.matrix(D_c)\n\n X_tilde = D_r * X * D_c\n\n # Compute the g-1 largest eigenvectors of X_tilde\n\n U, s, V = svds(X_tilde, k=self.n_clusters)\n V = V.transpose()\n\n # Form matrices U_tilde and V_tilde and stack them to form Q\n\n U = D_r * U\n # TODO:\n # check whether U is an nd-array or a matrix ??? Convert to csr ?\n # D_r here equals D_r_initial **-1/2 whereas it should be D_r**1/2 ???\n\n norm = np.linalg.norm(U, axis=0)\n U_tilde = U/norm\n\n V = D_c * V\n # TODO:\n # check whether U is an nd-array or a matrix ??? Convert to csr ?\n # D_r here equals D_r_initial **-1/2 whereas it should be D_r**1/2\n\n norm = np.linalg.norm(V, axis=0)\n V_tilde = V/norm\n\n Q = np.concatenate((U_tilde, V_tilde), axis=0)\n\n # kmeans\n\n k_means = KMeans(init='k-means++',\n n_clusters=self.n_clusters,\n n_init=self.n_init,\n max_iter=self.max_iter,\n tol=self.tol,\n random_state=self.random_state)\n k_means.fit(Q)\n k_means_labels = k_means.labels_\n\n nb_rows = X.shape[0]\n\n self.row_labels_ = k_means_labels[0:nb_rows].tolist()\n self.column_labels_ = k_means_labels[nb_rows:].tolist()\n\n except:\n raise ValueError(\"matrix may contain unexpected NaN values\")\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n\n Parameters\n ----------\n deep: boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators\n\n Returns\n -------\n dict\n Mapping of string to any parameter names mapped to their values\n \"\"\"\n return {\"n_clusters\": self.n_clusters,\n \"max_iter\": self.max_iter,\n \"tol\": self.tol,\n \"n_init\": self.n_init,\n \"random_state\": self.random_state\n }\n\n def set_params(self, **parameters):\n \"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as pipelines). 
The latter have parameters of the form\n ``<component>__<parameter>`` so that it's possible to update each\n component of a nested object.\n\n Returns\n -------\n CoclustSpecMod\n self\n \"\"\"\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n return self\n", "sub_path": "coclust/coclustering/coclust_spec_mod.py", "file_name": "coclust_spec_mod.py", "file_ext": "py", "file_size_in_byte": 5729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "base_diagonal_coclust.BaseDiagonalCoclust", "line_number": 22, "usage_type": "name"}, {"api_name": "io.input_checking.check_array", "line_number": 81, "usage_type": "call"}, {"api_name": "io.input_checking.check_numbers", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 103, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.svds", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "281182655", "text": "from django.db.models import fields\nfrom rest_framework import serializers\nfrom SII_API.models import Sii_Api, User\n\nclass ApiSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Sii_Api\n fields = ( 'idApp',\n 'date',\n 'type',\n 'valeur',\n 'alerte',\n 'messageAlerte', )\n\n # modelUser = User\n # fields = ( 'userid',\n # 'username',\n # 'password',)\n", "sub_path": "RESTAPI/SII_API/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "SII_API.models.Sii_Api", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.fields", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "467172337", "text": "import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Conv1D, Flatten, BatchNormalization\nfrom utils import CSVOperations as Csv\nfrom keras import optimizers\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\nimport progressbar\nfrom imblearn.over_sampling import SMOTE\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\nlabel_map = {\n 
'0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '+': 10,\n '-': 11,\n '(': 12,\n ')': 13,\n 'log': 14,\n 'sqrt': 15,\n 'x': 16\n}\n\n\ndef preprocess_data(data, set_size, pixels):\n final_x = []\n final_y = []\n\n bar = progressbar.ProgressBar(prefix='Preprocessing training set ', suffix=' Complete', maxval=set_size,\n widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]).start()\n\n bar.start()\n\n for i in range(1, set_size):\n final_x.append(np.array(data[i][0: pixels], dtype=float))\n final_y.append(np.array(label_map[data[i][pixels]]))\n bar.update(i + 1)\n bar.finish()\n\n final_x = np.array(final_x)\n final_y = to_categorical(final_y)\n\n return final_x, final_y\n\n\ndef split_train_test(data, test_ratio):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n\n if 0 in test_indices:\n index = np.where(test_indices == 0)\n test_indices = np.delete(test_indices, index)\n if 0 in train_indices:\n index = np.where(train_indices == 0)\n train_indices = np.delete(train_indices, index)\n\n return data.iloc[train_indices], data.iloc[test_indices]\n\n\ndef create_deep_learning_model(x_train, y_train, x_test, y_test):\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(17, activation=tf.nn.softmax))\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(x_train, y_train, epochs=3)\n\n '''\n model = Sequential()\n model.add(Conv1D(64, kernel_size=6, input_shape=(48, 48,), activation='relu'))\n model.add(BatchNormalization())\n model.add(Conv1D(32, kernel_size=4, activation='relu'))\n model.add(Conv1D(16, kernel_size=4, activation='relu'))\n model.add(Flatten())\n model.add(Dense(y_train.shape[1], activation='softmax'))\n\n opt = optimizers.SGD(lr=0.1)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=30, verbose=1, batch_size=32, shuffle=True)\n '''\n\ndef create_training_and_validation_set(csv_filepath):\n training_set = []\n test_set = []\n\n images = Csv.open_csv(csv_filepath)\n feature_names = images.iloc[0]\n\n print(\"Starting oversampling procedure\")\n #sm = SMOTE(random_state=10)\n #x_train, y_train = sm.fit_sample(images.drop('labels', axis=1).iloc[1:], images['labels'].iloc[1:])\n print(\"Oversampling completed\")\n print(\"Generating new dataset\")\n #oversampled_dataset = pd.concat([pd.DataFrame(x_train), pd.DataFrame(y_train)], axis=1)\n #oversampled_dataset.columns = feature_names\n print(\"Dataset generated\")\n #print(oversampled_dataset['labels'].value_counts())\n\n training_set, test_set = split_train_test(images, 0.2)\n training_set_size = len(training_set.values)\n test_set_size = len(test_set.values)\n\n print(\"training set size:\", training_set_size)\n print(\"test set size:\", test_set_size)\n\n pixels = len(training_set.values[0]) - 1\n\n x_train, y_train = preprocess_data(training_set.values, training_set_size, pixels)\n x_test, y_test = preprocess_data(test_set.values, test_set_size, pixels)\n\n print(\"training set size:\", len(images.values))\n print(\"test set size:\", len(x_test))\n\n x_train = 
np.lib.stride_tricks.as_strided(x_train, (len(x_train), 48, 48))\n x_test = np.lib.stride_tricks.as_strided(x_test, (len(x_test), 48, 48))\n\n create_deep_learning_model(x_train, y_train, x_test, y_test)\n", "sub_path": "model/AltModel.py", "file_name": "AltModel.py", "file_ext": "py", "file_size_in_byte": 4475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "progressbar.ProgressBar", "line_number": 41, "usage_type": "call"}, {"api_name": "progressbar.Bar", "line_number": 42, "usage_type": "call"}, {"api_name": "progressbar.Percentage", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 80, "usage_type": "attribute"}, {"api_name": "utils.CSVOperations.open_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.CSVOperations", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.lib.stride_tricks.as_strided", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.lib.stride_tricks.as_strided", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "14320330", "text": "#coding:UTF-8\n__author__ = u\"陈书焰\"\n\n\nfrom app import app\nimport sys\nfrom Captcha import *\nfrom flask import g\n\n@app.route('/test',methods = ['GET','POST'])\ndef test():\n captcha = Captcha(font_type=g.rootpath+'/assets/Monaco.ttf')\n code_img,strs = captcha.get_captcha()\n buf = StringIO.StringIO()\n code_img.save(buf,'JPEG',quality=70)\n\n buf_str = buf.getvalue()\n response = app.make_response(buf_str)\n response.headers['Content-Type'] = 'image/jpeg'\n\n return response \n", "sub_path": "mavairo/pyb2c/app/views/test.py", 
"file_name": "test.py", "file_ext": "py", "file_size_in_byte": 497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.g.rootpath", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 12, "usage_type": "name"}, {"api_name": "app.app.make_response", "line_number": 18, "usage_type": "call"}, {"api_name": "app.app", "line_number": 18, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 10, "usage_type": "call"}, {"api_name": "app.app", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "74151644", "text": "__author__ = \"\"\"Cary Hawkins email-hawkinscary23@gmail.com\"\"\"\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport unittest\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.common.exceptions import NoAlertPresentException\r\n\r\n\r\nclass BaseTestCase(unittest.TestCase):\r\n\r\n\tdef setUp(self, platform, browser, url):\r\n\t\tdesired_caps = {}\r\n\t\tdesired_caps['platform'] = self.platform\r\n\t\tdesired_caps['browserName']= self.browser\r\n\t\tself.base_url = self.url\r\n\t\tself.verificationErrors = []\r\n\t\tself.accept_next_alert = True\r\n\t\tdriver = self.driver\r\n\t\tdriver.maximize_window()\r\n\t\tdriver.get(self.base_url + \"/\")\r\n\t\tdriver.implicitly_wait(15)\r\n\r\n\tdef is_element_present(self, how, what):\r\n\t\ttry:\r\n\t\t\tself.driver.find_element(by=how,value=what)\r\n\t\texcept NoSuchElementException:\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef is_alert_present(self):\r\n\t\ttry:\r\n\t\t\tself.driver.switch_to.alert.text()\r\n\t\texcept NoAlertPresentException:\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef close_alert_and_get_text(self):\r\n\t\ttry:\r\n\t\t\talert = self.driver.switch_to.alert()\r\n\t\t\talert_text = alert.text\r\n\t\t\tif self.accept_next_alert:\r\n\t\t\t\talert.accept()\r\n\t\t\telse:\r\n\t\t\t\talert.dismiss()\r\n\t\t\treturn alert_text\r\n\t\tfinally:\r\n\t\t\tself.accept_next_alert = True\r\n\r\n\tdef click_wait(self, button, timeout=10):\r\n\t\tsource = self.driver.page_source\r\n\t\tbutton.click()\r\n\r\n\t\tdef compare_source(driver):\r\n\t\t\ttry:\r\n\t\t\t\treturn source != driver.page_source\r\n\t\t\texcept NoSuchElementException:\r\n\t\t\t\tprint(\"Failed\")\r\n\t\tWebDriverWait(self.driver, timeout).until(compare_source)\r\n\r\n\tdef tearDown(self):\r\n\t\tself.driver.quit()\r\n\r\n", "sub_path": "basetest.py", "file_name": "basetest.py", "file_ext": "py", "file_size_in_byte": 1634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoAlertPresentException", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 56, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "497469108", "text": "import xml.etree.cElementTree as ET\nimport pprint\nimport re\nfrom collections import defaultdict\nimport json\n\"\"\"\nYour task is to explore the data a bit more.\nBefore you process the data and add it into MongoDB, you should\ncheck the \"k\" value for each \"\" and see if they 
can be valid keys in MongoDB,\nas well as see if there are any other potential problems.\n\nWe have provided you with 3 regular expressions to check for certain patterns\nin the tags. As we saw in the quiz earlier, we would like to change the data model\nand expand the \"addr:street\" type of keys to a dictionary like this:\n{\"address\": {\"street\": \"Some value\"}}\nSo, we have to see if we have such tags, and if we have any tags with problematic characters.\nPlease complete the function 'key_type'.\n\"\"\"\n\n\nlower = re.compile(r'^([a-z]|_)*$')\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#\\$@\\,\\. \\t\\r\\n]')\n\n\ndef key_type(element, keys):\n if element.tag == \"tag\":\n # YOUR CODE HERE\n if re.search(lower, element.attrib['k']):\n keys['lower'] += 1\n elif re.search(lower_colon, element.attrib['k']):\n keys['lower_colon'] += 1\n elif re.search(problemchars, element.attrib['k']):\n keys['problemchars'] += 1\n else:\n keys['other'] += 1\n \n return keys\n \n\n \ndef key_value(element, data):\n frequent_list = ['highway', 'building', 'name', 'tiger:county', 'tiger:name_base', \n 'tiger:cfcc', 'tiger:name_type', 'tiger:cfcc', 'tiger:name_type', \n 'tiger:zip_left', 'tiger:zip_right', 'tiger:reviewed', 'tiger:tlid', \n 'tiger:source', 'tiger:separated', 'source',\n 'phone', 'contact:phone', 'addr:postcode']\n \n frequent_list = ['tiger:zip_left', 'tiger:zip_right',]\n\n key = element.attrib['k']\n value = element.attrib['v']\n \n if key in frequent_list and len(data[key]) < 100:\n data[key].add(value)\n\n\n\ndef process_map(filename):\n data = defaultdict(set)\n for _, element in ET.iterparse(filename):\n if element.tag == 'node' or 'way':\n for tag in element.iter(\"tag\"):\n key_value(tag, data)\n\n return data\n\n\n\ndef test():\n # You can use another testfile 'map.osm' to look at your solution\n # Note that the assertions will be incorrect then.\n data = dict(process_map('san-jose_california.osm'))\n for k in data:\n data[k] = list(data[k])\n# json.dump(data, open('san-jose_california_tag_data.json', 'w'))\n pprint.pprint(data)\n\n\n\nif __name__ == \"__main__\":\n test()", "sub_path": "tags.py", "file_name": "tags.py", "file_ext": "py", "file_size_in_byte": 2650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 23, "usage_type": "call"}, {"api_name": "re.search", "line_number": 29, "usage_type": "call"}, {"api_name": "re.search", "line_number": 31, "usage_type": "call"}, {"api_name": "re.search", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 60, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree.iterparse", "line_number": 61, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 61, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "654058475", "text": "from __future__ import unicode_literals\n\nfrom django import VERSION\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(\n r'^calendar/', include('happenings.urls', namespace='calendar')\n ),\n]\n\nif VERSION >= (1, 9):\n urlpatterns += [\n url(\n 
r'^admin/', include(admin.site.urls[:2])\n ),\n ]\nelse:\n urlpatterns += [\n url(\n r'^admin/', include(admin.site.urls)\n ),\n ]\n\nurlpatterns += staticfiles_urlpatterns()\n", "sub_path": "tests/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 13, "usage_type": "call"}, {"api_name": "django.VERSION", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.staticfiles.urls.staticfiles_urlpatterns", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "236977012", "text": "# Under MIT License, see LICENSE.txt\n\nimport numpy as np\nimport math as m\n\nfrom RULEngine.Debug import debug_interface\nfrom RULEngine.Util.Pose import Pose\nfrom RULEngine.Util.Position import Position\nfrom RULEngine.Util.SpeedPose import SpeedPose\nfrom RULEngine.Util.PID import PID\nfrom ai.Algorithm.path_partitionner import CollisionBody\nfrom ai.Util.ai_command import AICommandType, AIControlLoopType, AICommand\nfrom ai.Util.role import Role\nfrom ai.executors.executor import Executor\nfrom ai.states.world_state import WorldState\nfrom config.config_service import ConfigService\n\nMIN_DISTANCE_TO_REACH_TARGET_SPEED = 0.5\n\n\nclass DotDict(dict):\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\nclass MotionExecutor(Executor):\n def __init__(self, p_world_state: WorldState):\n super().__init__(p_world_state)\n is_simulation = ConfigService().config_dict[\"GAME\"][\"type\"] == \"sim\"\n self.robot_motion = [RobotMotion(p_world_state, player_id, is_sim=is_simulation) for player_id in range(12)]\n\n\n def exec(self):\n for player in self.ws.game_state.my_team.available_players.values():\n if player.ai_command is None:\n continue\n\n cmd = player.ai_command\n r_id = player.id\n\n player.collision_body_mask[0] = CollisionBody.COLLIDABLE\n if player is not self.ws.game_state.get_player_by_role(Role.GOALKEEPER):\n player.collision_body_mask[1] = CollisionBody.COLLIDABLE\n else:\n player.collision_body_mask[1] = CollisionBody.UNCOLLIDABLE\n\n if cmd.command is AICommandType.MOVE:\n\n if cmd.control_loop_type is AIControlLoopType.POSITION:\n cmd.speed = self.robot_motion[r_id].update(cmd)\n\n elif cmd.control_loop_type is AIControlLoopType.SPEED:\n speed = cmd.pose_goal.position.rotate(-player.pose.orientation)\n cmd.speed = SpeedPose(speed, cmd.pose_goal.orientation)\n\n elif cmd.control_loop_type is AIControlLoopType.OPEN:\n cmd.speed = SpeedPose(cmd.pose_goal)\n\n elif 
cmd.command is AICommandType.STOP:\n cmd.speed = SpeedPose()\n self.robot_motion[r_id].reset()\n\n\nclass RobotMotion(object):\n def __init__(self, world_state: WorldState, robot_id, is_sim=True):\n self.ws = world_state\n self.id = robot_id\n self.is_sim = is_sim\n self.setting = get_control_setting(is_sim)\n self.setting.translation.max_acc = None\n self.setting.translation.max_speed = None\n self.setting.rotation.max_angular_acc = None\n self.setting.rotation.max_speed = None\n\n self.current_pose = Pose()\n self.current_orientation = 0.0\n self.current_velocity = Pose()\n self.current_angular_speed = 0.0\n self.current_speed = 0.0\n self.current_acceleration = Position()\n\n self.pose_error = Pose()\n self.position_error = Position()\n\n self.target_pose = Pose()\n self.target_speed = 0.0\n self.target_direction = Position()\n self.target_angular_speed = 0.0\n self.target_angle = 0.0\n self.angle_error = 0.0\n\n self.last_translation_cmd = Position()\n self.cruise_speed = 0.0\n self.cruise_angular_speed = 0.0\n\n self.next_speed = 0.0\n self.next_angular_speed = 0.0\n\n self.x_controller = PID(self.setting.translation.kp,\n self.setting.translation.ki,\n self.setting.translation.kd,\n self.setting.translation.antiwindup)\n\n self.y_controller = PID(self.setting.translation.kp,\n self.setting.translation.ki,\n self.setting.translation.kd,\n self.setting.translation.antiwindup)\n\n self.angle_controller = PID(self.setting.rotation.kp,\n self.setting.rotation.ki,\n self.setting.rotation.kd,\n self.setting.rotation.antiwindup,\n wrap_err=True)\n self.position_flag = False\n self.rotation_flag = False\n self.last_position = Position()\n self.target_turn = self.target_pose.position\n\n def update(self, cmd: AICommand) -> Pose():\n #print(cmd.path_speeds)\n self.update_states(cmd)\n\n # Rotation control\n rotation_cmd = self.angle_controller.update(self.pose_error.orientation, dt=self.dt)\n rotation_cmd = self.apply_rotation_constraints(rotation_cmd)\n if abs(self.pose_error.orientation) < 0.2:\n self.rotation_flag = True\n # Translation control\n self.position_flag = False\n if self.position_error.norm() < MIN_DISTANCE_TO_REACH_TARGET_SPEED * max(1.0, self.cruise_speed):\n if self.target_speed < 0.01:\n self.position_flag = True\n\n if self.position_flag:\n translation_cmd = Position(self.x_controller.update(self.pose_error.position.x, dt=self.dt),\n self.y_controller.update(self.pose_error.position.y, dt=self.dt))\n else:\n translation_cmd = self.get_next_velocity()\n # Adjust command to robot's orientation\n # self.ws.debug_interface.add_line(start_point=(self.current_pose.position[0] * 1000, self.current_pose.position[1] * 1000),\n # end_point=(self.current_pose.position[0] * 1000 + translation_cmd[0] * 600, self.current_pose.position[1] * 1000 + translation_cmd[1] * 600),\n # timeout=0.01, color=debug_interface.CYAN.repr())\n\n compasation_ref_world = translation_cmd.rotate(self.dt * rotation_cmd)\n translation_cmd = translation_cmd.rotate(-(self.current_pose.orientation))\n if not self.rotation_flag and cmd.path[-1] is not cmd.path[0]:\n translation_cmd *= translation_cmd * 0.0\n self.next_speed = 0.0\n self.x_controller.reset()\n self.y_controller.reset()\n if self.position_error.norm() > 0.1 and self.rotation_flag:\n self.angle_controller.reset()\n rotation_cmd = 0\n\n\n\n # self.ws.debug_interface.add_line(\n # start_point=(self.current_pose.position[0] * 1000, self.current_pose.position[1] * 1000),\n # end_point=(self.current_pose.position[0] * 1000 + compasation_ref_world[0] * 
600,\n # self.current_pose.position[1] * 1000 + compasation_ref_world[1] * 600),\n # timeout=0.01, color=debug_interface.ORANGE.repr())\n translation_cmd = self.apply_translation_constraints(translation_cmd)\n #if not translation_cmd.norm() < 0.01:\n # print(translation_cmd, \"self.target_reached()\", self.target_reached(), \"self.next_speed\", self.next_speed,\"self.target_speed\", self.target_speed )\n # self.debug(translation_cmd, rotation_cmd)\n return SpeedPose(translation_cmd, rotation_cmd)\n\n def get_next_velocity(self) -> Position:\n \"\"\"Return the next velocity according to a constant acceleration model of a point mass.\n It try to produce a trapezoidal velocity path with the required cruising and target speed.\n The target speed is the speed that the robot need to reach at the target point.\"\"\"\n\n if self.current_speed < self.target_speed: # accelerate\n self.next_speed += self.setting.translation.max_acc * self.dt\n else:\n if self.distance_accelerate():\n self.next_speed += self.setting.translation.max_acc * self.dt\n elif self.distance_break():\n self.next_speed -= self.setting.translation.max_acc * self.dt\n else:\n self.next_speed = self.current_speed\n # if self.target_reached(): # We need to go to target speed\n # if self.next_speed < self.target_speed: # Target speed is faster than current speed\n # self.next_speed += self.setting.translation.max_acc * self.dt\n # if self.next_speed > self.target_speed: # Next_speed is too fast\n # self.next_speed = self.target_speed\n # else: # Target speed is slower than current speed\n # self.next_speed -= self.setting.translation.max_acc * self.dt *2\n # else: # We need to go to the cruising speed\n # if self.next_speed < self.cruise_speed: # Going faster\n # self.next_speed += self.setting.translation.max_acc * self.dt\n # # self.next_speed = min(self.cruise_speed, self.next_speed)\n # else:\n # self.next_speed -= self.setting.translation.max_acc * self.dt * 2\n\n self.next_speed = np.clip(self.next_speed, 0.0, self.cruise_speed)\n self.next_speed = np.clip(self.next_speed, 0.0, self.setting.translation.max_speed)\n next_velocity = Position(self.target_direction * self.next_speed)\n\n return next_velocity\n\n def apply_rotation_constraints(self, r_cmd: float) -> float:\n if self.current_speed < 0.1:\n deadzone = self.setting.rotation.deadzone\n else:\n deadzone = 0.0\n\n sensibility = self.setting.rotation.sensibility\n max_speed = self.setting.rotation.max_speed\n\n r_cmd = self.limit_angular_speed(r_cmd)\n r_cmd = RobotMotion.apply_deadzone(r_cmd, deadzone, sensibility)\n r_cmd = clamp(r_cmd, -max_speed, max_speed)\n\n return r_cmd\n\n def apply_translation_constraints(self, t_cmd: Position) -> Position:\n deadzone = self.setting.translation.deadzone\n sensibility = self.setting.translation.sensibility\n\n t_cmd = self.limit_speed(t_cmd)\n t_cmd[0] = RobotMotion.apply_deadzone(t_cmd[0], deadzone, sensibility)\n t_cmd[1] = RobotMotion.apply_deadzone(t_cmd[1], deadzone, sensibility)\n\n return t_cmd\n\n @staticmethod\n def apply_deadzone(value, deadzone, sensibility):\n if m.fabs(value) < sensibility:\n value = 0.0\n elif m.fabs(value) <= deadzone:\n value = m.copysign(deadzone, value)\n return value\n\n def limit_speed(self, translation_cmd: Position) -> Position:\n if translation_cmd.norm() != 0.0:\n translation_speed = translation_cmd.norm()\n translation_speed = clamp(translation_speed, 0, self.setting.translation.max_speed)\n new_speed = translation_cmd.normalized() * translation_speed\n else:\n new_speed = Position()\n 
return new_speed\n\n    def limit_angular_speed(self, angular_speed: float) -> float:\n        if m.fabs(angular_speed) != 0.0:\n            rotation_sign = m.copysign(1, angular_speed)\n            # clamp the magnitude against the rotation limit, then restore the sign\n            angular_speed = clamp(m.fabs(angular_speed), 0.0, self.setting.rotation.max_speed)\n            new_speed = m.copysign(angular_speed, rotation_sign)\n        else:\n            new_speed = 0.0\n        return new_speed\n\n    def target_reached(self, boost_factor=1) -> bool: # distance_to_reach_target_speed\n        distance = 0.5 * (self.target_speed ** 2 - self.current_speed ** 2) / self.setting.translation.max_acc\n        distance = boost_factor * m.fabs(distance)\n        distance = max(distance, MIN_DISTANCE_TO_REACH_TARGET_SPEED)\n        return self.position_error.norm() <= distance\n\n    def distance_accelerate(self, boost_factor=1) -> bool: # distance_to_reach_target_speed\n        distance = 0.5 * (self.target_speed ** 2 - self.current_speed ** 2) / self.setting.translation.max_acc\n        distance = boost_factor * m.fabs(distance)\n        distance = max(distance, MIN_DISTANCE_TO_REACH_TARGET_SPEED)\n        return self.position_error.norm() >= distance * 2\n\n\n    def distance_break(self, boost_factor=1) -> bool: # distance_to_reach_target_speed\n        distance = 0.5 * (self.target_speed ** 2 - self.current_speed ** 2) / self.setting.translation.max_acc\n        distance = boost_factor * m.fabs(distance)\n        distance = max(distance, MIN_DISTANCE_TO_REACH_TARGET_SPEED)\n        return self.position_error.norm() <= distance\n\n    def update_states(self, cmd: AICommand):\n        self.dt = self.ws.game_state.game.delta_t\n\n        # Dynamics constraints\n        self.setting.translation.max_acc = self.ws.game_state.get_player(self.id).max_acc\n        self.setting.translation.max_speed = self.ws.game_state.get_player(self.id).max_speed\n        self.setting.translation.max_angular_acc = self.ws.game_state.get_player(self.id).max_angular_acc\n        self.setting.rotation.max_speed = self.ws.game_state.get_player(self.id).max_angular_speed\n\n        # Current state of the robot\n        self.current_pose = self.ws.game_state.game.friends.players[self.id].pose.scale(1 / 1000)\n        self.current_velocity = self.ws.game_state.game.friends.players[self.id].velocity.scale(1 / 1000)\n        self.current_speed = self.current_velocity.position.norm()\n        self.current_angular_speed = self.current_velocity.orientation\n        self.current_orientation = self.current_pose.orientation\n\n        # Desired parameters\n        if cmd.path != []:\n            current_path_position = Position(cmd.path[0] / 1000)\n            if not self.last_position.is_close(current_path_position, 0.1) and self.target_speed < 0.2:\n                self.reset()\n            self.last_position = current_path_position\n\n            self.target_pose = Pose(cmd.path[0], cmd.pose_goal.orientation).scale(1 / 1000)\n            self.target_turn = cmd.path_turn[1] / 1000\n            self.target_speed = cmd.path_speeds[1] / 1000\n\n        else: # No pathfinder case\n            self.target_pose = cmd.pose_goal.scale(1 / 1000)\n            self.target_turn = self.target_pose.position\n            self.target_speed = 0.0\n\n        self.target_angle = self.target_pose.orientation\n        self.pose_error = self.target_pose - self.current_pose # Poses are always wrapped to pi\n        self.position_error = self.pose_error.position\n        self.angle_error = self.pose_error.orientation\n        if self.position_error.norm() != 0.0:\n            self.target_direction = (self.target_turn - self.current_pose.position).normalized()\n\n        self.cruise_speed = cmd.cruise_speed\n\n    def reset(self):\n        self.angle_controller.reset()\n        self.x_controller.reset()\n        self.y_controller.reset()\n        self.position_flag = False\n        self.rotation_flag = False\n        self.last_translation_cmd = Position()\n        self.next_speed = 0.0\n        
self.next_angular_speed = 0.0\n self.last_position = Position()\n\n def debug(self, translation_cmd, rotation_cmd):\n print('Speed: {:5.3f}, Command: {}, {:5.3f}, next speed: {:5.3f}, target_speed: {:5.3f}, '\n '{:5.3f}, reached:{}, error: {}'.format(self.current_speed,\n translation_cmd,\n rotation_cmd,\n self.next_speed,\n self.target_speed,\n self.target_direction.angle()/m.pi*180,\n self.target_reached(),\n self.pose_error))\n\n\ndef get_control_setting(is_sim: bool):\n\n if is_sim:\n translation = {\"kp\": 1, \"ki\": 0.1, \"kd\": 0.5, \"antiwindup\": 0, \"deadzone\": 0, \"sensibility\": 0}\n rotation = {\"kp\": 3, \"ki\": 3, \"kd\": 0.01, \"antiwindup\": 0, \"deadzone\": 0, \"sensibility\": 0}\n else:\n translation = {\"kp\": 1, \"ki\": 0.1, \"kd\": 0.5, \"antiwindup\": 0, \"deadzone\": 0, \"sensibility\": 0}\n rotation = {\"kp\": 3, \"ki\": 3, \"kd\": 0.01, \"antiwindup\": 0, \"deadzone\": 0, \"sensibility\": 0}\n\n control_setting = DotDict()\n control_setting.translation = DotDict(translation)\n control_setting.rotation = DotDict(rotation)\n\n return control_setting\n\n\ndef clamp(val: float, min_val: float, max_val: float) -> float:\n return max(min(val, max_val), min_val)\n", "sub_path": "ai/executors/motion_executor.py", "file_name": "motion_executor.py", "file_ext": "py", "file_size_in_byte": 16268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ai.executors.executor.Executor", "line_number": 27, "usage_type": "name"}, {"api_name": "ai.states.world_state.WorldState", "line_number": 28, "usage_type": "name"}, {"api_name": "config.config_service.ConfigService", "line_number": 30, "usage_type": "call"}, {"api_name": "ai.Algorithm.path_partitionner.CollisionBody.COLLIDABLE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ai.Algorithm.path_partitionner.CollisionBody", "line_number": 42, "usage_type": "name"}, {"api_name": "ai.Util.role.Role.GOALKEEPER", "line_number": 43, "usage_type": "attribute"}, {"api_name": "ai.Util.role.Role", "line_number": 43, "usage_type": "name"}, {"api_name": "ai.Algorithm.path_partitionner.CollisionBody.COLLIDABLE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ai.Algorithm.path_partitionner.CollisionBody", "line_number": 44, "usage_type": "name"}, {"api_name": "ai.Algorithm.path_partitionner.CollisionBody.UNCOLLIDABLE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "ai.Algorithm.path_partitionner.CollisionBody", "line_number": 46, "usage_type": "name"}, {"api_name": "ai.Util.ai_command.AICommandType.MOVE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ai.Util.ai_command.AICommandType", "line_number": 48, "usage_type": "name"}, {"api_name": "ai.Util.ai_command.AIControlLoopType.POSITION", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ai.Util.ai_command.AIControlLoopType", "line_number": 50, "usage_type": "name"}, {"api_name": "ai.Util.ai_command.AIControlLoopType.SPEED", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ai.Util.ai_command.AIControlLoopType", "line_number": 53, "usage_type": "name"}, {"api_name": "RULEngine.Util.SpeedPose.SpeedPose", "line_number": 55, "usage_type": "call"}, {"api_name": "ai.Util.ai_command.AIControlLoopType.OPEN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "ai.Util.ai_command.AIControlLoopType", "line_number": 57, "usage_type": "name"}, {"api_name": "RULEngine.Util.SpeedPose.SpeedPose", "line_number": 58, "usage_type": "call"}, {"api_name": 
"ai.Util.ai_command.AICommandType.STOP", "line_number": 60, "usage_type": "attribute"}, {"api_name": "ai.Util.ai_command.AICommandType", "line_number": 60, "usage_type": "name"}, {"api_name": "RULEngine.Util.SpeedPose.SpeedPose", "line_number": 61, "usage_type": "call"}, {"api_name": "ai.states.world_state.WorldState", "line_number": 66, "usage_type": "name"}, {"api_name": "RULEngine.Util.Pose.Pose", "line_number": 76, "usage_type": "call"}, {"api_name": "RULEngine.Util.Pose.Pose", "line_number": 78, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 81, "usage_type": "call"}, {"api_name": "RULEngine.Util.Pose.Pose", "line_number": 83, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 84, "usage_type": "call"}, {"api_name": "RULEngine.Util.Pose.Pose", "line_number": 86, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 88, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 93, "usage_type": "call"}, {"api_name": "RULEngine.Util.PID.PID", "line_number": 100, "usage_type": "call"}, {"api_name": "RULEngine.Util.PID.PID", "line_number": 105, "usage_type": "call"}, {"api_name": "RULEngine.Util.PID.PID", "line_number": 110, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 117, "usage_type": "call"}, {"api_name": "ai.Util.ai_command.AICommand", "line_number": 120, "usage_type": "name"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 136, "usage_type": "call"}, {"api_name": "RULEngine.Util.SpeedPose.SpeedPose", "line_number": 167, "usage_type": "call"}, {"api_name": "RULEngine.Util.Pose.Pose", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 198, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 199, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 169, "usage_type": "name"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 218, "usage_type": "name"}, {"api_name": "math.fabs", "line_number": 230, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 232, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 233, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 236, "usage_type": "name"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 242, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 246, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 247, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 248, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 249, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 256, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 262, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 269, "usage_type": "call"}, {"api_name": "ai.Util.ai_command.AICommand", "line_number": 273, "usage_type": "name"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 291, "usage_type": "call"}, {"api_name": "RULEngine.Util.Pose.Pose", "line_number": 296, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 320, "usage_type": "call"}, {"api_name": "RULEngine.Util.Position.Position", "line_number": 323, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 332, 
"usage_type": "attribute"}]} +{"seq_id": "136128746", "text": "from treelib import Node, Tree\n\ninp = []\nwith open('example.txt', 'r') as f:\n for line in f:\n inp.append(line.strip())\n\nprint(inp)\n\nclass Inode():\n def __init__(self, size, type) -> None:\n self.size = size\n self.type = type\n\ntree = Tree()\ntree.show()\nroot = tree.create_node(\"/\", data=Inode(0, 'dir')) # root node\n\npwd = root\nmode = None\nfor line in inp[1:]:\n if line == '$ ls':\n print(\"About to ls some files\")\n if mode:\n raise Exception(\"How do you already have a mode?!\")\n mode = 'ls'\n elif line.startswith('$ cd'):\n mode = None\n dir = line.split()[2]\n if dir == '..':\n print(\"cd up a directory\")\n pwd = tree.parent(pwd.identifier)\n else:\n print(f\"cd to {dir}\")\n pwd = [n for n in tree.children(pwd.identifier) if n.tag == dir][0]\n elif mode == 'ls':\n size, name = line.split()\n if size == 'dir':\n node = tree.create_node(name, data=Inode(0, 'dir'), parent=pwd.identifier)\n print(f\"Adding dir {name} with identifer {node.identifier}\")\n else:\n print(f\"Adding file {name} with size {int(size)} in directory {pwd.identifier}\")\n tree.create_node(name, data=Inode(int(size), 'file'), parent=pwd.identifier)\n\ntree.show()\n\ndef calc_dir_sizes(root):\n if root.data.type == 'dir':\n total_size = sum(map(lambda n: calc_dir_sizes(n), tree.children(root.identifier)))\n root.data.size = total_size\n return total_size\n else:\n return root.data.size\n\ncalc_dir_sizes(root)\n\ntree.show(data_property=\"size\")\n\nnodes = tree.all_nodes()\ndirs_under100k = [n for n in nodes if n.data.type == 'dir' and n.data.size <= 100000]\nprint(sum(map(lambda n: n.data.size, dirs_under100k)))\n", "sub_path": "2022/day7/day7.py", "file_name": "day7.py", "file_ext": "py", "file_size_in_byte": 1661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "treelib.Tree", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "200693610", "text": "import argparse\n\n\ndef format_price(price):\n not_available = 'N/A'\n try:\n if not isinstance(price, bool):\n price = float(price)\n else:\n return not_available\n except (ValueError, TypeError):\n return not_available\n if not price:\n return not_available\n if (price).is_integer():\n price = int(price)\n else:\n price = round_float_number_to_two_decimals(price)\n return \"{:,}\".format(price).replace(',', ' ')\n\n\ndef round_float_number_to_two_decimals(number):\n number = float('{:.2f}'.format(number))\n if (number).is_integer():\n return int(number)\n else:\n return number\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('price', help='a price to format')\n args = parser.parse_args()\n print(format_price(args.price))\n", "sub_path": "format_price.py", "file_name": "format_price.py", "file_ext": "py", "file_size_in_byte": 861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "432236332", "text": "# ------------------------------------------------------------------------------\r\n# BSD 2-Clause License\r\n# \r\n# Copyright (c) 2019, Thomas Larsson\r\n# All rights reserved.\r\n# \r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n# \r\n# 1. 
Redistributions of source code must retain the above copyright notice, this\r\n#    list of conditions and the following disclaimer.\r\n# \r\n# 2. Redistributions in binary form must reproduce the above copyright notice,\r\n#    this list of conditions and the following disclaimer in the documentation\r\n#    and/or other materials provided with the distribution.\r\n# \r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\r\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n# ------------------------------------------------------------------------------\r\n\r\n\r\n\r\nimport bpy\r\nfrom bpy.props import *\r\n\r\nimport os\r\nimport math\r\nfrom mathutils import Quaternion, Matrix\r\nfrom .utils import *\r\nfrom .io_json import *\r\nif bpy.app.version < (2,80,0):\r\n    from .buttons27 import ProblemsString, LoadJson\r\nelse:\r\n    from .buttons28 import ProblemsString, LoadJson\r\n\r\n#------------------------------------------------------------------\r\n# Define current pose as rest pose\r\n#------------------------------------------------------------------\r\n\r\ndef applyRestPose(context, value):\r\n    rig = context.object\r\n    children = []\r\n    for ob in getSceneObjects(context):\r\n        if ob.type != 'MESH':\r\n            continue\r\n\r\n        setActiveObject(context, ob)\r\n        if ob != context.object:\r\n            raise RuntimeError(\"Context switch did not take:\\nob = %s\\nc.ob = %s\\nc.aob = %s\" %\r\n                (ob, context.object, context.active_object))\r\n\r\n        if (ob.McpArmatureName == rig.name and\r\n            ob.McpArmatureModifier != \"\"):\r\n            mod = ob.modifiers[ob.McpArmatureModifier]\r\n            ob.modifiers.remove(mod)\r\n            ob.data.shape_keys.key_blocks[ob.McpArmatureModifier].value = value\r\n            children.append(ob)\r\n        else:\r\n            for mod in ob.modifiers:\r\n                if (mod.type == 'ARMATURE' and\r\n                    mod.object == rig):\r\n                    children.append(ob)\r\n                    bpy.ops.object.modifier_apply(apply_as='SHAPE', modifier=mod.name)\r\n                    ob.data.shape_keys.key_blocks[mod.name].value = value\r\n                    ob.McpArmatureName = rig.name\r\n                    ob.McpArmatureModifier = mod.name\r\n                    break\r\n\r\n    setActiveObject(context, rig)\r\n    bpy.ops.object.mode_set(mode='POSE')\r\n    bpy.ops.pose.armature_apply()\r\n    for ob in children:\r\n        name = ob.McpArmatureModifier\r\n        setActiveObject(context, ob)\r\n        mod = ob.modifiers.new(name, 'ARMATURE')\r\n        mod.object = rig\r\n        mod.use_vertex_groups = True\r\n        bpy.ops.object.modifier_move_up(modifier=name)\r\n        #setShapeKey(ob, name, value)\r\n\r\n    setActiveObject(context, rig)\r\n    print(\"Applied pose as rest pose\")\r\n\r\n\r\ndef setShapeKey(ob, name, value):\r\n    if not ob.data.shape_keys:\r\n        return\r\n    skey = ob.data.shape_keys.key_blocks[name]\r\n    skey.value = value\r\n\r\n\r\nclass MCP_OT_RestCurrentPose(bpy.types.Operator):\r\n    bl_idname = \"mcp.rest_current_pose\"\r\n    bl_label = \"Current Pose => Rest Pose\"\r\n    bl_description = \"Change rest pose to current 
pose\"\r\n    bl_options = {'UNDO'}\r\n\r\n    def execute(self, context):\r\n        try:\r\n            initRig(context)\r\n            applyRestPose(context, 1.0)\r\n            print(\"Set current pose to rest pose\")\r\n        except MocapError:\r\n            bpy.ops.mcp.error('INVOKE_DEFAULT')\r\n        return{'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Automatic T-Pose\r\n#------------------------------------------------------------------\r\n\r\nTPose = {\r\n    \"upper_arm.L\" : (0, 0, -pi/2, 'XYZ'),\r\n    \"forearm.L\" : (0, 0, -pi/2, 'XYZ'),\r\n    #\"hand.L\" : (0, 0, -pi/2, 'XYZ'),\r\n\r\n    \"upper_arm.R\" : (0, 0, pi/2, 'XYZ'),\r\n    \"forearm.R\" : (0, 0, pi/2, 'XYZ'),\r\n    #\"hand.R\" : (0, 0, pi/2, 'XYZ'),\r\n\r\n    \"thigh.L\" : (-pi/2, 0, 0, 'XYZ'),\r\n    \"shin.L\" : (-pi/2, 0, 0, 'XYZ'),\r\n    #\"foot.L\" : (None, 0, 0, 'XYZ'),\r\n    #\"toe.L\" : (pi, 0, 0, 'XYZ'),\r\n\r\n    \"thigh.R\" : (-pi/2, 0, 0, 'XYZ'),\r\n    \"shin.R\" : (-pi/2, 0, 0, 'XYZ'),\r\n    #\"foot.R\" : (None, 0, 0, 'XYZ'),\r\n    #\"toe.R\" : (pi, 0, 0, 'XYZ'),\r\n}\r\n\r\ndef autoTPose(rig, context):\r\n    print(\"Auto T-pose\", rig.name)\r\n    putInRestPose(rig, True)\r\n    for pb in rig.pose.bones:\r\n        try:\r\n            ex,ey,ez,order = TPose[pb.McpBone]\r\n        except KeyError:\r\n            continue\r\n\r\n        euler = pb.matrix.to_euler(order)\r\n        if ex is None:\r\n            ex = euler.x\r\n        if ey is None:\r\n            ey = euler.y\r\n        if ez is None:\r\n            ez = euler.z\r\n        euler = Euler((ex,ey,ez), order)\r\n        mat = euler.to_matrix().to_4x4()\r\n        mat.col[3] = pb.matrix.col[3]\r\n\r\n        loc = pb.bone.matrix_local\r\n        if pb.parent:\r\n            mat = Mult2(pb.parent.matrix.inverted(), mat)\r\n            loc = Mult2(pb.parent.bone.matrix_local.inverted(), loc)\r\n        mat = Mult2(loc.inverted(), mat)\r\n        euler = mat.to_euler('YZX')\r\n        euler.y = 0\r\n        pb.matrix_basis = euler.to_matrix().to_4x4()\r\n    bpy.ops.object.mode_set(mode='EDIT')\r\n    bpy.ops.object.mode_set(mode='POSE')\r\n\r\n#------------------------------------------------------------------\r\n# Set current pose to T-Pose\r\n#------------------------------------------------------------------\r\n\r\ndef setTPose(rig, context, filename=None, reload=False):\r\n    if reload or not rig.McpTPoseDefined:\r\n        if isMakeHumanRig(rig) and context.scene.McpMakeHumanTPose:\r\n            if isMhOfficialRig(rig):\r\n                filename = \"target_rigs/mh_official_tpose.json\"\r\n            else:\r\n                filename = \"target_rigs/makehuman_tpose.json\"\r\n        elif filename is None:\r\n            filename = rig.McpTPoseFile\r\n        hasFile = loadPose(rig, filename)\r\n        if not hasFile:\r\n            autoTPose(rig, context)\r\n        defineTPose(rig)\r\n    else:\r\n        getStoredTPose(rig)\r\n\r\n\r\nclass MCP_OT_SetTPose(bpy.types.Operator):\r\n    bl_idname = \"mcp.set_t_pose\"\r\n    bl_label = \"Put In T-pose\"\r\n    bl_description = \"Set current pose to T-pose\"\r\n    bl_options = {'UNDO'}\r\n\r\n    def execute(self, context):\r\n        try:\r\n            rig = initRig(context)\r\n            isdefined = rig.McpTPoseDefined\r\n            setTPose(rig, context, reload=True)\r\n            rig.McpTPoseDefined = isdefined\r\n            print(\"Pose set to T-pose\")\r\n        except MocapError:\r\n            bpy.ops.mcp.error('INVOKE_DEFAULT')\r\n        return{'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Set T-Pose\r\n#------------------------------------------------------------------\r\n\r\ndef getStoredTPose(rig):\r\n    for pb in rig.pose.bones:\r\n        pb.matrix_basis = getStoredBonePose(pb)\r\n\r\n\r\ndef getStoredBonePose(pb):\r\n    try:\r\n        quat = Quaternion((pb.McpQuatW, pb.McpQuatX, pb.McpQuatY, pb.McpQuatZ))\r\n    except KeyError:\r\n        quat = Quaternion()\r\n    return quat.to_matrix().to_4x4()\r\n\r\n\r\ndef 
addTPoseAtFrame0(rig, scn):\r\n from .source import getSourceTPoseFile\r\n\r\n scn.frame_current = 0\r\n if rig.McpTPoseDefined:\r\n getStoredTPose(rig)\r\n elif getSourceTPoseFile():\r\n rig.McpTPoseFile = getSourceTPoseFile()\r\n defineTPose(rig)\r\n else:\r\n setRestPose(rig)\r\n defineTPose(rig)\r\n\r\n for pb in rig.pose.bones:\r\n if pb.rotation_mode == 'QUATERNION':\r\n pb.keyframe_insert('rotation_quaternion', group=pb.name)\r\n else:\r\n pb.keyframe_insert('rotation_euler', group=pb.name)\r\n\r\n#------------------------------------------------------------------\r\n# Define current pose as T-Pose\r\n#------------------------------------------------------------------\r\n\r\ndef defineTPose(rig):\r\n for pb in rig.pose.bones:\r\n quat = pb.matrix_basis.to_quaternion()\r\n pb.McpQuatW = quat.w\r\n pb.McpQuatX = quat.x\r\n pb.McpQuatY = quat.y\r\n pb.McpQuatZ = quat.z\r\n rig.McpTPoseDefined = True\r\n\r\n\r\nclass MCP_OT_DefineTPose(bpy.types.Operator, ProblemsString):\r\n bl_idname = \"mcp.define_t_pose\"\r\n bl_label = \"Define T-pose\"\r\n bl_description = \"Define T-pose as current pose\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n if self.problems:\r\n return{'FINISHED'}\r\n try:\r\n rig = initRig(context)\r\n defineTPose(rig)\r\n print(\"T-pose defined as current pose\")\r\n except MocapError:\r\n bpy.ops.mcp.error('INVOKE_DEFAULT')\r\n return{'FINISHED'}\r\n\r\n def invoke(self, context, event):\r\n return checkObjectProblems(self, context)\r\n\r\n def draw(self, context):\r\n drawObjectProblems(self)\r\n\r\n#------------------------------------------------------------------\r\n# Undefine stored T-pose\r\n#------------------------------------------------------------------\r\n\r\ndef setRestPose(rig):\r\n unit = Matrix()\r\n for pb in rig.pose.bones:\r\n pb.matrix_basis = unit\r\n\r\n\r\nclass MCP_OT_UndefineTPose(bpy.types.Operator):\r\n bl_idname = \"mcp.undefine_t_pose\"\r\n bl_label = \"Undefine T-pose\"\r\n bl_description = \"Remove definition of T-pose\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n try:\r\n rig = initRig(context)\r\n rig.McpTPoseDefined = False\r\n print(\"Undefined T-pose\")\r\n except MocapError:\r\n bpy.ops.mcp.error('INVOKE_DEFAULT')\r\n return{'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Load T-pose from file\r\n#------------------------------------------------------------------\r\n\r\ndef loadPose(rig, filename):\r\n if filename:\r\n filepath = os.path.join(os.path.dirname(__file__), filename)\r\n filepath = os.path.normpath(filepath)\r\n print(\"Loading %s\" % filepath)\r\n struct = loadJson(filepath)\r\n rig.McpTPoseFile = filename\r\n else:\r\n return False\r\n\r\n setRestPose(rig)\r\n\r\n for name,value in struct:\r\n bname = getBoneName(rig, name)\r\n try:\r\n pb = rig.pose.bones[bname]\r\n except KeyError:\r\n continue\r\n quat = Quaternion(value)\r\n pb.matrix_basis = quat.to_matrix().to_4x4()\r\n\r\n return True\r\n\r\n\r\ndef getBoneName(rig, name):\r\n if rig.McpIsSourceRig:\r\n return name\r\n else:\r\n pb = getTrgBone(name, rig)\r\n if pb:\r\n return pb.name\r\n else:\r\n return \"\"\r\n\r\n\r\nclass MCP_OT_LoadPose(bpy.types.Operator, LoadJson):\r\n bl_idname = \"mcp.load_pose\"\r\n bl_label = \"Load Pose\"\r\n bl_description = \"Load pose from file\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n rig = initRig(context)\r\n filename = os.path.relpath(self.filepath, os.path.dirname(__file__))\r\n try:\r\n loadPose(rig, 
filename)\r\n except MocapError:\r\n bpy.ops.mcp.error('INVOKE_DEFAULT')\r\n print(\"Loaded pose\")\r\n return{'FINISHED'}\r\n\r\n def invoke(self, context, event):\r\n context.window_manager.fileselect_add(self)\r\n return {'RUNNING_MODAL'}\r\n\r\n#------------------------------------------------------------------\r\n# Save current pose to file\r\n#------------------------------------------------------------------\r\n\r\ndef savePose(context, filepath):\r\n rig = context.object\r\n struct = []\r\n for pb in rig.pose.bones:\r\n bmat = pb.matrix\r\n rmat = pb.bone.matrix_local\r\n if pb.parent:\r\n bmat = Mult2(pb.parent.matrix.inverted(), bmat)\r\n rmat = Mult2(pb.parent.bone.matrix_local.inverted(), rmat)\r\n mat = Mult2(rmat.inverted(), bmat)\r\n q = mat.to_quaternion()\r\n magn = math.sqrt( (q.w-1)*(q.w-1) + q.x*q.x + q.y*q.y + q.z*q.z )\r\n if magn > 1e-4:\r\n if pb.McpBone:\r\n struct.append((pb.McpBone, tuple(q)))\r\n\r\n if os.path.splitext(filepath)[1] != \".json\":\r\n filepath = filepath + \".json\"\r\n filepath = os.path.join(os.path.dirname(__file__), filepath)\r\n print(\"Saving %s\" % filepath)\r\n saveJson(struct, filepath)\r\n\r\n\r\nclass MCP_OT_SavePose(bpy.types.Operator, LoadJson):\r\n bl_idname = \"mcp.save_pose\"\r\n bl_label = \"Save Pose\"\r\n bl_description = \"Save current pose as .json file\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n try:\r\n savePose(context, self.filepath)\r\n except MocapError:\r\n bpy.ops.mcp.error('INVOKE_DEFAULT')\r\n print(\"Saved current pose\")\r\n return{'FINISHED'}\r\n\r\n def invoke(self, context, event):\r\n context.window_manager.fileselect_add(self)\r\n return {'RUNNING_MODAL'}\r\n\r\n#------------------------------------------------------------------\r\n# Utils\r\n#------------------------------------------------------------------\r\n\r\ndef initRig(context):\r\n from . import target\r\n from . 
import source\r\n from .fkik import setRigifyFKIK, setRigify2FKIK\r\n\r\n rig = context.object\r\n pose = [(pb, pb.matrix_basis.copy()) for pb in rig.pose.bones]\r\n\r\n if rig.McpIsSourceRig:\r\n source.findSrcArmature(context, rig)\r\n else:\r\n target.getTargetArmature(rig, context)\r\n\r\n for pb,mat in pose:\r\n pb.matrix_basis = mat\r\n\r\n if isRigify(rig):\r\n setRigifyFKIK(rig, 0.0)\r\n elif isRigify2(rig):\r\n setRigify2FKIK(rig, 1.0)\r\n\r\n return rig\r\n\r\n#----------------------------------------------------------\r\n# Initialize\r\n#----------------------------------------------------------\r\n\r\nclasses = [\r\n MCP_OT_RestCurrentPose,\r\n MCP_OT_SetTPose,\r\n MCP_OT_DefineTPose,\r\n MCP_OT_UndefineTPose,\r\n MCP_OT_LoadPose,\r\n MCP_OT_SavePose,\r\n]\r\n\r\ndef initialize():\r\n for cls in classes:\r\n bpy.utils.register_class(cls)\r\n\r\n\r\ndef uninitialize():\r\n for cls in classes:\r\n bpy.utils.unregister_class(cls)\r\n", "sub_path": "t_pose.py", "file_name": "t_pose.py", "file_ext": "py", "file_size_in_byte": 14884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "bpy.app", "line_number": 39, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_apply", "line_number": 71, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 71, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 78, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 78, "usage_type": "attribute"}, {"api_name": "bpy.ops.pose.armature_apply", "line_number": 79, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 79, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_move_up", "line_number": 86, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 86, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 100, "usage_type": "attribute"}, {"api_name": "bpy.ops.mcp.error", "line_number": 112, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 112, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 167, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 167, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 168, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 168, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 191, "usage_type": "attribute"}, {"api_name": "bpy.ops.mcp.error", "line_number": 205, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 205, "usage_type": "attribute"}, {"api_name": "mathutils.Quaternion", "line_number": 219, "usage_type": "call"}, {"api_name": "mathutils.Quaternion", "line_number": 221, "usage_type": "call"}, {"api_name": "source.getSourceTPoseFile", "line_number": 231, "usage_type": "call"}, {"api_name": "source.getSourceTPoseFile", "line_number": 232, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 258, "usage_type": "attribute"}, {"api_name": "buttons28.ProblemsString", "line_number": 258, "usage_type": "name"}, {"api_name": "bpy.ops.mcp.error", "line_number": 272, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 272, "usage_type": "attribute"}, {"api_name": "mathutils.Matrix", "line_number": 286, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 291, "usage_type": "attribute"}, {"api_name": "bpy.ops.mcp.error", "line_number": 303, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 303, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path", "line_number": 312, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "mathutils.Quaternion", "line_number": 328, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 345, "usage_type": "attribute"}, {"api_name": "buttons28.LoadJson", "line_number": 345, "usage_type": "name"}, {"api_name": "os.path.relpath", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 353, "usage_type": "call"}, {"api_name": "bpy.ops.mcp.error", "line_number": 357, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 357, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 380, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 385, "usage_type": "call"}, {"api_name": "os.path", "line_number": 385, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path", "line_number": 387, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 387, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 392, "usage_type": "attribute"}, {"api_name": "buttons28.LoadJson", "line_number": 392, "usage_type": "name"}, {"api_name": "bpy.ops.mcp.error", "line_number": 402, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 402, "usage_type": "attribute"}, {"api_name": "source.findSrcArmature", "line_number": 423, "usage_type": "call"}, {"api_name": "fkik.setRigifyFKIK", "line_number": 431, "usage_type": "call"}, {"api_name": "fkik.setRigify2FKIK", "line_number": 433, "usage_type": "call"}, {"api_name": "bpy.utils.register_class", "line_number": 452, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 452, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_class", "line_number": 457, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 457, "usage_type": "attribute"}]} +{"seq_id": "166165531", "text": "import numpy\nimport PIL\nimport math\nimport time\nimport picamera\nimport numpy as np\nimport cv2\nimport matplotlib as plt\nimport io\n\nstream=io.BytesIO()\nwith picamera.PiCamera() as camera:\n\tcamera.resolution=(320,240)\n\tcamera.framerate=24\n\ttime.sleep(1)\n\toutput=np.empty((240,320,3),dtype=np.uint8)\n\tcamera.capture(stream,format='jpeg')\ndata=np.fromstring(stream.getvalue(),dtype=np.uint8)\nimage=cv2.imdecode(data,1)\nimage=image[:,:,::-1]\nwindow_name = 'Edge AI- Tiny Yolo'\ncv2.imshow(window_name, image)\nraw_key = cv2.waitKey(1000)\nimage2=np.empty((240,320,3),dtype=np.uint8)\nacc_x=0\nacc_y=0\nacc_count=0\nfor x in range (0,240):\n\tfor y in range (0,320):\n\t\tb,g,r=image[x,y]\n\t\tif r>g and r>b:\n\t\t\tacc_x+=x\n\t\t\tacc_y+=y\n\t\t\tacc_count+=1\n\t\t\timage[x,y]=0,0,0\n\n\n#cv2.namedWIndow('imageWindow',cv2.WINDOW_AUTOSIZE)\n#cv2.imshow('imageWindow',stream)\n#cv2.waiKey(0)\n#cv2.destoyAllWindows()\nif 
acc_count>0:\n\tmean_x=(acc_x/acc_count)\n\tmean_y=(acc_y/acc_count)\n\tmean_x=numpy.floor(mean_x)\n\tmean_y=numpy.floor(mean_y)\n\timage[int(mean_x)+0,int(mean_y)-1]=0,0,255\n\timage[int(mean_x)-1,int(mean_y)+0]=0,0,255\n\timage[int(mean_x)+0,int(mean_y)+0]=0,0,255\n\timage[int(mean_x)+1,int(mean_y)+0]=255,0,0\n\timage[int(mean_x)+0,int(mean_y)+1]=255,0,0\nwindow_name2 = 'Edge AI- Tiny Yolo2'\ncv2.imshow(window_name2, image)\nraw_key = cv2.waitKey(20000)\n\n\n\n", "sub_path": "Lab5/lab5.4.py", "file_name": "lab5.4.py", "file_ext": "py", "file_size_in_byte": 1320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "io.BytesIO", "line_number": 11, "usage_type": "call"}, {"api_name": "picamera.PiCamera", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.fromstring", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "16393693", "text": "import datetime\r\nfrom mongokit import Document\r\nfrom rex import app, db\r\nimport validators\r\nfrom bson.objectid import ObjectId\r\n__author__ = 'taijoe'\r\n\r\n\r\nclass User(Document):\r\n __collection__ = 'users'\r\n\r\n structure = {\r\n 'customer_id' : unicode,\r\n 'email': unicode,\r\n 'code_active': unicode,\r\n 'username': unicode,\r\n 'password': unicode,\r\n 'creation': datetime.datetime,\r\n 'p_binary' : unicode,\r\n 'left' : unicode,\r\n 'right' : unicode,\r\n 'telephone' : float,\r\n 'p_node' : unicode,\r\n 'password_transaction' : unicode,\r\n 'btc_address' : unicode,\r\n 'eth_address' : unicode,\r\n 'ltc_address' : unicode,\r\n 'bch_address' : unicode,\r\n 'usdt_address' : unicode,\r\n 'level' : int,\r\n 'password_custom' : unicode,\r\n 'total_pd_left' : float,\r\n 'total_pd_right' : float,\r\n 'total_amount_left' : float,\r\n 'total_amount_right': float,\r\n 'm_wallet' : float,\r\n 'r_wallet' : float,\r\n 's_wallet' : float,\r\n 'd_wallet' : float,\r\n 'g_wallet' : float,\r\n 'coin_wallet' : float,\r\n 'balance_wallet' : float,\r\n 'status_authen' : int,\r\n 'authentication' : unicode,\r\n 'max_out' : float,\r\n 'total_max_out' : float,\r\n 'total_earn' : float,\r\n 'position' : unicode,\r\n 'country' : unicode,\r\n 'total_invest' : float,\r\n 'status':int,\r\n 'type': int,\r\n 'active_email': int,\r\n 'secret_2fa': unicode,\r\n 'status_2fa': int,\r\n 'status_withdraw' : int,\r\n 'max_daily': float,\r\n 'investment': float,\r\n 'total_node' : float,\r\n 'max_out_day' : float,\r\n 'max_out_package' : float,\r\n 'status_verify': int,\r\n 'amount_transfer' : float,\r\n 'personal_info': {\r\n 'firstname' : unicode,\r\n 'lastname' : unicode,\r\n 
'date_birthday' :unicode,\r\n 'address' :unicode,\r\n 'postalcode' : unicode,\r\n 'city' : unicode,\r\n 'country' : unicode,\r\n 'img_passport_fontside' : unicode,\r\n 'img_passport_backside' : unicode,\r\n 'img_address' : unicode\r\n },\r\n 'wallet': {\r\n 'bitcoin' : {\r\n 'address' : unicode,\r\n 'balance' : float\r\n },\r\n 'ethereum' : {\r\n 'address' : unicode,\r\n 'balance' : float\r\n }\r\n }\r\n }\r\n validators = {\r\n 'email': validators.max_length(120)\r\n }\r\n default_values = {\r\n 'creation': datetime.datetime.utcnow(),\r\n 'm_wallet' : 0,\r\n 'r_wallet' : 0,\r\n 's_wallet' : 0,\r\n 'max_out' : 0,\r\n 'total_earn' : 0,\r\n 'total_pd_left' : 0,\r\n 'total_pd_right' : 0,\r\n 'total_amount_left' : 0,\r\n 'total_amount_right' : 0,\r\n 'level' : 0,\r\n 'status_authen' : 0,\r\n 'authentication' : '',\r\n 'left' : '',\r\n 'right' : '',\r\n 'p_binary' : '',\r\n 'type': 0,\r\n 'balance_wallet' : 0,\r\n 'active_email' : 0,\r\n 'code_active' : '',\r\n 'investment' : 0,\r\n 'coin_wallet' : 0,\r\n 'total_node' : 0,\r\n 'max_out_day' : 0,\r\n 'max_out_package' : 0,\r\n 'status_verify' : 0\r\n\r\n }\r\n use_dot_notation = True\r\n\r\n def __repr__(self):\r\n return '' % self.name\r\n\r\n # Flask-Login integration\r\n def is_authenticated(self):\r\n return True\r\n\r\n def is_active(self):\r\n return True\r\n\r\n def is_anonymous(self):\r\n return False\r\n\r\n def get_id(self):\r\n return self._id\r\n\r\n def get_role(self):\r\n return self.role\r\n\r\n def get_user_home(self):\r\n role = db['roles'].find_one({'_id': self.get_role()})\r\n return role['home_page']\r\n\r\n\r\ndb.register([User])", "sub_path": "rex/models/user_model.py", "file_name": "user_model.py", "file_ext": "py", "file_size_in_byte": 3929, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "mongokit.Document", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "validators.max_length", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "attribute"}, {"api_name": "rex.db", "line_number": 141, "usage_type": "name"}, {"api_name": "rex.db.register", "line_number": 145, "usage_type": "call"}, {"api_name": "rex.db", "line_number": 145, "usage_type": "name"}]} +{"seq_id": "508682009", "text": "import os\r\nimport sqlite3\r\nimport logging\r\nfrom datetime import datetime\r\nfrom settings import SETTINGS as settings\r\nfrom settings import TEXT\r\n\r\n# open connection to sqlite database\r\ndb = sqlite3.connect('quotes.sqlite')\r\ncursor = db.cursor()\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\ndef addQuote(text):\r\n text = text[5:]\r\n date = datetime.now().strftime('%d.%m.%Y')\r\n\r\n command = f\"INSERT INTO quotes(text, date) VALUES('{text}', '{date}')\"\r\n logger.info(command)\r\n cursor.execute(command)\r\n\r\n db.commit()\r\n\r\n cursor.execute(f\"SELECT id FROM quotes WHERE text = '{text}' and date = '{date}'\")\r\n id = cursor.fetchone()[0]\r\n\r\n return \"Done! 
ID = \" + str(id)\r\n\r\n\r\ndef getQuote(quote):\r\n if settings['language'] == 'english':\r\n try:\r\n result = f'Quote #{quote[0]}: |{quote[1]}| from the {quote[2]}'\r\n except TypeError:\r\n result = 'This quote does not exist'\r\n else:\r\n try:\r\n result = f'Zitat #{quote[0]}: |{quote[1]}| vom {quote[2]}'\r\n except TypeError:\r\n result = 'Zitat existiert nicht'\r\n return result\r\n\r\n\r\ndef randomQuote():\r\n cursor.execute('SELECT * FROM quotes ORDER BY RANDOM() LIMIT 1')\r\n quote = cursor.fetchone()\r\n result = getQuote(quote)\r\n return result\r\n\r\n\r\ndef numQuote(text):\r\n try:\r\n id = text[text.index(' '):].strip()\r\n command = f\"SELECT * FROM quotes WHERE id = '{id}'\"\r\n logger.info(command)\r\n cursor.execute(command)\r\n result = getQuote(cursor.fetchone())\r\n return result\r\n except ValueError:\r\n return randomQuote()\r\n\r\n\r\ndef infoQuote():\r\n if settings['language'] == 'english':\r\n result = TEXT['eng']\r\n else:\r\n result = TEXT['ger']\r\n return result\r\n\r\n\r\ndef allQuotes():\r\n command = \"SELECT * FROM quotes\"\r\n cursor.execute(command)\r\n with open(\"C:\\\\Users\\\\ad\\\\ownCloud\\\\Code\\\\quote\\\\Legendarymarvin Zitate.html\", \"r\") as f:\r\n text = f.readlines()\r\n\r\n table = []\r\n for row in cursor.fetchall():\r\n newrow = f\"Zitat {row[0]} vom {row[2]}: {row[1]}\\n\"\r\n table.append(newrow)\r\n\r\n with open(\"zitate.txt\", 'w+') as f:\r\n f.writelines(table)\r\n\r\n\r\n\r\ndef close():\r\n db.close\r\n return None\r\n\r\ndef printfile(file):\r\n \"\"\"\r\n Reads out goal.txt and gives out the content\r\n :return: String\r\n \"\"\"\r\n goal = os.path.join(os.path.dirname(os.path.realpath(__file__)), file)\r\n with open(goal, 'r') as f:\r\n text = f.readlines()\r\n\r\n return ' '.join(text)\r\n", "sub_path": "quote.py", "file_name": "quote.py", "file_ext": "py", "file_size_in_byte": 2572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sqlite3.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "settings.SETTINGS", "line_number": 32, "usage_type": "name"}, {"api_name": "settings.SETTINGS", "line_number": 65, "usage_type": "name"}, {"api_name": "settings.TEXT", "line_number": 66, "usage_type": "name"}, {"api_name": "settings.TEXT", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "26552384", "text": "import requests\nimport datetime\n\n\nAPI_RESOURCE = 'https://api.github.com/search/repositories'\n\n\ndef get_trending_repositories(results_amount, days_amount):\n period = str(datetime.date.today() - datetime.timedelta(days=days_amount))\n request_params = {'q': 'created:>{}'.format(period),\n 'sort': 'stars',\n 'order': 'desc',\n 'per_page': str(results_amount)}\n all_repos = requests.get(API_RESOURCE, request_params)\n return all_repos.json()['items']\n\n\nif __name__ 
== '__main__':\n results_amount = 20\n days_amount = 7\n all_repos = get_trending_repositories(results_amount, days_amount)\n for repo in all_repos:\n print('Repository %s had %d issues, issues page - %s' %\n (repo['html_url'], repo['open_issues'], repo['html_url'] + '/issues'))\n", "sub_path": "github_trending.py", "file_name": "github_trending.py", "file_ext": "py", "file_size_in_byte": 841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.date.today", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "139801968", "text": "from collections import deque\n\n#4x5 matrix\n#thus, there will be 4//2 = 2 concentric levels. One outer, one inner\nnumrows = 4\nnumcols = 5\n\ndef less(a,b):\n if(a < b):\n return a\n else:\n return b\n\n#constraint : lower value needs to be even\nlevels = less(numrows,numcols)//2 # levels = 2\nbuffer = {level:deque() for level in range(levels)}\n\n#creating sq array with random elements\n#arr = [[random.choice(x) for cols in range(numcols)] for rows in range(numrows)]\n\narr =[ \n [1, 2, 3, 4, 5 ],\n [14, 15, 16, 17, 6 ],\n [13, 20, 19, 18, 7 ],\n [12, 11, 10, 9, 8 ] \n ]\n\n\npos = {0:0 , 1:0} #level:value\nflag = ('populate','retrieve') #to,from deque\n\ndef printarr(arr):\n for eachrow in arr:\n for eachcol in eachrow:\n print('{:5d}'.format(eachcol),end='')\n print('')\n\ndef left(arr,flag):\n #row fixed. level signifies row.\n for level in range(levels):\n for cols in range(level,numcols - level - 1): # (0,1,2,3),(1,2)\n if(flag == 'populate'):\n buffer[level].append(arr[level][cols])\n else: #retrieve\n arr[level][cols] = buffer[level][pos[level]]\n pos[level] += 1 #this behaves like a static\n\n\ndef down(arr,flag):\n #col fixed, level signifies col.\n for level in range(levels):\n for rows in range(level,numrows - level - 1): # (0,1,2),(1)\n if(flag == 'populate'):\n buffer[level].append(arr[rows][numcols - level - 1])\n else: #retrieve\n arr[rows][numcols - level - 1] = buffer[level][pos[level]]\n pos[level] += 1 \n\n\ndef right(arr,flag):\n #row fixed. level signifies row. col descending.\n for level in range(levels): #0,1\n for cols in reversed(range(level + 1,numcols - level)): # (4,3,2,1),(3,2)\n if(flag == 'populate'):\n buffer[level].append(arr[numrows - level - 1][cols]) \n else: #retrieve\n arr[numrows - level - 1][cols] = buffer[level][pos[level]]\n pos[level] += 1 \n\ndef up(arr,flag):\n #col fixed, level signifies col. 
row descending.\n for level in range(levels):\n for rows in reversed(range(level + 1,numrows - level)): # (3,2,1),(2)\n if(flag == 'populate'):\n buffer[level].append(arr[rows][level])\n else: #retrieve\n arr[rows][level] = buffer[level][pos[level]]\n pos[level] += 1\n\n\ndef buildlevels(arr,f):\n left(arr,f)\n down(arr,f)\n right(arr,f)\n up(arr,f)\n\n\n\nbuildlevels(arr,flag[0]) \nprint(buffer[0])\nprint(buffer[1])\nbuffer[0].rotate(1)\nbuffer[1].rotate(1)\n\n\nbuildlevels(arr,flag[1])\nprintarr(arr)\n", "sub_path": "hackerrank/matrixRotationStepwise.py", "file_name": "matrixRotationStepwise.py", "file_ext": "py", "file_size_in_byte": 2746, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.deque", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "175858151", "text": "from datetime import datetime\n\nfrom core.exceptions.NoChatGivenException import NoChatGivenException\nfrom core.exceptions.FileDoesNotExistsException import FileDoesNotExistsException\n\nclass Kernel:\n\n\tdef __init__(self, argument_provider):\n\t\t\"\"\"Initializes the kernel with the given argument provider\"\"\"\n\t\tself.argument_provider = argument_provider\n\t\tself.handle_status = self.generate_status()\n\n\tdef handle(self):\n\t\t\"\"\"Handles the process and returns a status dictionary\"\"\"\n\n\t\tparams = self.argument_provider.get_params()\n\t\tif len(params) == 0:\n\t\t\treturn self.status(success=False, error=NoChatGivenException())\n\n\t\ttry:\n\t\t\tfile = open(params[0], 'r')\n\t\texcept FileNotFoundError:\n\t\t\treturn self.status(success=False, error=FileDoesNotExistsException(params[0]))\n\n\t\treturn self.status()\n\n\tdef generate_status(self):\n\t\treturn {\n\t\t\t'success': True,\n\t\t\t'error': None,\n\t\t\t'meta': {\n\t\t\t\t'timestamp_start': datetime.utcnow().timestamp(),\n\t\t\t\t'timestamp_end': None,\n\t\t\t\t'timestamp_delta': None,\n\t\t\t}\n\t\t}\n\n\tdef status(self, success=True, error=None):\n\t\tself.handle_status['success'] = success\n\t\tself.handle_status['error'] = error\n\n\t\tself.handle_status['meta']['timestamp_end'] = datetime.utcnow().timestamp()\n\t\tself.handle_status['meta']['timestamp_delta'] = self.handle_status['meta']['timestamp_end'] - self.handle_status['meta']['timestamp_start']\n\t\treturn self.handle_status\n", "sub_path": "core/Kernel.py", "file_name": "Kernel.py", "file_ext": "py", "file_size_in_byte": 1370, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "core.exceptions.NoChatGivenException.NoChatGivenException", "line_number": 18, "usage_type": "call"}, {"api_name": "core.exceptions.FileDoesNotExistsException.FileDoesNotExistsException", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "17781278", "text": "from bs4 import BeautifulSoup\nimport urllib2\nimport re\nwiki = \"https://en.wikipedia.org/wiki/List_of_current_United_States_Senators\"\nheader = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia\nreq = urllib2.Request(wiki,headers=header)\npage = urllib2.urlopen(req)\nsoup = BeautifulSoup(page)\ntable = soup.find(\"table\", { \"class\" : \"sortable 
wikitable\" })\n\n\ndef remove_html_markup(s):\n tag = False\n quote = False\n out = \"\"\n\n for c in s:\n if c == '<' and not quote:\n tag = True\n elif c == '>' and not quote:\n tag = False\n elif (c == '\"' or c == \"'\") and tag:\n quote = not quote\n elif not tag:\n out = out + c\n\n return out\n\n\n\n\ndataSet = []\nfor row in table.findAll('tr'):\n\tcells = row.findAll('td')\n\tif len(cells)==12:\n\t\tdataSet.append(cells)\n#sorts dataset by last name of Senator\ndataSet = sorted(dataSet, key=lambda d:str(d[4]))\n#all ordered by senator last name\ndateAssumedOffice = [] #index = 9\ndateOfBirth = [] #index = 10\ndateTermExpires = [] #index = 11\nfor row in dataSet:\n\t#print row\n\tfor index, col in enumerate(row):\n\n\t\ttext = str(col)\n\t\ttext =remove_html_markup(text)\n\t\tif index == 9:\n\t\t\tdateAssumedOffice.append(text)\n\t\telif index == 10:\n\t\t\tdateOfBirth.append(text)\n\t\telif index == 11:\n\t\t\tdateTermExpires.append(text)\n\n\n", "sub_path": "Data Collection/WikipediaSenatorTableScraper.py", "file_name": "WikipediaSenatorTableScraper.py", "file_ext": "py", "file_size_in_byte": 1369, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "urllib2.Request", "line_number": 6, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "463455313", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom apps.index.models import Shop, ShopImage, ShopProperty\nfrom group import settings\n\n\ndef detail(request):\n sid = request.GET.get('sid')\n settings.SID = sid\n if sid:\n try:\n shops = Shop.objects.filter(shop_id=sid).values(\n 'shop_id',\n 'name',\n 'promote_price',\n 'original_price',\n 'stock',\n 'quantity',\n )\n if shops.exists():\n shop = shops.first()\n imgs = ShopImage.objects.filter(shop_id=shop.get('shop_id')).values('img_url')\n shop.update(imgs=imgs)\n values = ShopProperty.objects.filter(shop_id=shop.get('shop_id'))\n x = Shop.objects.filter(shop_id=sid).first()\n cate_name = x.sub_cate_id.name\n return render(request, 'shop_detail.html', {'shop': shop, 'cate_name': cate_name, 'values': values})\n else:\n return HttpResponse('错误')\n except Exception as e:\n print(e)\n else:\n return HttpResponse('404')\n", "sub_path": "apps/detail/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "group.settings.SID", "line_number": 11, "usage_type": "attribute"}, {"api_name": "group.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "apps.index.models.Shop.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "apps.index.models.Shop.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "apps.index.models.Shop", "line_number": 14, "usage_type": "name"}, {"api_name": "apps.index.models.ShopImage.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "apps.index.models.ShopImage.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "apps.index.models.ShopImage", "line_number": 24, "usage_type": "name"}, {"api_name": "apps.index.models.ShopProperty.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.index.models.ShopProperty.objects", "line_number": 26, 
"usage_type": "attribute"}, {"api_name": "apps.index.models.ShopProperty", "line_number": 26, "usage_type": "name"}, {"api_name": "apps.index.models.Shop.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "apps.index.models.Shop.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "apps.index.models.Shop", "line_number": 27, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "146989000", "text": "# -*- coding: utf-8 -*-\nimport cv2\n\ndef main():\n # 入力画像とマスク画像の取得\n im = cv2.imread(\"test.png\",1)\n mask = cv2.imread(\"mask.png\",0)\n # マスク処理\n im2 = cv2.bitwise_and(im,im, mask=mask)\n # 結果表示\n cv2.imshow(\"Mask\",im2)\n cv2.waitKey(0) # キー入力待機\n cv2.destroyAllWindows() # ウィンドウ破棄\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "python/opencv/opencv-old/mask.py", "file_name": "mask.py", "file_ext": "py", "file_size_in_byte": 441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "600811843", "text": "import graphviz as gv\n\nclass NodoCola:\n\tdef __init__(self,numero=0, sig=None):\n\t\tself.numero=numero\n\t\tself.sig=sig\n\n\tdef __str__(self):\n\t\treturn (self.numero)\n\nclass Cola:\n\tdef __init__(self):\n\t\tself.inicio=None\n\t\tself.fin=None\n\n\tdef agregar(self, elemento):\n\t\tif self.inicio==None:\n\t\t\tself.inicio=elemento\n\n\t\tif self.fin != None:\n\t\t\tself.fin.sig=elemento\t\n\n\t\tself.fin=elemento\n\n\tdef listar(self):\n\t\taux = self.inicio\n\t\twhile aux != None:\n\t\t\tprint(aux)\n\t\t\taux=aux.sig\n\n\t\n\tdef borrar(self):\n\t\tif self.inicio!=None:\n\t\t\taux=self.inicio.numero\n\t\t\tself.inicio =self.inicio.sig\n\t\t\treturn aux\n\n\tdef GraphCola(self):\n\t\t\tg2 = gv.Digraph(format='png')\n\t\t\taux = self.inicio\n\t\t\ta=0\n\t\t\tb=0\n\t\t\twhile aux != None:\n\t\t\t\tnodoa=\"\"\n\t\t\t\tnodob=\"\"\n\t\t\t\tif(aux.sig!=None):\n\t\t\t\t\tnodoa=aux.numero\n\t\t\t\t\tnodob=aux.sig.numero\n\t\t\t\t\tb=b+1\n\t\t\t\t\tg2.edge(\"No_\"+str(a)+\"=\"+nodoa, \"No_\"+str(b)+\"=\"+nodob)\n\t\t\t\t\ta=a+1\n\t\t\t\telse:\n\t\t\t\t\tnodoa=aux.numero\n\t\t\t\t\tnodob=\"\"\n\t\t\t\t\tg2.node(\"No_\"+str(a)+\"=\"+nodoa)\n\t\t\t\t\t\n\t\t\t\taux=aux.sig\n\t\t\t\t\n\t\t\tg2.render('img/cola')\t\t\n\t\t\nif __name__ == \"__main__\":\n\tcl = Cola()\n\t\n\twhile(True):\n\t\tprint(\"----Menu----\\n\"+\n\t\t\t\"1.Agregar\\n\"+\n\t\t\t\"2.Listar\\n\"+\n\t\t\t\"3.Borrar\\n\"+\n\t\t\t\"5.Salir\")\n\t\t\t\n\t\tnum = input(\"ingrese la opcion: \")\n\t\t\t\n\t\tif num == \"1\":\n\t\t numero = input(\"ingrese el string: \")\n\t\t nod=NodoCola(numero)\n\t\t cl.agregar(nod)\n\t\telif num == \"2\":\n\t\t\tcl.listar()\n\t\t\tcl.GraphCola()\n\t\telif num == \"3\":\n\t\t\tprint(cl.borrar())\n\t\t\tcl.GraphCola()\t\t\n\t\telif num == \"5\":\n\t\t\texit()\t\t\t\n", "sub_path": "Practica2EDDPy/cola.py", "file_name": "cola.py", 
"file_ext": "py", "file_size_in_byte": 1467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "graphviz.Digraph", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "51856165", "text": "'''\r\nCreated on 2016. 8. 13.\r\n\r\n@author: Seonghyeon\r\n'''\r\n\r\nimport datetime, calendar\r\n# lots of external library is strength of python\r\n\r\na = datetime.date(1901,1,1)\r\nb = datetime.date(2000,12,31)\r\nans = 0\r\nwhile a < b:\r\n ans = ans + 1 if a.weekday() is 6 else ans\r\n a = a + datetime.timedelta(days=calendar.monthrange(a.year,a.month)[1])\r\nprint(ans)", "sub_path": "src/problem19.py", "file_name": "problem19.py", "file_ext": "py", "file_size_in_byte": 357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.date", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "539408165", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 20:59:27 2019\n\n@author: YSu\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nfrom statsmodels.tsa.arima_model import ARMA\nfrom datetime import datetime\nfrom datetime import timedelta\n\ndef solar_sim(sim_years,cap):\n sim_years=sim_years+3\n df_CAISO = pd.read_excel('Synthetic_wind_power/renewables_2011_2017.xlsx',sheet_name='CAISO',header=0)\n df_cap = pd.read_excel('Synthetic_wind_power/cap_by_month.xlsx',sheet_name = 'solar',header=0)\n \n years = range(2011,2018)\n \n ## first standardize solar by installed capacity, yielding hourly capacity factors\n hours = len(df_CAISO)\n num_years = int(len(years))\n st_solar = np.zeros((hours,1))\n \n \n for i in years:\n \n year_index = years.index(i)\n \n for j in range(0,31):\n for k in range(0,24):\n \n st_solar[year_index*8760 +j*24+k] = df_CAISO.loc[year_index*8760 + j*24+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==1),'CAISO']\n st_solar[year_index*8760 +j*24+1416+k] = df_CAISO.loc[year_index*8760 + j*24+1416+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==3),'CAISO']\n st_solar[year_index*8760 +j*24+2880+k] = df_CAISO.loc[year_index*8760 + j*24+2880+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==5),'CAISO']\n st_solar[year_index*8760 +j*24+4344+k] = df_CAISO.loc[year_index*8760 + j*24+4344+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==7),'CAISO']\n st_solar[year_index*8760 +j*24+5088+k] = df_CAISO.loc[year_index*8760 + j*24+5088+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==8),'CAISO']\n st_solar[year_index*8760 +j*24+6552+k] = df_CAISO.loc[year_index*8760 + j*24+6552+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==10),'CAISO']\n st_solar[year_index*8760 +j*24+8016+k] = df_CAISO.loc[year_index*8760 + j*24+8016+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==12),'CAISO']\n \n for j in range(0,30):\n for k in range(0,24):\n \n st_solar[year_index*8760 +j*24+2160+k] = df_CAISO.loc[year_index*8760 + j*24+2160+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==4),'CAISO']\n st_solar[year_index*8760 +j*24+3624+k] = df_CAISO.loc[year_index*8760 + j*24+3624+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==6),'CAISO']\n st_solar[year_index*8760 
+j*24+5832+k] = df_CAISO.loc[year_index*8760 + j*24+5832+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==9),'CAISO']\n st_solar[year_index*8760 +j*24+7296+k] = df_CAISO.loc[year_index*8760 + j*24+7296+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==11),'CAISO']\n \n for j in range(0,28):\n for k in range(0,24):\n \n st_solar[year_index*8760 +j*24+744+k] = df_CAISO.loc[year_index*8760 + j*24+744+k,'solar']/df_cap.loc[(df_cap['Year']==i) & (df_cap['Month']==2),'CAISO']\n \n st_solar=st_solar[35040:]\n daily_st_solar=np.reshape(st_solar,(3*365,24))\n daily_st_solar=np.sum(daily_st_solar,axis=1)\n irrediance=pd.read_csv('Synthetic_solar_power/Solar_data_GHI_regress.csv',header=0)\n \n \n \n #reg=linear_model.LinearRegression(fit_intercept=False)\n #xx=X.loc[:,'1':]\n #yy=X['y']\n #reg_s=reg.fit(xx,yy)\n #reg_s.score(xx,yy)\n #sss=reg_s.predict(xx)\n \n Normal_Starting=datetime(1900,1,1)\n \n datelist=pd.date_range(Normal_Starting,periods=365)\n count=0\n m=np.zeros(len(daily_st_solar))\n for i in range(0,len(daily_st_solar)):\n m[i]=int(datelist[count].month)\n count= count +1\n if count >364:\n count=0\n \n X=pd.DataFrame()\n \n \n \n \n X['Month']=m\n X['y']=daily_st_solar\n X['1']=np.sum(np.reshape(irrediance['Site1'].values[35040:],(3*365,24)),axis=1)\n X['2']=np.sum(np.reshape(irrediance['Site2'].values[35040:],(3*365,24)),axis=1)\n X['3']=np.sum(np.reshape(irrediance['Site3'].values[35040:],(3*365,24)),axis=1)\n X['4']=np.sum(np.reshape(irrediance['Site4'].values[35040:],(3*365,24)),axis=1)\n X['5']=np.sum(np.reshape(irrediance['Site5'].values[35040:],(3*365,24)),axis=1)\n X['6']=np.sum(np.reshape(irrediance['Site6'].values[35040:],(3*365,24)),axis=1)\n X['7']=np.sum(np.reshape(irrediance['Site7'].values[35040:],(3*365,24)),axis=1)\n \n \n for i in range(1,13):\n name='reg_' + str(i)\n data=X.loc[X['Month']==i]\n y=data['y']\n x=data.loc[:,'1':]\n # y=np.log(y+1)\n # x=np.log(x+1)\n locals()[name]=linear_model.LinearRegression(fit_intercept=False)\n locals()[name].fit(x,y)\n# print(locals()[name].score(x,y))\n \n \n \n \n Syn_irr=pd.read_csv('Synthetic_weather/synthetic_irradiance_data.csv',header=0,index_col=0)\n Syn_irr = Syn_irr.loc[0:365*sim_years-1,:]\n \n Normal_Starting=datetime(1900,1,1)\n \n datelist=pd.date_range(Normal_Starting,periods=365)\n count=0\n m=np.zeros(len(Syn_irr))\n for i in range(0,len(Syn_irr)):\n m[i]=int(datelist[count].month)\n count= count +1\n if count >364:\n count=0\n d_sim=np.column_stack((Syn_irr.values,m)) \n #\n ##Test the fit\n predicted = np.zeros(len(X))\n \n for i in range(0,len(X)):\n data=X.loc[i,:]\n Month=int(data['Month'])\n x_values=data['1':].values\n x_values = np.reshape(x_values,(1,7))\n reg_name='reg_' + str(Month)\n p=locals()[reg_name].predict(x_values)\n predicted[i]=p\n residules= predicted - X['y'].values\n \n \n# plt.plot(daily_st_solar)\n# plt.plot(predicted,alpha=0.5)\n #\n predicted_sim=np.zeros(len(Syn_irr))\n for i in range(0,len(Syn_irr)):\n data=d_sim[i,:]\n Month=int(data[7])\n x_values=data[:7]\n x_values = np.reshape(x_values,(1,7))\n reg_name='reg_' + str(Month)\n p=locals()[reg_name].predict(x_values)\n predicted_sim[i]=p\n \n \n \n Model=ARMA(residules,order=(7,0))\n arma_fit1 = Model.fit()\n #ARMA_residuals = arma_fit1.resid\n \n \n y_seeds=residules[-7:]\n e=np.random.normal(np.mean(residules),np.std(residules),len(Syn_irr))\n \n\n \n p=arma_fit1.params\n \n res_sim=np.zeros(len(Syn_irr)+7)\n res_sim[0:7]=y_seeds\n for i in range(0,len(Syn_irr)):\n y=p[0]+p[1]*y_seeds[6]+ p[2]*y_seeds[5] 
+ p[3]*y_seeds[4] + p[4]*y_seeds[3] +p[5]*y_seeds[2] + p[6]*y_seeds[1] + p[7]*y_seeds[0]+e[i]\n res_sim[i+7]=y\n y_seeds=res_sim[i:i+7]\n \n \n \n Solar=predicted_sim -e\n \n Solar[Solar t and s < 10):\n \n if j + s > 364:\n up = j + s - 365\n else:\n up = j + s\n \n if j - s < 0:\n down = j - s + 365\n else:\n down = j - s\n \n for k in range(0,3):\n if np.abs(sim_solar[i*365+j] - daily[up,k]) < tol:\n tol = np.abs(sim_solar[i*365+j] - daily[up,k])\n day = up\n year = k\n \n for k in range(0,3):\n if np.abs(sim_solar[i*365+j] - daily[down,k]) < tol:\n tol = np.abs(sim_solar[i*365+j] - daily[down,k])\n day = down\n year = k \n \n s = s + 1\n \n days[j,i] = day\n years[j,i] = year\n \n sim_hourly[i*8760+j*24:i*8760+j*24+24] = st_solar[year*8760+day*24:year*8760+day*24+24]*(sim_solar[i*365+j]/daily[day,year])\n \n #impose maximum constraint\n for i in range(0,len(sim_hourly)):\n if sim_hourly[i] > 1:\n sim_hourly[i] = 1\n \n #multiply by installed capacity\n solar_sim = sim_hourly*cap\n \n h = int(len(solar_sim))\n solar_sim = solar_sim[8760:h-2*8760,:]\n S = pd.DataFrame(solar_sim)\n S.columns = ['CAISO']\n S.to_csv('Synthetic_solar_power/solar_power_sim.csv')\n \n return None", "sub_path": "Stochastic_engine/solar_production_simulation.py", "file_name": "solar_production_simulation.py", "file_ext": "py", "file_size_in_byte": 9148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_excel", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 104, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 
116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 148, "usage_type": "call"}, {"api_name": "statsmodels.tsa.arima_model.ARMA", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 226, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 247, "usage_type": "call"}]} +{"seq_id": "445032978", "text": "from django import forms\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\n\nfrom .models import Contact\n\n\nclass ContactForm(forms.ModelForm):\n\n class Meta:\n model = Contact\n fields = ['name', 'email', 'message']\n\n def send_mail(self):\n context = {\n 'name': self.cleaned_data['name'],\n 'email': self.cleaned_data['email'],\n 'message': self.cleaned_data['message']\n }\n\n body = render_to_string('email.txt', context=context)\n\n send_mail(\n 'Contato do Django E-Commerce', body, self.cleaned_data['email'],\n [self.cleaned_data['email'], settings.DEFAULT_FROM_EMAIL]\n )\n", "sub_path": "djangoecommerce/core/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.forms.ModelForm", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Contact", "line_number": 12, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "249680325", "text": "from unittest import TestCase\nimport unittest.mock\nimport crud\nimport io\n\n\nclass TestGetGrades(TestCase):\n\n @unittest.mock.patch('builtins.input', side_effect=['-1', '4', 'safd', 'q'])\n 
@unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\n def test_get_grades_invalid_input(self, mock_stdout, mock_input):\n grade_list = crud.get_grades()\n self.assertEqual(grade_list, [-1.0, 4.0])\n self.assertEqual(mock_stdout.getvalue(), 'grade must be a digit or \"q\"\\n')\n\n @unittest.mock.patch('builtins.input', side_effect=['q'])\n def test_get_grades_empty_list(self, mock_input):\n grade_list = crud.get_grades()\n self.assertEqual(grade_list, [])\n\n @unittest.mock.patch('builtins.input', side_effect=['144', '100', 'q'])\n def test_get_grades_empty_list_valid(self, mock_input):\n grade_list = crud.get_grades()\n self.assertEqual(grade_list, [144, 100])\n\n", "sub_path": "A4/test_get_grades.py", "file_name": "test_get_grades.py", "file_ext": "py", "file_size_in_byte": 918, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "crud.get_grades", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 9, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 9, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 10, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 10, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "crud.get_grades", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 16, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 16, "usage_type": "attribute"}, {"api_name": "crud.get_grades", "line_number": 23, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 21, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "307448458", "text": "import logging\n\nimport pytest\nfrom bitshares.amount import Amount\nfrom dexbot.strategies.flexible_orders import Strategy\n\n# Turn on debug for dexbot logger\nlog = logging.getLogger(\"dexbot\")\nlog.setLevel(logging.DEBUG)\n\n\ndef test_validate_orders():\n # Correct input\n test_input = '10-20-30'\n expected = [0.1, 0.2, 0.3]\n assert Strategy.validate_orders(test_input) == expected\n\n # Integer input\n test_input = 10\n expected = [0.1]\n assert Strategy.validate_orders(test_input) == expected\n\n # Percent > 100\n test_input = '30-50-40'\n with pytest.raises(ValueError):\n Strategy.validate_orders(test_input)\n\n # Incorrect values\n test_input = 'a-b-c'\n with pytest.raises(ValueError):\n Strategy.validate_orders(test_input)\n\n # Incorrect delimiter\n test_input = '10+5+1'\n with pytest.raises(ValueError):\n Strategy.validate_orders(test_input)\n\n\ndef test_check_cp_shift_is_too_big(strategy_worker):\n worker = strategy_worker\n worker.price_change_threshold = 1 / 100\n\n assert worker.check_cp_shift_is_too_big(1, 2) is True\n assert worker.check_cp_shift_is_too_big(2, 1) is True\n\n old_cp = 1\n new_cp = old_cp * (1 + worker.price_change_threshold / 2)\n assert worker.check_cp_shift_is_too_big(old_cp, new_cp) is False\n\n\ndef test_calc_ratios(strategy_worker):\n worker = strategy_worker\n\n a, b = worker.calc_ratios(3)\n assert a + b == 1\n\n\ndef test_calc_center_price_external(strategy_worker, monkeypatch):\n def mocked_cp(*args):\n return 1\n\n def mocked_cp_bad(*args):\n return None\n\n worker = strategy_worker\n worker.external_feed = True\n worker.external_price_source = 
'binance'\n\n # Normal case\n monkeypatch.setattr(worker, 'get_external_market_center_price', mocked_cp)\n assert worker.calc_center_price() == 1\n\n # Fail\n monkeypatch.setattr(worker, 'get_external_market_center_price', mocked_cp_bad)\n with pytest.raises(TypeError):\n worker.calc_center_price()\n\n\ndef test_calc_center_price_last_trade(strategy_worker, monkeypatch):\n def mocked_cp(*args):\n return 1\n\n def mocked_cp_bad(*args):\n return None\n\n def mocked_last_trade(*args):\n return {'base': 1, 'quote': 1, 'price': 1}\n\n # Normal case\n worker = strategy_worker\n worker['bootstrapped'] = True\n worker.cp_from_last_trade = True\n monkeypatch.setattr(worker, 'get_own_last_trade', mocked_last_trade)\n assert worker.calc_center_price() == 1\n\n # Fallback to market CP\n monkeypatch.setattr(worker, 'get_own_last_trade', mocked_cp_bad)\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp)\n assert worker.calc_center_price() == 1\n\n # Fallback didn't work\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp_bad)\n with pytest.raises(TypeError):\n worker.calc_center_price()\n\n\ndef test_calc_center_price_market(strategy_worker, monkeypatch):\n def mocked_cp(*args, **kwargs):\n return 1\n\n def mocked_cp_bad(*args, **kwargs):\n return None\n\n worker = strategy_worker\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp)\n assert worker.calc_center_price() == 1\n\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp_bad)\n with pytest.raises(TypeError):\n worker.calc_center_price()\n\n\ndef test_filter_closest_orders(strategy_worker):\n worker = strategy_worker\n worker.place_market_buy_order(1, 0.9)\n closesest_buy = worker.place_market_buy_order(1, 0.95)\n\n worker.place_market_sell_order(1, 1.1)\n closest_sell = worker.place_market_sell_order(1, 1.05)\n\n closest = worker.filter_closest_orders(worker.own_orders)\n assert closest[0]['id'] == closesest_buy['id']\n assert closest[1]['id'] == closest_sell['id']\n\n\ndef test_filter_closest_orders_missing(strategy_worker):\n worker = strategy_worker\n worker.place_market_buy_order(1, 0.9)\n closesest_buy = worker.place_market_buy_order(1, 0.95)\n\n closest = worker.filter_closest_orders(worker.own_orders)\n assert closest[0]['id'] == closesest_buy['id']\n\n\ndef test_place_orders(strategy_worker, monkeypatch, bitshares):\n def mocked_cp(*args, **kwargs):\n return 1\n\n def mocked_cp_bad(*args, **kwargs):\n return None\n\n def ratio_zero_base(*args):\n return 0, 1\n\n def ratio_zero_quote(*args):\n return 1, 0\n\n def zero_balance_base(asset):\n if asset == worker.market['base']:\n return Amount(0, asset, bitshares_instance=bitshares)\n else:\n return Amount(100, asset, bitshares_instance=bitshares)\n\n def zero_balance_quote(asset):\n if asset == worker.market['quote']:\n return Amount(0, asset, bitshares_instance=bitshares)\n else:\n return Amount(100, asset, bitshares_instance=bitshares)\n\n worker = strategy_worker\n num_orders_expected = len(worker.buy_orders_percentages) + len(worker.sell_orders_percentages)\n\n # Good cp\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp)\n worker.place_orders()\n assert len(worker.own_orders) == num_orders_expected\n\n # Bad cp\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp_bad)\n worker.place_orders()\n assert len(worker.own_orders) == 0\n\n # Buy stop\n monkeypatch.setattr(worker, 'get_market_center_price', mocked_cp)\n worker.buy_stop_ratio = 1\n worker.place_orders()\n assert 
len(worker.get_own_buy_orders()) == 0\n worker.buy_stop_ratio = 0.5\n\n # Sell stop\n worker.sell_stop_ratio = 1\n worker.place_orders()\n assert len(worker.get_own_sell_orders()) == 0\n worker.sell_stop_ratio = 0.5\n\n # No buy order if BASE balance == 0\n worker.buy_stop_ratio = 0\n monkeypatch.setattr(worker, 'balance', zero_balance_base)\n worker.place_orders()\n assert len(worker.get_own_buy_orders()) == 0\n worker.buy_stop_ratio = 0.5\n\n # No sell order if QUOTE balance == 0\n worker.sell_stop_ratio = 0\n monkeypatch.setattr(worker, 'balance', zero_balance_quote)\n worker.place_orders()\n assert len(worker.get_own_sell_orders()) == 0\n worker.sell_stop_ratio = 0.5\n\n\ndef test_maintain_strategy(strategy_worker, other_worker, other_orders):\n worker = strategy_worker\n worker2 = other_worker\n\n num_orders_expected = len(worker.buy_orders_percentages) + len(worker.sell_orders_percentages)\n\n # Fresh run no orders\n worker.maintain_strategy()\n assert len(worker.own_orders) == num_orders_expected\n\n # Simulate order filled\n order = worker.get_own_buy_orders()[0]\n worker.cancel_orders(order)\n worker.maintain_strategy()\n assert len(worker.own_orders) == num_orders_expected\n\n # Order partially filled\n order = worker.get_own_buy_orders()[0]\n to_sell = order['quote']['amount'] * worker.partial_fill_threshold * 1.02\n sell_price = order['price'] / 1.01\n log.debug('Sell {} @ {}'.format(to_sell, sell_price))\n worker2.place_market_sell_order(to_sell, sell_price)\n worker.maintain_strategy()\n for order in worker.own_orders:\n assert order['base']['amount'] == order['for_sale']['amount']\n\n # Center price change\n worker.is_reset_on_price_change = True\n orders_before = worker.own_orders\n order = worker.get_own_buy_orders()[0]\n to_sell = order['quote']['amount']\n # We're placing a sell order close to worker buy to shift center price\n sell_price = order['price'] * 1.02\n log.debug('Sell {} @ {}'.format(to_sell, sell_price))\n worker2.place_market_sell_order(to_sell, sell_price)\n worker.maintain_strategy()\n orders_after = worker.own_orders\n assert orders_before != orders_after\n\n\ndef test_maintain_strategy_transition_from_staggered_orders(strategy_worker, other_worker, other_orders, monkeypatch):\n def mocked_orders():\n return {'fdfgf-hghgsfdf-hghg': {'id': 'fdfgf-hghgsfdf-hghg'}}\n\n def mocked_filter(*args):\n return args[0]\n\n worker = strategy_worker\n num_orders_expected = len(worker.buy_orders_percentages) + len(worker.sell_orders_percentages)\n\n monkeypatch.setattr(worker, 'fetch_orders', mocked_orders)\n monkeypatch.setattr(worker, 'filter_closest_orders', mocked_filter)\n worker.maintain_strategy()\n assert len(worker.own_orders) == num_orders_expected\n", "sub_path": "tests/strategies/flexible_orders/test_flexible_orders.py", "file_name": "test_flexible_orders.py", "file_ext": "py", "file_size_in_byte": 8274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy.validate_orders", "line_number": 16, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy", "line_number": 16, "usage_type": "name"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy.validate_orders", "line_number": 21, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy", 
"line_number": 21, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 25, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy.validate_orders", "line_number": 26, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 30, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy.validate_orders", "line_number": 31, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy", "line_number": 31, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 35, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy.validate_orders", "line_number": 36, "usage_type": "call"}, {"api_name": "dexbot.strategies.flexible_orders.Strategy", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 75, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 103, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 119, "usage_type": "call"}, {"api_name": "bitshares.amount.Amount", "line_number": 160, "usage_type": "call"}, {"api_name": "bitshares.amount", "line_number": 160, "usage_type": "name"}, {"api_name": "bitshares.amount.Amount", "line_number": 162, "usage_type": "call"}, {"api_name": "bitshares.amount", "line_number": 162, "usage_type": "name"}, {"api_name": "bitshares.amount.Amount", "line_number": 166, "usage_type": "call"}, {"api_name": "bitshares.amount", "line_number": 166, "usage_type": "name"}, {"api_name": "bitshares.amount.Amount", "line_number": 168, "usage_type": "call"}, {"api_name": "bitshares.amount", "line_number": 168, "usage_type": "name"}]} +{"seq_id": "651149464", "text": "\n\n\nimport os \nimport numpy as np \nimport pandas as pd \nimport string \nimport string \nimport re\nimport unicodedata \nimport json \nfrom collections import Counter\n\n\nimport gc \nimport tqdm \nfrom collections import defaultdict \nimport scipy.sparse as sp \n\n\n# In[ ]:\n\n\n# Update to point data_dir to folder contain train_set and test_set\ndata_dir = \"/home/[]\" \n\ntrain_set = pd.read_csv(os.path.join(data_dir, 'ift6390_arxiv/train.csv')) \ntest_set = pd.read_csv(os.path.join(data_dir, 'ift6390_arxiv/test.csv')) \n\n# process\ndef filter_printable(input): \n printable = set(string.printable) \n return(''.join(filter(lambda x: x in printable, input))) \n\ndef process(df, col): \n df[col] = df[col].apply(lambda x: x.lower()) \n df[col] = df[col].apply(lambda x: re.sub('\\n', ' ', x) )\n df[col] = df[col].apply(lambda x: re.sub('-', ' ', x) )\n df[col] = df[col].apply(lambda x: re.sub('\\\\(', ' ', x) )\n df[col] = df[col].apply(lambda x: re.sub('\\\\)', ' ', x) ) \n # in many cases it's a number in side < > for reference or footnote\n df[col] = df[col].apply(lambda x: re.sub(\"<.*?>\", \" \", x) )\n # punctuation\n df[col] = df[col].apply(lambda x: re.sub(r'\\W',' ',x) ) \n df[col] = df[col].apply(lambda x: re.sub(r'\\s+', ' ', x))\n df[col] = df[col].apply(lambda x: filter_printable(x)) \n # added removal of pure numbers. 
This was not applied previously, and was added later after submission ended\n df[col] = df[col].apply(lambda x: re.sub(r' \\d+', ' ', x)) \n return df\n\n\n# In[ ]:\n\n\n# \ntrain_processed = process(train_set, 'Abstract') \ntest_processed = process(test_set, 'Abstract') \n\"\"\"\n# Generate and export a copy of dictionary for exploration if needed\n# dictionary of word frequency \ndef word_freq_dict(train_processed): \n tmp_words = train_processed['Abstract'].apply(lambda x: x.split(' '))\n words = np.concatenate(np.array(tmp_words))\n words = list(words)\n word_dict = Counter(words)\n return word_dict \n\n\nword_dict = word_freq_dict(train_processed) \n# save \njson.dump(word_dict, open('word_dict.json', 'w')) \n\"\"\"\n\n\n# In[ ]:\n\n\n# Bernoulli \nclass BernoulliVectorizer: \n def __init__(self): \n self.word_dict = {}\n self.word_dict_query = {} \n self.wordvecs = []\n def word_freq_dict(self, dict_data): \n \n tmp_words = dict_data['Abstract'].apply(lambda x: x.split(' '))\n words = np.concatenate(np.array(tmp_words))\n words = list(words)\n self.word_dict = Counter(words)\n return self.word_dict \n def word_freq_dict_query(self, low_freq, high_freq): \n for wd, value in self.word_dict.items(): \n if value >= low_freq: \n if value <= high_freq: \n if len(wd) > 3: \n self.word_dict_query[wd] = value\n \n def transform(self, input_data): \n # by document. 0/1, no freq\n # use part of the word dict\n self.wordvecs = np.zeros(shape=((len(input_data), len(self.word_dict_query))))\n i = 0\n for doc in input_data['Abstract']: \n tokens = doc.split(' ') \n for word_idx in range(len(self.word_dict_query)): \n if list(self.word_dict_query.keys())[word_idx] in tokens: \n self.wordvecs[i, word_idx] = 1\n else: \n pass\n i += 1 \n if i % 100 == 0: \n print(i)\n return self.wordvecs\n def fit_transform(self, dict_data, input_data, low_freq=5, high_freq=2000): \n self.word_freq_dict(dict_data) \n self.word_freq_dict_query(low_freq, high_freq) \n return self.transform(input_data) \n\n\n# In[ ]:\n\n\n# \nbv = BernoulliVectorizer() \n\n\ntrain_wordvecs = bv.fit_transform(train_processed, train_processed, low_freq=50, high_freq=2000)\ntest_wordvecs = bv.fit_transform(train_processed, test_processed, low_freq=50, high_freq=2000)\n\n\n# In[ ]:\n\n\n###########\nclass BernoulliNB: \n def __init__(self, adj = 1): \n self.adj = adj \n \n def fit(self, X, y): \n self.n_classes = len(np.unique(y)) \n \n self.classes = Counter(list(y)).keys() \n self.counts = Counter(list(y)).values() \n self.counts = np.array(list(self.counts)) / len(y) \n \n # summarize data by class\n self.word_prob = np.zeros((self.n_classes, X.shape[1])) \n for class_idx in range(len(self.classes)): \n class_name = list(self.classes)[class_idx]\n x_idx = y[y == class_name].index\n for idx in x_idx: \n self.word_prob[class_idx] += X[idx] \n \n \n self.word_prob += self.adj \n class_sums = self.word_prob.sum(axis=1) + self.adj * self.n_classes\n self.word_prob = self.word_prob / class_sums[:, np.newaxis]\n \n def predict(self, X): \n \n P = np.dot(X, np.log(self.word_prob).T) \n P += np.log(self.counts) \n P_max = np.argmax(P, axis=1) \n P_max_class = [list(self.classes)[x] for x in P_max]\n return P_max_class\n\n\n# In[ ]:\n\n\n# Apply \nbnb = BernoulliNB(adj=1) \nbnb.fit(train_wordvecs, train_processed['Category'])\ntrain_tmp_results = bnb.predict(train_wordvecs)\n\nnp.mean(train_tmp_results == train_processed['Category']) \n\n# predict test data\ntest_predict_results = bnb.predict(test_wordvecs)\noutput_prediction = 
pd.DataFrame(test_predict_results).reset_index().rename(\n columns={'index':'Id', 0:'Category'}) \n\n# Please update file path if output needed\n# This exported list is uploaded to kaggle for submission \noutput_prediction.to_csv('/home/[]/test_prediction_5.csv', index = False)\n\n\n\n\n\n\n\n# In[ ]:\n\n\n# Let's look at what's not matched in training data \ntmp_lst = train_processed['Category'][train_processed['Category'] != train_tmp_results]\nCounter(tmp_lst)\n\"\"\"\nCounter({'astro-ph': 220,\n 'hep-th': 50,\n 'astro-ph.SR': 46,\n 'gr-qc': 69,\n 'stat.ML': 58,\n 'astro-ph.CO': 171,\n 'astro-ph.GA': 79,\n 'cs.LG': 46,\n 'physics.optics': 25,\n 'quant-ph': 66,\n 'cond-mat.mes-hall': 75,\n 'hep-ph': 47,\n 'cond-mat.mtrl-sci': 73,\n 'math.AP': 10,\n 'math.CO': 8})\n\"\"\"\n\n\n# In[ ]:\n\n\nimport pandas as pd \n\n\n# extract dictionary of word frequency \ndef word_freq_dict(train_processed): \n tmp_words = train_processed['Abstract'].apply(lambda x: x.split(' '))\n words = np.concatenate(np.array(tmp_words))\n words = list(words)\n word_dict = Counter(words)\n return word_dict \n# all categories in training\nword_dict = word_freq_dict(train_processed) \n\n# \nall_words_df = pd.DataFrame(list(zip(word_dict.keys(), word_dict.values())), \n columns = ['word', 'all_cat']) \nall_words_df = all_words_df.assign(length = all_words_df['word'].str.len()) \n\n\nall_cats = pd.unique(train_processed.loc[:, 'Category']) \nfor cat in all_cats: \n cat_dict = word_freq_dict(\n train_processed.query(\n ' Category == @cat '\n )\n )\n cat_word_df = pd.DataFrame(list(zip(cat_dict.keys(), cat_dict.values())), \n columns = ['word', cat]) \n all_words_df = pd.merge(all_words_df, cat_word_df, \n how='left', on = 'word') \n \n# A word that shows up in one category > 10 times, is defined as detected in the category. \n# if a word shows up in a category <= 10, it is treated as not detected in the cat. \nall_words_df = all_words_df.assign(\n n_cat = np.sum(all_words_df.loc[:, ['astro-ph', 'hep-ph', 'cs.LG', 'math.CO',\n 'cond-mat.mes-hall', 'hep-th', 'stat.ML', 'physics.optics',\n 'astro-ph.CO', 'gr-qc', 'astro-ph.SR', 'math.AP', 'cond-mat.mtrl-sci',\n 'quant-ph', 'astro-ph.GA']] > 10, axis=1)\n )\n\nall_words_summary = all_words_df.assign(\n # Compute whether a word shows up in one category > 50% of chances by counts\n # This will be used as a criteria to include low-frequency words. \n over_half_in_cat = np.any(np.divide(all_words_df.loc[:, ['astro-ph', 'hep-ph', 'cs.LG', 'math.CO',\n 'cond-mat.mes-hall', 'hep-th', 'stat.ML', 'physics.optics',\n 'astro-ph.CO', 'gr-qc', 'astro-ph.SR', 'math.AP', 'cond-mat.mtrl-sci',\n 'quant-ph', 'astro-ph.GA']], all_words_df.loc[:, 'all_cat'][:, np.newaxis] )\n > 0.5, axis=1)\n ).query(\n # total counts of a word in all categories\n 'all_cat > 5'\n ).query(\n # words shorter or equal to 2 characters are not very meaningful/useful in predicting\n # While it was tempting to use 3 here, some words like \"gas\" did distinguish some categories well from others. 
\n    'length > 2'\n    ).query(\n        # If a word has total counts <= 10, it has to be in one category predominantly (> 50%)\n        'n_cat > 0 | over_half_in_cat == True'\n    ).query(\n        # exclude words that show up in every category\n        'n_cat < 15'\n    )\n\n    \n\n\n\n\n# In[ ]:\n\n\n# generate a word dict in format required by \"BernoulliVectorizer\"\nhand_crafted_word_dict = dict(\n    zip(all_words_summary.loc[:, 'word'], all_words_summary.loc[:, 'all_cat'])\n    )\n\nbv_2 = BernoulliVectorizer() \n\nbv_2.word_dict_query = hand_crafted_word_dict\n\n# split train into train and validation sets \n# \n\ntrain_wordvecs = bv_2.transform(train_processed.iloc[:6000, :]) # since paper categories are already randomized\nval_wordvecs = bv_2.transform(train_processed.iloc[6000:, :])\n\nbnb = BernoulliNB(adj=1) \nbnb.fit(train_wordvecs, train_processed.loc[:5999, 'Category'])\nval_tmp_results = bnb.predict(val_wordvecs)\n\nnp.mean(val_tmp_results == train_processed.loc[6000:, 'Category']) \n# 0.7893333333333333 before removing pure numbers during pre-processing \n# 0.7906666666666666 \n# This is a good improvement achieved without using low-frequency words, which would lead to overfitting. \n\n\n# In[ ]:\n\n\n# Other models\n# example code\n\nimport numpy as np \nimport pandas as pd \nimport os \nimport re \nimport nltk \nfrom sklearn.datasets import load_files \nnltk.download('stopwords') \nimport pickle \nfrom nltk.corpus import stopwords \nfrom nltk.stem import WordNetLemmatizer \nimport string\n\n### test multinomial Naive Bayes \nfrom sklearn.feature_extraction.text import CountVectorizer \n\nvectorizer = CountVectorizer(max_features=None, min_df = 5, \n                             max_df = 0.8, \n                             ngram_range=(1,3), \n                             strip_accents='unicode', \n                             stop_words=stopwords.words('english'))\n\n\ntrain_dict = vectorizer.fit(train_processed['Abstract'], y = train_processed['Category'])\n\ntrain_vecs = vectorizer.transform(train_processed['Abstract']).toarray() \ntest_vecs = vectorizer.transform(test_processed['Abstract']).toarray()\n\nfrom sklearn.feature_extraction.text import TfidfTransformer \ntfidfconverter = TfidfTransformer() \n\ntrain_vecs = tfidfconverter.fit_transform(train_vecs).toarray()\ntest_vecs = tfidfconverter.transform(test_vecs).toarray()\n# the test set reuses the transformer fitted on train; do not refit on test\nfrom sklearn.naive_bayes import MultinomialNB \n\nMNNB_classifier = MultinomialNB(alpha=0.95, fit_prior=False) \n\nMNNB_classifier.fit(train_vecs, train_processed['Category']) \n\ntest_pred = MNNB_classifier.predict(test_vecs)\n# export \noutput_prediction = pd.DataFrame(test_pred).reset_index().rename(\n    columns={'index':'Id', 0:'Category'})\n# Update file path if needed\noutput_prediction.to_csv('/home/jx/Documents/IFT_6390/competition_1/test_prediction_8.csv', index = False)\n\n\n", "sub_path": "Deep_Learning/paper_abstract_classification.py", "file_name": "paper_abstract_classification.py", "file_ext": "py", "file_size_in_byte": 11431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "string.printable", "line_number": 32, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 37, "usage_type": "call"}, {"api_name": 
"re.sub", "line_number": 38, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 39, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 40, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 45, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 140, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 142, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 180, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 227, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 235, "usage_type": "call"}, {"api_name": "pandas.unique", "line_number": 240, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 247, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 311, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 329, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 338, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 342, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 342, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 351, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 358, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 364, "usage_type": "call"}]} +{"seq_id": "94638971", "text": "from kivy_ios.toolchain import Recipe, shprint\nfrom os.path import join\nimport sh\n\n\narch_mapper = {'x86_64': 'darwin64-x86_64-cc',\n 'arm64': 'ios64-cross'}\n\n\nclass OpensslRecipe(Recipe):\n version = \"1.1.1l\"\n url = \"http://www.openssl.org/source/openssl-{version}.tar.gz\"\n libraries = [\"libssl.a\", 
\"libcrypto.a\"]\n include_dir = \"include\"\n include_per_arch = True\n\n def build_arch(self, arch):\n build_env = arch.get_env()\n target = arch_mapper[arch.arch]\n shprint(sh.env, _env=build_env)\n sh.perl(join(self.build_dir, \"Configure\"),\n target,\n _env=build_env)\n if target.endswith('-cross'):\n with open('Makefile', 'r') as makefile:\n filedata = makefile.read()\n filedata = filedata.replace('$(CROSS_TOP)/SDKs/$(CROSS_SDK)', arch.sysroot)\n with open('Makefile', 'w') as makefile:\n makefile.write(filedata)\n shprint(sh.make, \"clean\")\n shprint(sh.make, self.ctx.concurrent_make, \"build_libs\")\n\n\nrecipe = OpensslRecipe()\n", "sub_path": "kivy_ios/recipes/openssl/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1086, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "kivy_ios.toolchain.Recipe", "line_number": 10, "usage_type": "name"}, {"api_name": "kivy_ios.toolchain.shprint", "line_number": 20, "usage_type": "call"}, {"api_name": "sh.env", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sh.perl", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "kivy_ios.toolchain.shprint", "line_number": 30, "usage_type": "call"}, {"api_name": "sh.make", "line_number": 30, "usage_type": "attribute"}, {"api_name": "kivy_ios.toolchain.shprint", "line_number": 31, "usage_type": "call"}, {"api_name": "sh.make", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "150241665", "text": "from jina.types.sets import DocumentSet\nfrom jina.types.sets.document_set import MultimodalDocumentSet\nfrom jina.types.document.multimodal import MultimodalDocument\n\n\ndef test_from_documents_set():\n docs = []\n for i in range(0, 3):\n doc = MultimodalDocument.from_modality_content_mapping({'modA': f'textA {i}', 'modB': f'textB {i}'})\n docs.append(doc)\n\n for doc in MultimodalDocumentSet(docs):\n assert len(doc.chunks) == 2\n\n for doc in MultimodalDocumentSet(DocumentSet(docs)):\n assert len(doc.chunks) == 2\n", "sub_path": "tests/unit/types/test_multimodaldocumentset.py", "file_name": "test_multimodaldocumentset.py", "file_ext": "py", "file_size_in_byte": 547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "jina.types.document.multimodal.MultimodalDocument.from_modality_content_mapping", "line_number": 9, "usage_type": "call"}, {"api_name": "jina.types.document.multimodal.MultimodalDocument", "line_number": 9, "usage_type": "name"}, {"api_name": "jina.types.sets.document_set.MultimodalDocumentSet", "line_number": 12, "usage_type": "call"}, {"api_name": "jina.types.sets.document_set.MultimodalDocumentSet", "line_number": 15, "usage_type": "call"}, {"api_name": "jina.types.sets.DocumentSet", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "588433461", "text": "import pprint\nfrom json import loads\n\n\ndef assert_json(json, ethalon):\n \"\"\"\n Основной метод сравнения json структур\n :param json: json, который нужно сравнить с эталоном\n :param ethalon: эталоный json\n :raises Exception: исключение выбрасывается, если словарь mismatches не пустой\n \"\"\"\n mismatches = comparison(json, ethalon)\n if mismatches != {}:\n raise Exception(pprint.pformat(mismatches))\n\n\ndef comparison(json, ethalon):\n \"\"\"\n Метод для сравнения двух json-структур\n :param json: json, который нужно сравнить с эталоном\n :param ethalon: 
эталоный json\n :return: словарь mismatches с несоответствиями json'а эталону\n :rtype: dict\n \"\"\"\n\n mismatches = {}\n # Проверка на совпадение типов json'ов\n if type(json) != type(ethalon):\n mismatches[\"Type mismatch\"] = \"{0} expected, got {1}\".format(type(ethalon), type(json))\n else:\n # Если тип json'а - список, вызываем метод для сравнения списков\n if type(json) == list:\n mismatches = list_comparison(json, ethalon)\n # Если тип json'а - список, вызываем метод для сравнения словарей\n elif type(json) == dict:\n mismatches = dict_comparison(json, ethalon)\n\n return mismatches\n\n\ndef dict_comparison(json, ethalon):\n \"\"\"\n Метод для сравнения словарей\n :param json: dict: объект json\n :param ethalon: dict: эталон\n :return: словарь с несовпадениями mismatches\n :rtype: dict\n \"\"\"\n mismatches = {}\n for key in ethalon.keys():\n # Если ключ отсутствует в json, записываем его в словарь mismatches с текстом ошибки\n if key not in json.keys():\n mismatches[key] = \"No such key in json\"\n # Если ключ присутствует в json, сравниваем тип значения из эталона и json'а по данному ключу\n else:\n mism_child = comparison(json[key], ethalon[key])\n if mism_child != {}:\n # Если словарь mism_child не пустой, записываем его в словарь mismatches\n mismatches[key] = mism_child\n\n # Идем циклом по всем ключам в json'е\n for key in json.keys():\n # Если ключ отсутствует в эталоне, записываем ошибку в словарь mismatches\n if key not in ethalon.keys():\n mismatches[key] = \"No such key in ethalon\".format(key)\n\n return mismatches\n\n\ndef list_comparison(json, ethalon):\n \"\"\"\n Метод для сравнения списков\n :param json: list: список объектов из json'а\n :param ethalon: list: список объектов из эталона\n :return: словарь с несовпадениями mismatches\n :rtype: dict\n \"\"\"\n mismatches = {}\n # Сравниваем длину списков: если длина не совпадает, записываем в mismatches\n if len(json) != len(ethalon):\n mismatches[\"Length mismatch\"] = \"Ethalon expected {0} elements, got {1}\".format(len(ethalon), len(json))\n else:\n # Идем циклом по всем элементам эталона\n for i, value in enumerate(ethalon):\n mism_child = comparison(json[i], value)\n # Если словарь mism_child не пустой, записываем его в словарь mismatches\n if mism_child != {}:\n mismatches[\"Object at index {0}\".format(i)] = mism_child\n\n return mismatches\n\n\ndef read_file(filename: str):\n \"\"\"\n Чтение файла .json\n :param filename: str - название файла с json'ом\n :return: данные из файла .json\n :rtype: str\n \"\"\"\n file_obj = open(filename + '.json', 'r')\n return file_obj.read().replace('\\n', '')\n\n\njson = loads(read_file(\"example_json\"))\nethalon = loads(read_file(\"example_ethalon\"))\nassert_json(json, ethalon)", "sub_path": "json_validation.py", "file_name": "json_validation.py", "file_ext": "py", "file_size_in_byte": 4465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pprint.pformat", "line_number": 14, "usage_type": "call"}, {"api_name": "json.keys", "line_number": 52, "usage_type": "call"}, {"api_name": "json.keys", "line_number": 62, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 104, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "123316752", "text": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ReporterAstronaut(models.Model):\n reporter = models.ForeignKey(\n verbose_name=_('Astronaut'),\n 
to='auth.User',\n        db_index=True,\n        limit_choices_to={'groups__name': 'Astronauts'})\n\n    class Meta:\n        abstract = True\n\n\nclass ReporterAnyone(models.Model):\n    reporter = models.ForeignKey(\n        verbose_name=_('Reporter'),\n        db_index=True,\n        to='auth.User')\n\n    class Meta:\n        abstract = True\n", "sub_path": "habitat/_common/models/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "226636669", "text": "#!/usr/bin/env python2.7\n# -*- coding:UTF-8 -*-2\nu\"\"\"general.py\n\nCopyright(c)2019 Yukio Kuro\nThis software is released under BSD license.\n\nGeneral-purpose level module.\n\"\"\"\nimport random as __random\nimport inventory as __inventory\nimport utils.const as _const\n__REWARD_SP = 20\n__selected_2p = 0\n__versus_level = 0\n__deck = ()\n__equips = ()\n\n\nclass Level(object):\n    u\"\"\"Level data.\n    \"\"\"\n    __slots__ = \"_deck\", \"_equip\", \"_player\", \"_skill\"\n\n    def __init__(self, equip, skill, deck):\n        u\"\"\"Constructor.\n        \"\"\"\n        self._equip = equip\n        if 4 < len(self._equip):\n            raise ValueError(\"Exceed the limit of equip.\")\n        self._skill = skill\n        self._deck = deck\n        if _const.DECK_CAPACITY < len(self._deck):\n            raise ValueError(\"Exceed the limit of deck.\")\n\n    @property\n    def player(self):\n        u\"\"\"Get player number and level.\n        return: number, rank\n        \"\"\"\n        return self._player\n\n    @property\n    def equip(self):\n        u\"\"\"Get equipment.\n        \"\"\"\n        return self._equip\n\n    @property\n    def skill(self):\n        u\"\"\"Get skills.\n        \"\"\"\n        return self._skill\n\n    @property\n    def deck(self):\n        u\"\"\"Get deck.\n        \"\"\"\n        return self._deck\n\n\nclass _General(Level):\n    u\"\"\"General-purpose level data.\n    \"\"\"\n    __slots__ = ()\n\n    def __init__(self, equip, skill, deck, player):\n        u\"\"\"Constructor.\n        \"\"\"\n        self._player = player\n        super(_General, self).__init__(equip, skill, deck)\n\n    def __repr__(self):\n        u\"\"\"Get string representation.\n        \"\"\"\n        return u\"<Player number:{0}, Equips:{1}, Skills:{2}, Deck:{3}>\".format(\n            self._player[0], self._equip, self._skill, self._deck)\n\n    @property\n    def player(self):\n        u\"\"\"Get player number and rank.\n        return: number, rank\n        \"\"\"\n        if not hasattr(self, \"_player\"):\n            self._player = divmod(self.number, _const.PLAYER_NUMBER+1)[::-1]\n        return self._player\n\n    @property\n    def rewards(self):\n        u\"\"\"Get reward cards.\n        \"\"\"\n        return self.__rewards\n\n    @property\n    def is_playable(self):\n        u\"\"\"True if a battle is possible.\n        \"\"\"\n        return True\n\n\ndef get_endless():\n    u\"\"\"Get the endless level.\n    Sets the endless progress and deck when called.\n    \"\"\"\n    import armament.units as __units\n\n    def _get_skills():\n        u\"\"\"Get endless skills.\n        \"\"\"\n        def __get_skills(player):\n            u\"\"\"Get skills by player and slot.\n            \"\"\"\n            import armament.skill as 
__skill\n            return (\n                skill for skill in (\n                    __skill.get(learn) for\n                    learn in __units.get_player(player).learnable) if\n                skill.slot <= slot)\n        slot = int(\n            float(progress)/float(_const.ENDLESS_LIMIT) *\n            float(_const.SKILL_CAPACITY))\n        result = []\n        choices = list(__get_skills(player))\n        while 0 < slot and choices:\n            skill = choices.pop(__random.randint(0, len(choices)-1))\n            result.append(skill)\n            slot -= skill.slot\n            choices = [\n                skill for skill in __get_skills(player) if skill not in result]\n        return tuple(result)\n\n    def _get_deck():\n        u\"\"\"Get the endless deck.\n        \"\"\"\n        def __get_deck():\n            u\"\"\"Get deck by type and rank.\n            \"\"\"\n            import armament.collectible as __collectible\n            return (collection for collection in (\n                _collection for _collection in __collectible.get_all() if\n                _collection.type in (\n                    _const.SUMMON_TYPE, _const.SORCERY_TYPE,\n                    _const.SHIELD_TYPE)) if collection.rank <=\n                progress/(_const.ENDLESS_LIMIT >> 2)+1)\n        result = []\n        slot = progress/10*4+4\n        _slot = slot if slot < _const.DECK_CAPACITY else _const.DECK_CAPACITY\n        choices = [card for card in __get_deck()]\n        limit = 3\n        while 0 < _slot and choices:\n            card = __random.choice(choices)\n            number = __random.randint(0, limit if limit < _slot else _slot)\n            result.extend((card,)*number)\n            _slot -= number\n            choices = [card for card in __get_deck() if card not in result]\n        return result\n\n    def _get_equip():\n        u\"\"\"Get endless equipment.\n        \"\"\"\n        def __get_equip(progress):\n            u\"\"\"Get equipment by SP and category.\n            \"\"\"\n            import armament.equip as __equip\n            return (\n                equip for equip in (\n                    equip for equip in __equip.get_all() if equip.sp <=\n                    progress*__REWARD_SP\n                ) if equip.category in (\n                    caegory for caegory in __units.get_player(\n                        player).equippable+__inventory.Skill.add_equippable(\n                        skills) if caegory in _caegorys))\n        result = []\n        for _caegorys in (\n            _const.WEAPON_CATEGORYS, _const.HEAD_CATEGORYS,\n            _const.BODY_CATEGORYS, _const.ACCESSORY_CATEGORYS\n        ):\n            equips = tuple(\n                equip for equip in __get_equip(progress) if equip.number != 0)\n            result.append(__random.choice(equips).number if equips else 0)\n        return tuple(result)\n    global __deck, __equips\n    progress = __inventory.Utils.get_endless()+1\n    progress = (\n        progress if progress < _const.ENDLESS_LIMIT else\n        _const.ENDLESS_LIMIT)\n    __inventory.Utils.set_endless(progress)\n    player = __random.randint(\n        0, _const.PLAYER_NUMBER-1 if progress < _const.ENDLESS_LIMIT else\n        _const.PLAYER_NUMBER)\n    skills = _get_skills()\n    __equips = _get_equip()\n    __deck = tuple(card.number for card in _get_deck())\n    rank = progress/10\n    return _General(\n        __equips, tuple(skill.number for skill in skills), __deck,\n        (player, _const.RANK_LIMIT if _const.RANK_LIMIT < rank else rank))\n\n\ndef get_reward():\n    u\"\"\"Get a reward item.\n    \"\"\"\n    result = tuple(equip for equip in __equips if equip != 0)\n    if result:\n        return __random.choice(result)\n    return 0\n\n\ndef get_deck():\n    u\"\"\"Get the endless deck.\n    \"\"\"\n    return __deck\n\n\ndef get_1p(rank):\n    u\"\"\"Get the 1P level.\n    \"\"\"\n    return _General(\n        __inventory.Equip.get_all(), __inventory.Skill.get_equiped(),\n        __inventory.Deck.get_all(), (__inventory.Utils.get_player(), rank))\n\n\ndef get_versus_level():\n    u\"\"\"Get the versus level number.\n    \"\"\"\n    return __versus_level\n\n\ndef set_versus_level(value):\n    u\"\"\"Set the versus level number.\n    \"\"\"\n    global __versus_level\n    __versus_level = int(value)\n\n\ndef get_selected_2p():\n    u\"\"\"Get the 2P selection state.\n    \"\"\"\n    return __selected_2p\n\n\ndef set_selected_2p(value):\n    u\"\"\"Set the 2P selection state.\n    \"\"\"\n    global __selected_2p\n    __selected_2p = int(value)\n\n\ndef get_2p():\n    
u\"\"\"Get the 2P level.\n    \"\"\"\n    old = __inventory.Utils.get_player()\n    set_player = __inventory.Utils.set_player\n    set_player(get_selected_2p())\n    result = _General(\n        __inventory.Equip.get_all(),\n        __inventory.Skill.get_equiped(),\n        __inventory.Deck.get_all(),\n        (__inventory.Utils.get_player(), __versus_level))\n    set_player(old)\n    return result\n", "sub_path": "Source/armament/level/general.py", "file_name": "general.py", "file_ext": "py", "file_size_in_byte": 7692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "utils.const.DECK_CAPACITY", "line_number": 33, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.const.PLAYER_NUMBER", "line_number": 85, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 85, "usage_type": "name"}, {"api_name": "armament.skill.get", "line_number": 116, "usage_type": "call"}, {"api_name": "armament.skill", "line_number": 116, "usage_type": "name"}, {"api_name": "armament.units.get_player", "line_number": 117, "usage_type": "call"}, {"api_name": "armament.units", "line_number": 117, "usage_type": "name"}, {"api_name": "utils.const.ENDLESS_LIMIT", "line_number": 120, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 120, "usage_type": "name"}, {"api_name": "utils.const.SKILL_CAPACITY", "line_number": 121, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 121, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 125, "usage_type": "call"}, {"api_name": "armament.collectible.get_all", "line_number": 140, "usage_type": "call"}, {"api_name": "armament.collectible", "line_number": 140, "usage_type": "name"}, {"api_name": "utils.const.SUMMON_TYPE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 142, "usage_type": "name"}, {"api_name": "utils.const.SORCERY_TYPE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "utils.const.SHIELD_TYPE", "line_number": 143, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 143, "usage_type": "name"}, {"api_name": "utils.const.ENDLESS_LIMIT", "line_number": 144, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 144, "usage_type": "name"}, {"api_name": "utils.const.DECK_CAPACITY", "line_number": 147, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 147, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 151, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 152, "usage_type": "call"}, {"api_name": "armament.equip.get_all", "line_number": 167, "usage_type": "call"}, {"api_name": "armament.equip", "line_number": 167, "usage_type": "name"}, {"api_name": "armament.units.get_player", "line_number": 170, "usage_type": "call"}, {"api_name": "armament.units", "line_number": 170, "usage_type": "name"}, {"api_name": "inventory.Skill.add_equippable", "line_number": 171, "usage_type": "call"}, {"api_name": "inventory.Skill", "line_number": 171, "usage_type": "attribute"}, {"api_name": "utils.const.WEAPON_CATEGORYS", "line_number": 175, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.const.HEAD_CATEGORYS", "line_number": 175, "usage_type": "attribute"}, {"api_name": "utils.const.BODY_CATEGORYS", "line_number": 176, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 176, "usage_type": "name"}, 
{"api_name": "utils.const.ACCESSORY_CATEGORYS", "line_number": 176, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 180, "usage_type": "call"}, {"api_name": "inventory.Utils.get_endless", "line_number": 183, "usage_type": "call"}, {"api_name": "inventory.Utils", "line_number": 183, "usage_type": "attribute"}, {"api_name": "utils.const.ENDLESS_LIMIT", "line_number": 185, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 185, "usage_type": "name"}, {"api_name": "utils.const.ENDLESS_LIMIT", "line_number": 186, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 186, "usage_type": "name"}, {"api_name": "inventory.Utils.set_endless", "line_number": 187, "usage_type": "call"}, {"api_name": "inventory.Utils", "line_number": 187, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 188, "usage_type": "call"}, {"api_name": "utils.const.ENDLESS_LIMIT", "line_number": 189, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 189, "usage_type": "name"}, {"api_name": "utils.const.PLAYER_NUMBER", "line_number": 189, "usage_type": "attribute"}, {"api_name": "utils.const.PLAYER_NUMBER", "line_number": 190, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 190, "usage_type": "name"}, {"api_name": "utils.const.RANK_LIMIT", "line_number": 197, "usage_type": "attribute"}, {"api_name": "utils.const", "line_number": 197, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 205, "usage_type": "call"}, {"api_name": "inventory.Equip.get_all", "line_number": 219, "usage_type": "call"}, {"api_name": "inventory.Equip", "line_number": 219, "usage_type": "attribute"}, {"api_name": "inventory.Skill.get_equiped", "line_number": 219, "usage_type": "call"}, {"api_name": "inventory.Skill", "line_number": 219, "usage_type": "attribute"}, {"api_name": "inventory.Deck.get_all", "line_number": 220, "usage_type": "call"}, {"api_name": "inventory.Deck", "line_number": 220, "usage_type": "attribute"}, {"api_name": "inventory.Utils.get_player", "line_number": 220, "usage_type": "call"}, {"api_name": "inventory.Utils", "line_number": 220, "usage_type": "attribute"}, {"api_name": "inventory.Utils.get_player", "line_number": 252, "usage_type": "call"}, {"api_name": "inventory.Utils", "line_number": 252, "usage_type": "attribute"}, {"api_name": "inventory.Utils", "line_number": 253, "usage_type": "attribute"}, {"api_name": "inventory.Equip.get_all", "line_number": 256, "usage_type": "call"}, {"api_name": "inventory.Equip", "line_number": 256, "usage_type": "attribute"}, {"api_name": "inventory.Skill.get_equiped", "line_number": 257, "usage_type": "call"}, {"api_name": "inventory.Skill", "line_number": 257, "usage_type": "attribute"}, {"api_name": "inventory.Deck.get_all", "line_number": 258, "usage_type": "call"}, {"api_name": "inventory.Deck", "line_number": 258, "usage_type": "attribute"}, {"api_name": "inventory.Utils.get_player", "line_number": 259, "usage_type": "call"}, {"api_name": "inventory.Utils", "line_number": 259, "usage_type": "attribute"}]} +{"seq_id": "342800388", "text": "import unittest\n\nimport numpy as np\nimport pytest\nfrom sympl import (\n Prognostic, ensure_no_shared_keys, SharedKeyError, DataArray,\n combine_dimensions, set_direction_names, Implicit, Diagnostic,\n TendencyInDiagnosticsWrapper)\nfrom sympl._core.util import (\n update_dict_by_adding_another, get_component_aliases)\n\n\ndef same_list(list1, list2):\n return (len(list1) == len(list2) and all(\n [item in list2 
for item in list1] + [item in list1 for item in list2]))\n\n\nclass MockPrognostic(Prognostic):\n\n def __init__(self):\n self._num_updates = 0\n\n def __call__(self, state):\n self._num_updates += 1\n return {}, {'num_updates': self._num_updates}\n\n\nclass MockImplicit(Implicit):\n\n def __init__(self):\n self._a = 1\n\n def __call__(self, state):\n return self._a\n\n\nclass MockDiagnostic(Diagnostic):\n\n def __init__(self):\n self._a = 1\n\n def __call__(self, state):\n return self._a\n\n\ndef test_update_dict_by_adding_another_adds_shared_arrays():\n old_a = np.array([1., 1.])\n dict1 = {'a': old_a}\n dict2 = {'a': np.array([2., 3.]), 'b': np.array([0., 1.])}\n update_dict_by_adding_another(dict1, dict2)\n assert 'b' in dict1.keys()\n assert dict1['a'] is old_a\n assert np.all(dict1['a'] == np.array([3., 4.]))\n assert np.all(dict2['a'] == np.array([2., 3.]))\n assert np.all(dict2['b'] == np.array([0., 1.]))\n assert len(dict1.keys()) == 2\n assert len(dict2.keys()) == 2\n\n\ndef test_update_dict_by_adding_another_adds_shared_arrays_reversed():\n old_a = np.array([1., 1.])\n dict1 = {'a': np.array([2., 3.])}\n dict2 = {'a': old_a, 'b': np.array([0., 1.])}\n update_dict_by_adding_another(dict2, dict1)\n assert 'b' not in dict1.keys()\n assert dict2['a'] is old_a\n assert np.all(dict2['a'] == np.array([3., 4.]))\n assert np.all(dict1['a'] == np.array([2., 3.]))\n assert np.all(dict2['b'] == np.array([0., 1.]))\n assert len(dict1.keys()) == 1\n assert len(dict2.keys()) == 2\n\n\nclass DummyPrognostic(Prognostic):\n input_properties = {'temperature': {'alias': 'T'}}\n diagnostic_properties = {'pressure': {'alias': 'P'}}\n tendency_properties = {'temperature': {}}\n\n def __init__(self):\n self._a = 1\n\n def __call__(self, state):\n return self._a\n\n\ndef test_get_component_aliases_with_no_args():\n aliases = get_component_aliases()\n assert type(aliases) == dict\n assert len(aliases.keys()) == 0\n\n\ndef test_get_component_aliases_with_single_component_arg():\n components = [MockPrognostic(), MockImplicit(), MockDiagnostic(),\n TendencyInDiagnosticsWrapper(DummyPrognostic(), 'dummy')]\n for c, comp in enumerate(components):\n aliases = get_component_aliases(comp)\n assert type(aliases) == dict\n if c == 3:\n assert len(aliases.keys()) == 2\n for k in ['T', 'P']:\n assert k in list(aliases.values())\n else:\n assert len(aliases.keys()) == 0\n\n\ndef test_get_component_aliases_with_two_component_args():\n components = [MockDiagnostic(), MockImplicit(), MockDiagnostic(),\n TendencyInDiagnosticsWrapper(DummyPrognostic(), 'dummy')]\n for comp in components[:3]:\n aliases = get_component_aliases(comp, components[-1])\n assert type(aliases) == dict\n assert len(aliases.keys()) == 2\n for k in ['T', 'P']:\n assert k in list(aliases.values())\n\n\nclass DummyProg1(Prognostic):\n input_properties = {'temperature': {'alias': 'T'}}\n tendency_properties = {'temperature': {'alias': 'TEMP'}}\n\n def __init__(self):\n self._a = 1\n\n def __call__(self, state):\n return self._a\n\n\nclass DummyProg2(Prognostic):\n input_properties = {'temperature': {'alias': 't'}}\n\n def __init__(self):\n self._a = 1\n\n def __call__(self, state):\n return self._a\n\n\nclass DummyProg3(Prognostic):\n input_properties = {'temperature': {}}\n diagnostic_properties = {'pressure': {}}\n tendency_properties = {'temperature': {}}\n\n def __init__(self):\n self._a = 1\n\n def __call__(self, state):\n return self._a\n\n\ndef test_get_component_aliases_with_different_values():\n # two different aliases in the same Component:\n 
aliases = get_component_aliases(DummyProg1())\n assert len(aliases.keys()) == 1\n assert aliases['temperature'] == 'TEMP'\n # two different aliases in different Components:\n aliases = get_component_aliases(DummyProg1(), DummyProg2())\n assert len(aliases.keys()) == 1\n assert aliases['temperature'] == 't'\n # NO aliases in component\n aliases = get_component_aliases(DummyProg3)\n assert len(aliases.keys()) == 0\n\n\ndef test_ensure_no_shared_keys_empty_dicts():\n ensure_no_shared_keys({}, {})\n\n\ndef test_ensure_no_shared_keys_one_empty_dict():\n ensure_no_shared_keys({'a': 1, 'b': 2}, {})\n ensure_no_shared_keys({}, {'a': 1, 'b': 2})\n\n\ndef test_ensure_no_shared_keys_with_no_shared_keys():\n ensure_no_shared_keys({'a': 1, 'b': 2}, {'c': 2, 'd': 1})\n ensure_no_shared_keys({'c': 2, 'd': 1}, {'a': 1, 'b': 2})\n\n\ndef test_ensure_no_shared_keys_with_shared_keys():\n try:\n ensure_no_shared_keys({'a': 1, 'b': 2}, {'e': 2, 'a': 1})\n except SharedKeyError:\n pass\n except Exception as err:\n raise err\n else:\n raise AssertionError(\n 'No exception raised but expected SharedKeyError.')\n\n\nclass CombineDimensionsTests(unittest.TestCase):\n\n def setUp(self):\n self.array_1d = DataArray(np.zeros((2,)), dims=['lon'])\n self.array_2d = DataArray(np.zeros((2, 2)), dims=['lat', 'lon'])\n self.array_3d = DataArray(np.zeros((2, 2, 2)),\n dims=['lon', 'lat', 'interface_levels'])\n set_direction_names(\n x=['lon'], y=['lat'], z=['mid_levels', 'interface_levels'])\n\n def tearDown(self):\n set_direction_names(x=[], y=[], z=[])\n\n def test_combine_dimensions_2d_and_3d(self):\n dims = combine_dimensions(\n [self.array_2d, self.array_3d], out_dims=('x', 'y', 'z'))\n assert same_list(dims, ['lon', 'lat', 'interface_levels'])\n\n def test_combine_dimensions_2d_and_3d_z_y_x(self):\n dims = combine_dimensions(\n [self.array_2d, self.array_3d], out_dims=('z', 'y', 'x'))\n assert same_list(dims, ['interface_levels', 'lat', 'lon'])\n\n def combine_dimensions_1d_shared(self):\n dims = combine_dimensions(\n [self.array_1d, self.array_1d], out_dims=['x'])\n assert same_list(dims, ['lon'])\n\n def combine_dimensions_1d_not_shared(self):\n array_1d_x = DataArray(np.zeros((2,)), dims=['lon'])\n array_1d_y = DataArray(np.zeros((2,)), dims=['lat'])\n dims = combine_dimensions([array_1d_x, array_1d_y], out_dims=['x', 'y'])\n assert same_list(dims, ['lon', 'lat'])\n\n def combine_dimensions_1d_wrong_direction(self):\n try:\n combine_dimensions(\n [self.array_1d, self.array_1d], out_dims=['z'])\n except ValueError:\n pass\n except Exception as err:\n raise err\n else:\n raise AssertionError('No exception raised but expected ValueError.')\n\n def combine_dimensions_1d_and_2d_extra_direction(self):\n try:\n combine_dimensions(\n [self.array_1d, self.array_2d], out_dims=['y'])\n except ValueError:\n pass\n except Exception as err:\n raise err\n else:\n raise AssertionError('No exception raised but expected ValueError.')\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n", "sub_path": "tests/test_util.py", "file_name": "test_util.py", "file_ext": "py", "file_size_in_byte": 7641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sympl.Prognostic", "line_number": 18, "usage_type": "name"}, {"api_name": "sympl.Implicit", "line_number": 28, "usage_type": "name"}, {"api_name": "sympl.Diagnostic", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 49, "usage_type": "call"}, {"api_name": "sympl._core.util.update_dict_by_adding_another", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "sympl._core.util.update_dict_by_adding_another", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "sympl.Prognostic", "line_number": 74, "usage_type": "name"}, {"api_name": "sympl._core.util.get_component_aliases", "line_number": 87, "usage_type": "call"}, {"api_name": "sympl.TendencyInDiagnosticsWrapper", "line_number": 94, "usage_type": "call"}, {"api_name": "sympl._core.util.get_component_aliases", "line_number": 96, "usage_type": "call"}, {"api_name": "sympl.TendencyInDiagnosticsWrapper", "line_number": 108, "usage_type": "call"}, {"api_name": "sympl._core.util.get_component_aliases", "line_number": 110, "usage_type": "call"}, {"api_name": "sympl.Prognostic", "line_number": 117, "usage_type": "name"}, {"api_name": "sympl.Prognostic", "line_number": 128, "usage_type": "name"}, {"api_name": "sympl.Prognostic", "line_number": 138, "usage_type": "name"}, {"api_name": "sympl._core.util.get_component_aliases", "line_number": 152, "usage_type": "call"}, {"api_name": "sympl._core.util.get_component_aliases", "line_number": 156, "usage_type": "call"}, {"api_name": "sympl._core.util.get_component_aliases", "line_number": 160, "usage_type": "call"}, {"api_name": "sympl.ensure_no_shared_keys", "line_number": 165, "usage_type": "call"}, {"api_name": "sympl.ensure_no_shared_keys", "line_number": 169, "usage_type": "call"}, {"api_name": "sympl.ensure_no_shared_keys", "line_number": 170, "usage_type": "call"}, {"api_name": "sympl.ensure_no_shared_keys", "line_number": 174, "usage_type": "call"}, {"api_name": "sympl.ensure_no_shared_keys", "line_number": 175, "usage_type": "call"}, {"api_name": "sympl.ensure_no_shared_keys", "line_number": 180, "usage_type": "call"}, {"api_name": "sympl.SharedKeyError", "line_number": 181, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 190, "usage_type": "attribute"}, {"api_name": "sympl.DataArray", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 193, "usage_type": "call"}, {"api_name": "sympl.DataArray", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 194, "usage_type": "call"}, {"api_name": "sympl.DataArray", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 195, "usage_type": "call"}, {"api_name": "sympl.set_direction_names", "line_number": 197, 
"usage_type": "call"}, {"api_name": "sympl.set_direction_names", "line_number": 201, "usage_type": "call"}, {"api_name": "sympl.combine_dimensions", "line_number": 204, "usage_type": "call"}, {"api_name": "sympl.combine_dimensions", "line_number": 209, "usage_type": "call"}, {"api_name": "sympl.combine_dimensions", "line_number": 214, "usage_type": "call"}, {"api_name": "sympl.DataArray", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 219, "usage_type": "call"}, {"api_name": "sympl.DataArray", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 220, "usage_type": "call"}, {"api_name": "sympl.combine_dimensions", "line_number": 221, "usage_type": "call"}, {"api_name": "sympl.combine_dimensions", "line_number": 226, "usage_type": "call"}, {"api_name": "sympl.combine_dimensions", "line_number": 237, "usage_type": "call"}, {"api_name": "pytest.main", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "260526311", "text": "#/usr/local/bin/python3\nimport ase.io as io\nfrom ase.build import cut\nfrom ase.spacegroup import crystal\n\na = 9.04\nskutterudite = crystal(('Co', 'Sb'),\n basis=[(0.25, 0.25, 0.25), (0.0, 0.335, 0.158)],\n spacegroup=204,\n cellpar=[a, a, a, 90, 90, 90])\n\n# Create a new atoms instance with Co at origo including all atoms on the\n# surface of the unit cell\ncosb3 = cut(skutterudite, origo=(0.25, 0.25, 0.25), extend=1.01)\n\n# Define the atomic bonds to show\nbondatoms = []\nsymbols = cosb3.get_chemical_symbols()\nfor i in range(len(cosb3)):\n for j in range(i):\n if (symbols[i] == symbols[j] == 'Co' and\n cosb3.get_distance(i, j) < 4.53):\n bondatoms.append((i, j))\n elif (symbols[i] == symbols[j] == 'Sb' and\n cosb3.get_distance(i, j) < 2.99):\n bondatoms.append((i, j))\n\n# Create nice-looking image using povray\nio.write('spacegroup-cosb3.pov', cosb3,\n transparent=False,\n display=False,\n run_povray=True,\n camera_type='perspective',\n canvas_width=320,\n radii=0.4,\n rotation='90y',\n bondlinewidth=0.07,\n bondatoms=bondatoms)\n", "sub_path": "LasAndClf-dev/processing_methods/plot_paper_figrues/plot_structura_figures/pov_examples/spacegroup-cosb3.py", "file_name": "spacegroup-cosb3.py", "file_ext": "py", "file_size_in_byte": 1219, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ase.spacegroup.crystal", "line_number": 7, "usage_type": "call"}, {"api_name": "ase.build.cut", "line_number": 14, "usage_type": "call"}, {"api_name": "ase.io.write", "line_number": 29, "usage_type": "call"}, {"api_name": "ase.io", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "307399325", "text": "from django.conf.urls import url\r\n\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n\turl(r'^$', views.hrView, name=\"HR\"),\r\n\turl(r'^manuals/', views.manualsView, name='manuals'),\r\n\turl(r'^agreements/', views.usageView, name='usage'),\r\n\turl(r'^instructions/', views.instructionsView, name='instructions'),\r\n]", "sub_path": "hr/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "320218406", "text": "import os, time\r\nimport cv2\r\nimport csv\r\nimport joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import svm\r\nfrom sklearn import metrics\r\nfrom skimage import feature\r\nfrom skimage import exposure\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n#-------------------------------- CSV treatment\r\n\r\n\"\"\"\r\n    ; separates the values\r\n    \\n jumps to a new line\r\n\r\n\"\"\"\r\n\r\n\r\n\r\ndef csv_write(csv_name, number_pix):\r\n    \"\"\"\r\n    Write the csv header:\r\n    number of pixels\r\n    label of data\r\n    \"\"\"\r\n\r\n    #open csv\r\n    with open(csv_name, 'w') as file:\r\n        writer = csv.writer(file)\r\n\r\n        #first column holds the label\r\n        file.write(\"label;\")\r\n\r\n        #write pixel + number ex: pixel50\r\n        for i in range(0, number_pix):\r\n            file.write(\"pixel\"+str(i)+\";\")\r\n\r\n        file.write(\"\\n\")\r\n\r\n\r\n\r\n\r\ndef to_list(thresh):\r\n    \"\"\"\r\n    Transform picture to only 0 or 1\r\n    if pix > 120 => 1\r\n    else => 0\r\n    \"\"\"\r\n\r\n    data = []\r\n\r\n    #We traverse the binarized picture left to right, top to bottom\r\n    for i in range(thresh.shape[0]):\r\n        for j in range(thresh.shape[1]):\r\n\r\n            #if value of pixel > 120 we put 1 else 0\r\n            \r\n            if thresh[i, j] > 120:\r\n                nb = 1\r\n            else:\r\n                nb = 0\r\n\r\n            data.append(nb)\r\n\r\n    #our picture\r\n    return data\r\n\r\n\r\n\r\ndef write_data_into_csv(csv_name, data, label):\r\n    \"\"\"\r\n    Fill the csv\r\n    - one line of pixel values (0 or 1)\r\n    - label number (name of label ex: 1->dog 2-> cat)\r\n    \"\"\"\r\n\r\n    #open csv\r\n    with open(csv_name, 'a') as file:\r\n        writer = csv.writer(file)\r\n\r\n        #write label scored (0 cat, 1 dog)\r\n        file.write(label+\";\")\r\n\r\n        #write each pixel value of the picture\r\n        for i in data:\r\n            file.write(str(i)+\";\")\r\n\r\n        file.write(\"\\n\")\r\n\r\n\r\n\r\n#-------------------------------- Picture treatment\r\n\r\ndef open_picture(image):\r\n    \"\"\"\r\n    Open picture\r\n    \"\"\"\r\n\r\n    img = cv2.imread(image)\r\n    return img\r\n\r\ndef show_picture(name, image, mode, destroy):\r\n    \"\"\"\r\n    Show picture\r\n    mode 0 -> wait for a key press\r\n    mode 1 -> display the frame for 0.1 sec\r\n    destroy = y -> destroy the window\r\n    \"\"\"\r\n\r\n    cv2.imshow(name, image)\r\n\r\n    #Wait for a key press before moving on\r\n    cv2.waitKey(mode)\r\n\r\n    #Wait a moment before moving on\r\n    if mode == 1:\r\n        time.sleep(0.1)\r\n\r\n    #Destroy the current window\r\n    if destroy == \"y\":\r\n        cv2.destroyAllWindows()\r\n\r\n\r\ndef blanck_picture(img):\r\n    \"\"\"\r\n    Create an empty black picture\r\n    \"\"\"\r\n    #Make a picture: width, height and channels in uint8\r\n    blank_image = 
np.zeros((img.shape[0],img.shape[1],3), np.uint8)\r\n    #Fill the picture black from 0 to width and 0 to height\r\n    blank_image[0:img.shape[0], 0:img.shape[1]] = 0, 0, 0\r\n    return blank_image\r\n\r\n\r\n\r\ndef make_contours(img):\r\n    \"\"\"\r\n    gray: one channel\r\n    thresh: binarized gray\r\n    find the contours with RETR_EXTERNAL (for a binarized picture,\r\n    outer points only)\r\n    and fill them to get filled contours\r\n    \"\"\"\r\n\r\n    #Create a black picture\r\n    blanck = blanck_picture(img)\r\n\r\n    #one channel\r\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n    #make threshold filter\r\n    _,thresh = cv2.threshold(gray,245,255,cv2.THRESH_BINARY_INV)\r\n\r\n    #Search contours from thresh\r\n    contours,h=cv2.findContours(thresh, cv2.RETR_EXTERNAL,\r\n                                cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n    #Fill contours with white on our black empty picture\r\n    for cnts in contours:\r\n        cv2.fillPoly(blanck, pts=[cnts], color=(255,255,255))\r\n\r\n    return blanck\r\n\r\n\r\ndef take_max_contour_to_csv(blanck, width, height):\r\n    \"\"\"\r\n    Retrieve the max contour (an imutils function would do this better)\r\n    and redraw it on a new picture\r\n    \"\"\"\r\n    #show_picture(\"blanck\", blanck, 0, \"\")\r\n    #Grayscale picture for one channel\r\n    grayblanck = cv2.cvtColor(blanck, cv2.COLOR_BGR2GRAY)\r\n\r\n    #Find contours\r\n    contours,h=cv2.findContours(grayblanck,cv2.RETR_EXTERNAL,\r\n                                cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n    #max contour area; flag for a bad picture;\r\n    maxi = 0; non = False;\r\n\r\n\r\n    #here\r\n    blanck1 = blanck_picture(blanck)\r\n\r\n    #take the max contour\r\n    for cnts in contours:\r\n\r\n        #but if area of contour > 11000: bad background !\r\n        if cv2.contourArea(cnts) > 11000:\r\n            non = True\r\n\r\n        else:\r\n            if cv2.contourArea(cnts) > maxi:\r\n                maxi = cv2.contourArea(cnts)\r\n\r\n    #Not a good picture if True\r\n    if non is False:\r\n\r\n        #Draw the max contour on a black picture\r\n        blanck1 = blanck_picture(blanck)\r\n        for cnts in contours:\r\n            if cv2.contourArea(cnts) == maxi:\r\n                cv2.fillPoly(blanck1, pts =[cnts], color=(255,255,255))\r\n                x, y, w, h = cv2.boundingRect(cnts)\r\n                \r\n        blanck1 = blanck1[y:y+h, x:x+w]\r\n        blanck1 = cv2.cvtColor(blanck1, cv2.COLOR_BGR2GRAY)\r\n        blanck1 = cv2.resize(blanck1, (int(width), int(height)))\r\n\r\n        return blanck1\r\n    else : return None  \r\n\r\n\r\ndef picture_treatment(csv_name, picture, w, h, label):\r\n    \"\"\"\r\n    We open the picture,\r\n    resize it\r\n    draw contours from a binarized picture\r\n    redraw it keeping only the max contour\r\n    transform it to 0 and 1\r\n    write it to the csv file\r\n    \"\"\"\r\n\r\n\r\n    img = open_picture(picture)\r\n    #(\"dza\", img, 0, \"\")\r\n\r\n    blanck = make_contours(img)\r\n    blanck1 = take_max_contour_to_csv(blanck, w, h)\r\n\r\n    if blanck1 is not None:\r\n        data = to_list(blanck1)\r\n        write_data_into_csv(csv_name, data, str(label))\r\n\r\n\r\n\r\n#-------------------------------- Model treatment\r\n\r\ndef csv_to_list(csv_name):\r\n    \"\"\"\r\n    Load data from the csv\r\n    we only take dataframe[1:]\r\n    because dataframe[0] is the header row\r\n    we add data to lists\r\n    X for the 0/1 pixels of the picture\r\n    Y for the label\r\n    \"\"\"\r\n\r\n    file = open(csv_name, 'r')\r\n\r\n    #dataframe[0] == header !\r\n    dataframe = file.readlines()\r\n    dataframe = dataframe[1:]\r\n\r\n    X = []; Y = [];\r\n\r\n    for i in dataframe:\r\n\r\n        #working list used to build a list of lists\r\n        liste_w = []\r\n\r\n        for j in i:\r\n            #if the element is a newline, add the\r\n            #working list to the final list and\r\n            #reinitialize it\r\n            if j == \"\\n\":\r\n                X.append(liste_w)\r\n                liste_w = []\r\n\r\n            else:\r\n                #skip if it's a delimiter or \" \"\r\n                if j == \";\" 
or j == \" \":\r\n                    pass\r\n\r\n                else:\r\n                    #labels may be strings: fall back to str\r\n                    try:\r\n                        j = int(j)\r\n                        liste_w.append(int(j))\r\n                    except:\r\n                        liste_w.append(str(j))\r\n\r\n    #Here we extract the labels\r\n    for i in X:\r\n        Y.append(i[0])\r\n        del i[0]\r\n\r\n    return X, Y\r\n\r\n\r\n\r\ndef training(X, Y, model_name):\r\n    \"\"\"\r\n    We define train data and test data\r\n    We call SVC with a linear kernel\r\n    We save the model under model_name\r\n    And make the prediction\r\n    \"\"\"\r\n\r\n    #define test and train data\r\n    X_train, Y_train = X, Y\r\n    X_test,Y_test = X, Y\r\n\r\n    #call SVC function\r\n    model = svm.SVC(kernel=\"linear\",C=2)\r\n    #model = KNeighborsClassifier(n_neighbors=3)\r\n    #fit method\r\n\r\n\r\n    model.fit(X_train,Y_train)\r\n\r\n    #save the model\r\n    joblib.dump(model, model_name)\r\n\r\n    #predict it!\r\n    predictions = model.predict(X_test)\r\n\r\n    print(\"Score\", metrics.accuracy_score(Y_test, predictions))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#TO BE IMPORTED\r\n\r\ndef head_writting(csv_name, number_pix):\r\n\r\n    #Write header of csv\r\n    csv_write(csv_name, number_pix)\r\n\r\n\r\ndef picture_writting(csv_name, path_folder, path_picture, w, h, label):\r\n\r\n    liste = os.listdir(path_folder)\r\n    for i in liste:\r\n        path_picture = path_folder + \"/\" + str(i)\r\n        print(path_picture)\r\n        picture_treatment(csv_name, path_picture, w, h, label)\r\n\r\n\r\ndef train(csv_name, model_name):\r\n\r\n    #Load data from the csv\r\n    X, Y = csv_to_list(csv_name)\r\n\r\n    #And make a model\r\n    training(X, Y, model_name)\r\n\r\n", "sub_path": "training/training.py", "file_name": "training.py", "file_ext": "py", "file_size_in_byte": 8357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "csv.writer", "line_number": 35, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 135, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 155, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 158, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 161, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 161, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 162, "usage_type": "attribute"}, {"api_name": "cv2.fillPoly", "line_number": 166, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 178, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 181, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 182, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 195, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 199, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 200, "usage_type": 
"call"}, {"api_name": "cv2.contourArea", "line_number": 208, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 209, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 210, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 213, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 311, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 311, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 319, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 324, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 324, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 342, "usage_type": "call"}]} +{"seq_id": "248271150", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/qt/qtgui/util/ui.py\n# Compiled at: 2019-08-19 15:09:30\n\"\"\"utilities to load ui files for widgets\"\"\"\nfrom builtins import object\nimport os, sys, functools\nfrom taurus.external.qt import Qt\nfrom taurus.external.qt import uic\n__all__ = [\n 'loadUi',\n 'UILoadable']\n\nclass __UI(object):\n pass\n\n\ndef loadUi(obj, filename=None, path=None, with_ui=None):\n \"\"\"\n Loads a QtDesigner .ui file into the given widget.\n If no filename is given, it tries to load from a file name which is the\n widget class name plus the extension \".ui\" (example: if your\n widget class is called MyWidget it tries to find a MyWidget.ui).\n If path is not given it uses the directory where the python file which\n defines the widget is located plus a *ui* directory (example: if your widget\n is defined in a file /home/homer/workspace/taurusgui/my_widget.py then it uses\n the path /home/homer/workspace/taurusgui/ui)\n\n :param filename: the QtDesigner .ui file name [default: None, meaning\n calculate file name with the algorithm explained before]\n :type filename: str\n :param path: directory where the QtDesigner .ui file is located\n [default: None, meaning calculate path with algorithm explained\n before]\n :type path: str\n :param with_ui: if True, the objects defined in the ui file will be\n accessible as submembers of an ui member of the widget. 
If\n        False, such objects will directly be members of the widget.\n    :type with_ui: bool\n    \"\"\"\n    if path is None:\n        obj_file = sys.modules[obj.__module__].__file__\n        path = os.path.join(os.path.dirname(obj_file), 'ui')\n    if filename is None:\n        filename = obj.__class__.__name__ + os.path.extsep + 'ui'\n    full_name = os.path.join(path, filename)\n    if with_ui is not None:\n        ui_obj = __UI()\n        setattr(obj, with_ui, ui_obj)\n        previous_members = set(dir(obj))\n        uic.loadUi(full_name, baseinstance=obj)\n        post_members = set(dir(obj))\n        new_members = post_members.difference(previous_members)\n        for member_name in new_members:\n            member = getattr(obj, member_name)\n            setattr(ui_obj, member_name, member)\n            delattr(obj, member_name)\n\n    else:\n        uic.loadUi(full_name, baseinstance=obj)\n    return\n\n\ndef UILoadable(klass=None, with_ui=None):\n    \"\"\"\n    A class decorator intended to be used in a Qt.QWidget to make its UI\n    loadable from a predefined QtDesigner UI file.\n    This decorator will add a :func:`loadUi` method to the decorated class and\n    optionally a property with a name given by the *with_ui* parameter.\n\n    The following example assumes the existence of the ui file\n    :file:`/ui/MyWidget.ui` which is a QWidget panel with *at\n    least* a QPushButton with objectName *my_button* ::\n\n        from taurus.external.qt import Qt\n        from taurus.qt.qtgui.util.ui import UILoadable\n\n        @UILoadable\n        class MyWidget(Qt.QWidget):\n\n            def __init__(self, parent=None):\n                Qt.QWidget.__init__(self, parent)\n                self.loadUi()\n                self.my_button.setText(\"This is MY button\")\n\n    Another example using a :file:`superUI.ui` file in the same directory as\n    the widget. The widget UI components can be accessed through the widget\n    member *_ui* ::\n\n        import os.path\n\n        from taurus.external.qt import Qt\n        from taurus.qt.qtgui.util.ui import UILoadable\n\n        @UILoadable(with_ui=\"_ui\")\n        class MyWidget(Qt.QWidget):\n\n            def __init__(self, parent=None):\n                Qt.QWidget.__init__(self, parent)\n                self.loadUi(filename=\"superUI.ui\", path=os.path.dirname(__file__))\n                self._ui.my_button.setText(\"This is MY button\")\n\n    :param with_ui: assigns a member to the decorated class from which you\n                    can access all UI components [default: None, meaning no\n                    member is created]\n    :type with_ui: str\n\n    .. warning::\n        the current implementation (Jul14) doesn't prevent Qt from overriding\n        any members you might have defined previously with the widget object names\n        from the UI file. 
This happens even if *with_ui* parameter is given.\n For example, if the UI contains a QPushButton with objectName\n *my_button*::\n\n @UILoadable(with_ui=\"_ui\")\n class MyWidget(Qt.QWidget):\n\n def __init__(self, parent=None):\n Qt.QWidget.__init__(self, parent)\n self.my_button = \"hello\"\n self.loadUi()\n widget = MyWidget()\n print widget.my_button\n \n\n This little problem should be solved in the next taurus version.\n \"\"\"\n if klass is None:\n return functools.partial(UILoadable, with_ui=with_ui)\n else:\n klass_name = klass.__name__\n klass_file = sys.modules[klass.__module__].__file__\n klass_path = os.path.join(os.path.dirname(klass_file), 'ui')\n\n def _loadUi(self, filename=None, path=None):\n if filename is None:\n filename = klass_name + os.path.extsep + 'ui'\n if path is None:\n path = klass_path\n return loadUi(self, filename=filename, path=path, with_ui=with_ui)\n\n klass.loadUi = _loadUi\n return klass\n\n\ndef main():\n from taurus.qt.qtgui.application import TaurusApplication\n app = TaurusApplication([], cmd_line_parser=None)\n\n @UILoadable(with_ui='ui')\n class A(Qt.QWidget):\n\n def __init__(self, parent=None):\n Qt.QWidget.__init__(self, parent)\n import taurus.qt.qtgui.panel.ui\n path = os.path.dirname(taurus.qt.qtgui.panel.ui.__file__)\n self.loadUi(filename='TaurusMessagePanel.ui', path=path)\n\n gui = A()\n gui.show()\n app.exec_()\n return\n\n\nif __name__ == '__main__':\n main()", "sub_path": "pycfiles/taurus-4.6.1-py2.7/ui.py", "file_name": "ui.py", "file_ext": "py", "file_size_in_byte": 6196, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "builtins.object", "line_number": 16, "usage_type": "name"}, {"api_name": "sys.modules", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "taurus.external.qt.uic.loadUi", "line_number": 53, "usage_type": "call"}, {"api_name": "taurus.external.qt.uic", "line_number": 53, "usage_type": "name"}, {"api_name": "taurus.external.qt.uic.loadUi", "line_number": 62, "usage_type": "call"}, {"api_name": "taurus.external.qt.uic", "line_number": 62, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 131, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "taurus.qt.qtgui.application.TaurusApplication", "line_number": 150, "usage_type": "call"}, {"api_name": "taurus.external.qt.Qt.QWidget", "line_number": 153, "usage_type": "attribute"}, {"api_name": "taurus.external.qt.Qt", "line_number": 153, "usage_type": "name"}, {"api_name": "taurus.external.qt.Qt.QWidget.__init__", "line_number": 156, "usage_type": "call"}, {"api_name": "taurus.external.qt.Qt.QWidget", "line_number": 156, "usage_type": "attribute"}, {"api_name": "taurus.external.qt.Qt", "line_number": 
156, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "taurus.external.qt.qt", "line_number": 158, "usage_type": "attribute"}, {"api_name": "taurus.external.qt", "line_number": 158, "usage_type": "name"}, {"api_name": "{'taurus.qt.qtgui.panel.ui': 'taurus.qt.qtgui.panel.ui'}", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "106101673", "text": "#!/usr/bin/env python\n#coding=utf-8\n\n'''\n@author: xiang.ye\nCreated on 2011-9-13\nLast Modified By xiang.ye#renren-inc.com on 2011-9-23\n'''\n\nimport os, time, re, string, codecs, traceback\nimport logger\n\nfrom xml.dom import minidom \n\n'''\n手机报警\n'''\nalert=\"wget -q -O /dev/null \\\"http://10.22.198.81:2000/receiver?number=\"\n\n'''\n路径生成器的,保存../etc/inspector.xml中的路径参数,此类用于最终生成对应用户文件的各种路径:\n工作路径(用以保存临时生成的文件,包括task.xml)\n用户原始文件备份路径\n生成的视频文件的存储路径\n生成的缩略图文件的存储路径\n'''\nclass PathFactory():\n def __init__(self,spath,wpath,lpath,bpath,vpath,ipath):\n self.spath=spath\n self.wpath=wpath\n self.lpath=lpath\n self.bpath=bpath\n self.vpath=vpath\n self.ipath=ipath\n\n '''\n 文件名去掉扩展名\n '''\n def getpurename(self,filename):\n index=filename.rfind('.')\n if index>0 and index&1 | grep \\'Duration\\''\n f=os.popen(cmd)\n hu=f.readlines()[0]\n yuan=re.compile('Duration: (.*?),')\n hang=yuan.findall(hu)\n ye=re.split(r\":|\\.\",hang[0])\n xiang=string.atoi(ye[0])*3600+string.atoi(ye[1])*60+string.atoi(ye[2])\n return str(xiang)\n except: \n traceback.print_exc()\n return '0'\n\n def createurlinfo(self,pi):\n '''切分uid和itemcode'''\n args=pi.purename.split('_')\n userid=args[0]\n itemcode=args[1]\n #infoCode = arg[2]\n '''拼完整的video url和image url'''\n urlvfull=self.urlv+pi.tpath+pi.vidname\n urlifull=self.urli+pi.tpath+pi.imgname\n '''调用durantion.pl计算totaltime'''\n totaltime=self.getduration4f4v(pi.vidfile)\n '''0表示文件类型是flv'''\n mediatype='0'\n return UrlInfo(userid,itemcode,urlvfull,urlifull,totaltime,mediatype,self.urldomain,self.urlsuccpath,self.urlfailpath)\n\n\n'''\nUrl生成器的产物,对应于某一次task\n'''\nclass UrlInfo():\n def __init__(self, userid,itemcode,urlvfull,urlifull,totaltime,mediatype,urldomain,urlsuccpath,urlfailpath):\n self.userId=userid\n self.itemCode=itemcode\n self.outerPlayerUrl=urlvfull\n self.itemUrl=urlvfull\n self.picUrl=urlifull\n self.totalTime=totaltime\n self.mediaType=mediatype\n self.urldomain=urldomain\n self.urlsuccpath=urlsuccpath\n self.urlfailpath=urlfailpath\n\n\n'''\ntaskxml工厂,此类用于生成和解析task.xml文件,这些操作都跟status有关,经过这个类的封装之后,taskmanager就不用再关心具体逻辑,只要照着TaskInfo中的内容做就行了\n'''\nclass TaskFactory():\n\n '''初始化\n @参数: wpath:任务地址\n '''\n def __init__(self,name,wpath):\n self.name=name\n self.wpath=wpath\n\n '''\n 生成xml节点\n '''\n def createnode(self, purename, dom, nodeName, listSubName=None, listSubText=None):\n if(dom==None):\n logger.error( '['+self.name+'] ['+purename+'] xml_dom_not_exist.' 
)\n        if(listSubName==None):\n            ele=dom.createElement(nodeName)\n        else:\n            ele=dom.createElement(nodeName)\n            for b in range(0,len(listSubName)):\n                child=dom.createElement(listSubName[b])\n                text=dom.createTextNode(listSubText[b])\n                child.appendChild(text)\n                ele.appendChild(child)\n        return ele\n\n    '''\n    Generate the task.xml file from status, PathInfo and UrlInfo\n    '''\n    def encode(self, status, pi, ui):\n        try: \n            impl = minidom.getDOMImplementation() \n            dom = impl.createDocument(None, u'task', None)\n            root = dom.documentElement\n            \n            statusnode = dom.createElement('status')\n            statusnode.appendChild(dom.createTextNode(\"%d\"%status))\n            root.appendChild(statusnode)\n            \n            listSubName=[u'src',u'tgt']\n\n            listSubText=[pi.tskfile,pi.tsktarget]\n            tfilenode=self.createnode(pi.purename,dom,u'tfile',listSubName,listSubText)\n            root.appendChild(tfilenode)\n            \n            \n            if (status == 0):\n                listSubText=[pi.bakfile,pi.bakfailtarget]\n                bfilenode=self.createnode(pi.purename,dom,u'bfile',listSubName,listSubText)\n                root.appendChild(bfilenode)\n                \n                httpList = [ u'domain', u'urlpath', u'userId', u'itemCode', u'deleteType' ]\n                httpInfoList = [ ui.urldomain, ui.urlfailpath, ui.userId, ui.itemCode, '4' ]\n                httpnode=self.createnode(pi.purename,dom,u'http',httpList,httpInfoList)\n                root.appendChild(httpnode)\n                \n            elif (status >= 1):\n                listSubText=[pi.bakfile,pi.baksucctarget]\n                bfilenode=self.createnode(pi.purename,dom,u'bfile',listSubName,listSubText)\n                root.appendChild(bfilenode)\n                \n                listSubText=[pi.vidfile,pi.vidtarget]\n                vfilenode=self.createnode(pi.purename,dom,u'vfile',listSubName,listSubText)\n                root.appendChild(vfilenode)\n                \n                listSubText=[pi.mobfile,pi.mobtarget]\n                mfilenode=self.createnode(pi.purename,dom,u'mfile',listSubName,listSubText)\n                root.appendChild(mfilenode)\n                \n                if (status >= 2):\n                    listSubText=[pi.imgfile,pi.imgtarget]\n                    ifilenode=self.createnode(pi.purename,dom,u'ifile',listSubName,listSubText)\n                    root.appendChild(ifilenode)\n                    \n                    httpList = [ u'domain', u'urlpath', u'userId', u'itemCode', u'audio', u'picUrl', u'outerPlayerUrl', u'itemUrl', u'totalTime', u'mediaType' ]\n                    httpInfoList = [ ui.urldomain, ui.urlsuccpath, ui.userId, ui.itemCode, '0', ui.picUrl, ui.outerPlayerUrl, ui.itemUrl, ui.totalTime, ui.mediaType ]\n                    \n                    httpnode=self.createnode(pi.purename,dom,u'http',httpList,httpInfoList)\n                    root.appendChild(httpnode)\n                    \n                else:\n                    httpList = [ u'domain', u'urlpath', u'userId', u'itemCode', u'audio', u'outerPlayerUrl', u'itemUrl', u'totalTime', u'mediaType' ]\n                    \n                    httpInfoList = [ ui.urldomain, ui.urlsuccpath, ui.userId, ui.itemCode, '1', ui.outerPlayerUrl, ui.itemUrl, ui.totalTime, ui.mediaType ]\n                    \n                    httpnode=self.createnode(pi.purename,dom,u'http',httpList,httpInfoList)\n                    root.appendChild(httpnode)\n            \n            f = open(pi.tskfile, 'w') \n            writer = codecs.lookup('utf-8')[3](f) \n            dom.writexml(writer, encoding='utf-8') \n            writer.close() \n            f.close() \n            \n            logger.info( '['+self.name+'] ['+pi.purename+'] tfile_encode_succ: '+ pi.tskfile )\n\n        except: \n            traceback.print_exc()\n            logger.error( '['+self.name+'] ['+pi.purename+'] tfile_encode_fail: '+ pi.tskfile )\n\n\n    '''Here is an example of a task.xml file\n    \n    <task>\n    <status>2</status>\n    <http>\n    <domain></domain>\n    <urlpath></urlpath>\n    <userId></userId>\n    <itemCode></itemCode>\n    <audio></audio>\n    <picUrl></picUrl>\n    <outerPlayerUrl></outerPlayerUrl>\n    <itemUrl></itemUrl>\n    <totalTime></totalTime>\n    <mediaType></mediaType>\n    </http>\n    <bfile>\n    <src>../data/inspector/a.avi.bak</src>\n    <tgt>/data/mfs/bakup/a.avi</tgt>\n    </bfile>\n    <vfile>\n    <src>../data/inspector/a.f4v</src>\n    <tgt>/data/mfs/vpool/a.f4v</tgt>\n    </vfile>\n    <mfile>\n    <src>../data/inspector/a.mp4</src>\n    <tgt>/data/mfs/vpool/a.mp4</tgt>\n    </mfile>\n    <ifile>\n    <src>../data/inspector/a.jpg</src>\n    <tgt>/data/mfs/vpool/a.jpg</tgt>\n    </ifile>\n    <tfile>\n    <src>../data/inspector/a/task.xml</src>\n    <tgt>../data/log/inspector/a.xml</tgt>\n    </tfile>\n    </task>\n    '''\n\n    '''\n    Parse the task.xml file and produce a TaskInfo\n    '''\n    def decode(self,purename):\n        filename = 
self.wpath+purename+'/'+'task.xml'\n        try:\n            xmlfile = open( filename, 'r' )\n            try:\n                invalid_place = 'not_a_xmlfile'\n                doc = minidom.parse( xmlfile )\n                root = doc.documentElement\n                \n                invalid_place = 'cannt_read_status'\n                statusNode = root.getElementsByTagName('status')[0]\n                status=int( statusNode.childNodes[0].nodeValue )\n                \n                bsrcfile = None\n                btgtfile = None\n                vsrcfile = None\n                vtgtfile = None\n                msrcfile = None\n                mtgtfile = None\n                isrcfile = None\n                itgtfile = None\n                \n                if(status>=0):\n                    invalid_place = 'cannt_read_tfile'\n                    tfilenode = root.getElementsByTagName('tfile')[0]\n                    tsrcfile = tfilenode.getElementsByTagName('src')[0].childNodes[0].nodeValue\n                    ttgtfile = tfilenode.getElementsByTagName('tgt')[0].childNodes[0].nodeValue\n                    \n                    invalid_place = 'cannt_read_bfile'\n                    bfilenode = root.getElementsByTagName('bfile')[0]\n                    bsrcfile = bfilenode.getElementsByTagName('src')[0].childNodes[0].nodeValue\n                    btgtfile = bfilenode.getElementsByTagName('tgt')[0].childNodes[0].nodeValue\n                \n                if(status>=1):\n                    invalid_place = 'cannt_read_vfile'\n                    vfilenode = root.getElementsByTagName('vfile')[0]\n                    vsrcfile = vfilenode.getElementsByTagName('src')[0].childNodes[0].nodeValue\n                    vtgtfile = vfilenode.getElementsByTagName('tgt')[0].childNodes[0].nodeValue\n                    \n                    invalid_place = 'cannt_read_mfile'\n                    mfilenode = root.getElementsByTagName('mfile')[0]\n                    msrcfile = mfilenode.getElementsByTagName('src')[0].childNodes[0].nodeValue\n                    mtgtfile = mfilenode.getElementsByTagName('tgt')[0].childNodes[0].nodeValue\n                    \n                    '''\n                    The http decoding part\n                    '''\n                    invalid_place='cannt_read_http'\n                    httpnode=root.getElementsByTagName('http')[0]\n                    domain=httpnode.getElementsByTagName('domain')[0].childNodes[0].nodeValue\n                    urlpath=httpnode.getElementsByTagName('urlpath')[0].childNodes[0].nodeValue\n                    userId=httpnode.getElementsByTagName('userId')[0].childNodes[0].nodeValue\n                    itemCode=httpnode.getElementsByTagName('itemCode')[0].childNodes[0].nodeValue\n                    audio=httpnode.getElementsByTagName('audio')[0].childNodes[0].nodeValue\n                    outerPlayerUrl=httpnode.getElementsByTagName('outerPlayerUrl')[0].childNodes[0].nodeValue\n                    itemUrl=httpnode.getElementsByTagName('itemUrl')[0].childNodes[0].nodeValue\n                    totalTime=httpnode.getElementsByTagName('totalTime')[0].childNodes[0].nodeValue\n                    mediaType=httpnode.getElementsByTagName('mediaType')[0].childNodes[0].nodeValue\n\n                    if(status>=2):\n                        invalid_place = 'cannt_read_vfile'\n                        ifilenode = root.getElementsByTagName('ifile')[0]\n                        isrcfile = ifilenode.getElementsByTagName('src')[0].childNodes[0].nodeValue\n                        itgtfile = ifilenode.getElementsByTagName('tgt')[0].childNodes[0].nodeValue\n                        picUrl=httpnode.getElementsByTagName('picUrl')[0].childNodes[0].nodeValue\n                        reqs=['userId=',userId,'&','itemCode=',itemCode,'&','audio=',audio,'&','picUrl=',picUrl,'&','outerPlayerUrl=',outerPlayerUrl,'&','itemUrl=',itemUrl,'&','totalTime=',totalTime,'&','mediaType=',mediaType]\n                    else:\n                        reqs=['userId=',userId,'&','itemCode=',itemCode,'&','audio=',audio,'&','outerPlayerUrl=',outerPlayerUrl,'&','itemUrl=',itemUrl,'&','totalTime=',totalTime,'&','mediaType=',mediaType]\n\n                    httpreq=''.join(reqs)\n                \n                else:\n                    '''\n                    The http decoding part\n                    '''\n                    invalid_place='cannt_read_http'\n                    httpnode=root.getElementsByTagName('http')[0]\n                    domain=httpnode.getElementsByTagName('domain')[0].childNodes[0].nodeValue\n                    urlpath=httpnode.getElementsByTagName('urlpath')[0].childNodes[0].nodeValue\n                    userId=httpnode.getElementsByTagName('userId')[0].childNodes[0].nodeValue\n                    itemCode=httpnode.getElementsByTagName('itemCode')[0].childNodes[0].nodeValue\n                    \n                    
deleteType=httpnode.getElementsByTagName('deleteType')[0].childNodes[0].nodeValue\n                    reqs=['userId=',userId,'&','itemCode=',itemCode,'&','deleteType=',deleteType]\n                    \n                    httpreq=''.join(reqs)\n                \n                logger.info( '['+self.name+'] ['+purename+'] tfile_decod_succ: '+ filename )\n                return TaskInfo(self.wpath+purename,domain,urlpath,tsrcfile,ttgtfile,bsrcfile,btgtfile,vsrcfile,vtgtfile,msrcfile,mtgtfile,isrcfile,itgtfile,httpreq)\n            \n            except:\n                traceback.print_exc()\n                logger.warning( '['+self.name+'] ['+purename+'] tfile_decod_fail: '+ invalid_place )\n                return TaskInfo(None,None,None,None,None,None,None,None,None,None,None,None,None,None)\n            finally:\n                xmlfile.close()\n            \n        except:\n            traceback.print_exc()\n            logger.error( '['+self.name+'] ['+purename+'] tfile_open_failed: '+ filename)\n            return TaskInfo(None,None,None,None,None,None,None,None,None,None,None,None,None,None)\n\n\n'''\nProduct of the task factory, corresponding to a single task\nWith this, the taskmanager can carry out the follow-up operations directly, without caring about the concrete encoding details\n'''\nclass TaskInfo():\n    def __init__(self,wwpath,domain,urlpath,tsksrcfile,tsktgtfile,baksrcfile,baktgtfile,vidsrcfile,vidtgtfile,mobsrcfile,mobtgtfile,imgsrcfile,imgtgtfile,httpreq):\n        self.wwpath=wwpath\n        self.domain=domain\n        self.urlpath=urlpath\n        self.tsksrcfile=tsksrcfile\n        self.tsktgtfile=tsktgtfile\n        self.baksrcfile=baksrcfile\n        self.baktgtfile=baktgtfile\n        self.vidsrcfile=vidsrcfile\n        self.vidtgtfile=vidtgtfile\n        self.mobsrcfile=mobsrcfile\n        self.mobtgtfile=mobtgtfile\n        self.imgsrcfile=imgsrcfile\n        self.imgtgtfile=imgtgtfile\n        self.httpreq=httpreq\n\n", "sub_path": "pylib_dist/taskdata.py", "file_name": "taskdata.py", "file_ext": "py", "file_size_in_byte": 18345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "time.strftime", "line_number": 49, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 121, "usage_type": "call"}, {"api_name": "string.atof", "line_number": 127, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 134, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 136, "usage_type": "call"}, {"api_name": "re.split", "line_number": 138, "usage_type": "call"}, {"api_name": "string.atoi", "line_number": 139, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 142, "usage_type": "call"}, {"api_name": "logger.error", "line_number": 195, "usage_type": "call"}, {"api_name": "xml.dom.minidom.getDOMImplementation", "line_number": 212, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 212, "usage_type": "name"}, {"api_name": "codecs.lookup", "line_number": 270, "usage_type": "call"}, {"api_name": "logger.info", "line_number": 275, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 278, "usage_type": "call"}, {"api_name": "logger.error", "line_number": 279, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 330, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 330, "usage_type": "name"}, {"api_name": "logger.info", "line_number": 411, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 415, "usage_type": "call"}, {"api_name": "logger.warning", "line_number": 416, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 422, "usage_type": "call"}, {"api_name": "logger.error", "line_number": 423, "usage_type": "call"}]} +{"seq_id": "428644383", "text": "# -*- 
coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('unit', '0003_auto_20150713_1639'),\n        ('match', '0002_challengematch_next_id'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='challengematch',\n            name='policy',\n            field=models.ForeignKey(default=1, verbose_name=b'\\xe6\\x88\\x98\\xe6\\x9c\\xaf', to='unit.Policy'),\n            preserve_default=False,\n        ),\n    ]\n", "sub_path": "apps/match/migrations/0003_challengematch_policy.py", "file_name": "0003_challengematch_policy.py", "file_ext": "py", "file_size_in_byte": 548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "365669408", "text": "import requests\nimport json\nimport csv\n\n\nclass Admin_panel_auth(object):\n\n    def __init__(self, url):\n        self.url = str(url)\n        self.session = requests.Session()\n        self.tokens = {}\n\n    # Log in to the super admin panel\n    def super_admin(self, login, password):\n        r = self.session.post(\n            self.url + \"authorizate\",\n            json={\"login\": login, \"pass\": password},\n            headers={\n                \"Authorization\": \"Basic OXVESTh4YTU6V1NIOHNQQ0JQTktZR2lHcml6Rmtnb3A=\"\n            })\n        print(r.json())\n        print('Status code: {0}'.format(r.status_code))\n        if r.status_code == 200:\n            print('Logged in as super admin!')\n            token = r.json()[\"data\"][\"success\"][\"access_token\"]\n            self.session.headers.update({\"Authorization\": \"Bearer \" + token})\n        elif r.status_code == 404:\n            print('Resource not found')\n\n    # Log in to a company\n    def login_company_id(self, id):\n        r = self.session.post(\n            self.url + 'authorizate/company/' + str(id))\n        print(r.json())\n        if r.status_code == 200:\n            print('Switched to company {}'.format(id))\n        elif r.status_code == 404:\n            print('Resource not found')\n        return r.json()\n\n\n    # Create users\n    def create_user(self, file_obj):\n        print(\"Creating users\")\n\n        try:\n            \"\"\"\n            Read the CSV file using the csv.DictReader class\n            \"\"\"\n            reader = csv.DictReader(file_obj, delimiter=',')\n            for line in reader:\n                data = {\n                    \"login\": line['login'],  # Login\n                    \"password\": line['password'],  # Password\n                    \"phone\": line['phone'],  # Phone\n                    \"verified_phone\": line['verified_phone'],  # Phone verified\n                    \"email\": line['email'],  # Email\n                    \"verified_email\": line['verified_email'],  # Email verified\n                    \"fname\": line['fname'],  # Last name\n                    \"sname\": line['sname'],  # First name\n                    \"chief_email\": line['chief_email'],  # Manager's email\n                    \"license_agree\": line['license_agree'],  # License agreement accepted\n                    \"is_invited\": line['is_invited'],  # User has been invited\n                    \"is_chief\": line['is_chief'],  # User is a manager\n                    \"notify\": line['notify'],  # Receive email notifications about subordinates' actions (if is_chief = 1)\n                    \"allow_skip_material\": line['allow_skip_material'],\n                    # Allow materials to be completed in any order\n                    \"groups\": {\n                        \"region\": line['region'],  # Region\n                        \"city\": line['city'],  # City\n                        \"role\": line['role'],  # Role\n                        \"position\": line['position'],  # Position\n                        \"team\": line['team'],  # Team\n                        \"department\": line['department'],  # Department\n                        \"function\": line['function']  # Assignments\n                    }\n                }\n                r = self.session.post(\n                    self.url + 'user',\n                    json=data,\n                    headers=self.session.headers)\n\n                if r.status_code == 422:\n                    print('[Error 422] Problem with user: {0}, json= {1}'.format(line['login'], r.json()))\n                    continue\n                else:\n                    print(\"Created user id: {0}\".format(r.json()['data']['id']))\n            print('Created all users from the file')\n        except Exception:\n            print('[Error] Something went wrong. Server response: {0}, json = {1}'.format(r, r.json()))\n\n\nif __name__ == \"__main__\":\n    admin = Admin_panel_auth('https://mapi.test-eq.ru/v1/')\n    admin.super_admin('andreev.s@r-mades.com', 'HFnqVjcatp7D3EFss')\n    admin.login_company_id('1')\n    csv_path = \"users_csv.csv\"\n    with open(csv_path) as f_obj:\n        admin.create_user(f_obj)", "sub_path": "users/createusers.py", "file_name": "createusers.py", "file_ext": "py", "file_size_in_byte": 4671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.Session", "line_number": 10, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "249812615", "text": "from OpenSSL import crypto, SSL\nfrom socket import gethostname\nfrom pprint import pprint\nfrom time import gmtime, mktime\n\nCERT_FILE = \"ssl.crt\"\nKEY_FILE = \"ssl.key\"\n\ndef create_self_signed_cert():\n\n    # create a key pair\n    k = crypto.PKey()\n    k.generate_key(crypto.TYPE_RSA, 1024)\n\n    # create a self-signed cert\n    cert = crypto.X509()\n    cert.get_subject().C = \"CA\"\n    cert.get_subject().ST = \"BC\"\n    cert.get_subject().L = \"Vancouver\"\n    cert.get_subject().O = \"Self Signed Madness\"\n    cert.get_subject().OU = \"Self Signyness\"\n    cert.get_subject().CN = gethostname()\n    cert.set_serial_number(1000)\n    cert.gmtime_adj_notBefore(0)\n    cert.gmtime_adj_notAfter(10*365*24*60*60)\n    cert.set_issuer(cert.get_subject())\n    cert.set_pubkey(k)\n    cert.sign(k, 'sha1')\n\n    open(CERT_FILE, \"wt\").write(\n        crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n    open(KEY_FILE, \"wt\").write(\n        crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n\ncreate_self_signed_cert()\n", "sub_path": "tests/walkydata/generate-self-signed-certs.py", "file_name": "generate-self-signed-certs.py", "file_ext": "py", "file_size_in_byte": 993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "OpenSSL.crypto.PKey", "line_number": 12, "usage_type": "call"}, {"api_name": "OpenSSL.crypto", "line_number": 12, "usage_type": "name"}, {"api_name": "OpenSSL.crypto.TYPE_RSA", "line_number": 13, "usage_type": "attribute"}, {"api_name": "OpenSSL.crypto", "line_number": 13, "usage_type": "name"}, {"api_name": "OpenSSL.crypto.X509", "line_number": 16, "usage_type": "call"}, {"api_name": "OpenSSL.crypto", "line_number": 16, "usage_type": "name"}, {"api_name": "socket.gethostname", "line_number": 22, "usage_type": "call"}, {"api_name": "OpenSSL.crypto.dump_certificate", "line_number": 31, "usage_type": "call"}, {"api_name": "OpenSSL.crypto", "line_number": 31, "usage_type": "name"}, {"api_name": "OpenSSL.crypto.FILETYPE_PEM", "line_number": 31, "usage_type": "attribute"}, {"api_name": "OpenSSL.crypto.dump_privatekey", "line_number": 33, "usage_type": "call"}, {"api_name": "OpenSSL.crypto", "line_number": 33, "usage_type": 
"name"}, {"api_name": "OpenSSL.crypto.FILETYPE_PEM", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "82194769", "text": "# 分析得出,对于HasNationalityOf这样的关系,关系模式应该较为单一(模式数量和复杂度低)\r\n# 所以暂时先尝试该方法\r\n\r\nfrom tqdm import tqdm\r\nfrom LTP_operation import LTP\r\n\r\nltp = LTP()\r\nfile_1 = 'D:\\文件夹汇总\\项目\\军事知识图谱\\爬虫\\环球军事网\\huanqiu_1\\\\news_mixed.txt'\r\nfile_2 = 'D:\\文件夹汇总\\项目\\军事知识图谱\\\\bootstraping_extraction\\HasNationalityOf\\\\seed_triple.txt'\r\nfile_3 = 'D:\\文件夹汇总\\项目\\军事知识图谱\\\\bootstraping_extraction\\HasNationalityOf\\\\seed_sent.txt'\r\nfile_4 = 'D:\\文件夹汇总\\项目\\军事知识图谱\\\\bootstraping_extraction\\HasNationalityOf\\\\pattern.txt'\r\n\r\nfh_1 = open(file_1, 'r', encoding='UTF-8')\r\nfh_2 = open(file_2, 'a+', encoding='UTF-8')\r\nfh_3 = open(file_3, 'a+', encoding='UTF-8')\r\nfh_4 = open(file_4, 'a+', encoding='UTF-8')\r\n\r\n# 先读取所有的新闻\r\nlines_1 = fh_1.readlines()\r\ntxt = ''\r\nfor line in tqdm(lines_1):\r\n txt = txt + line.strip()\r\n\r\n# 将所有的新闻分句\r\nsents = ltp.sent_split(txt)\r\n\r\nsents_len = len(sents)\r\n\r\n\r\n# 将句子中的不必要信息删除\r\n\r\nfor i in tqdm(range(sents_len)):\r\n if '】' in sents[i]:\r\n left = int(sents[i].index('】'))\r\n length = len(sents[i])\r\n # print(left, length)\r\n # print(sent)\r\n sents[i] = sents[i][int(left + 1):]\r\n\r\n# 对每个句子进行ner,先收集大概600对实体对,\r\nfor i in tqdm(range(601, sents_len)):\r\n\r\n netags = list(ltp.ner(sents[i]))\r\n words = ltp.seg(sents[i]).split('/')\r\n # print(type(list(netags)),list(netags))\r\n if 'S-Nh' in netags and 'S-Ns' in netags:\r\n # print(words[netags.index('S-Nh')],words[netags.index('S-Ns')])\r\n fh_2.write(words[netags.index('S-Nh')])\r\n fh_2.write(' ')\r\n fh_2.write(words[netags.index('S-Ns')])\r\n fh_2.write(' ')\r\n fh_2.write(sents[i].strip())\r\n fh_2.write('\\n')\r\n if sents.index(sents[i]) == 15000:\r\n break\r\nfh_2.close()\r\n", "sub_path": "Military_KG/Bootstrapping/seed_for_HasNationalityOf.py", "file_name": "seed_for_HasNationalityOf.py", "file_ext": "py", "file_size_in_byte": 1981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "LTP_operation.LTP", "line_number": 7, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 21, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 32, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "117092437", "text": "import webapp2\nfrom google.appengine.ext import db\nimport json\n\n# for csvs\nfrom datetime import date\nimport cStringIO\nimport csv\nimport types # for figuring out which properties of entities are primitive\n # types and thus easily updatable\n\nfrom util import Handler\n# Tasker classes\nfrom entities import InvalidIdError\nfrom entities import EntityDNEError\nfrom entities import Model\nfrom entities import Researcher\nfrom entities import Task\nfrom entities import ResponseSet\nfrom entities import Participant\n\n# USEFUL EXCEPTION DETAILS\n# db.BadKeyError A key was given to db.get() that google doesn't\n# recognize. Shouldn't happen, and indicates a major\n# problem with Model.get_by_id().\n# db.KindError A key was given to db.get() of a kind that google\n# doesn't recognize. 
See above.\n# ValueError Raised by json.loads() when it encounters invalid\n# json syntax, among other things.\n# TypeError Raised by json.loads() when you give it something\n# other than a string, among other things.\n# InvalidIdError Badly formed tasker id, see entities.py.\n# EntityDNEError Raised by Model.get_by_id() if db.get() returns\n# None.\n\n\nclass ApiHandler(Handler):\n def out(self, success=False, message='', data=''):\n \"\"\"Write a standardized json message in response to the api call.\"\"\"\n self.write_json({'success': success, 'message': message, 'data': data})\n\n def exception(self, e):\n \"\"\"Convenient way to put out exception messages.\"\"\"\n self.out(message=\"{}: {}\".format(e.__class__.__name__, e))\n\n\nclass ApiRecordParticipantHandler(ApiHandler):\n def get(self):\n \"\"\"Save a dictionary to the participant's attribute blob.\n\n Is not intelligent about preserving keys of nested dictionaries, it\n just overwrites root-level keys.\"\"\"\n json_string = self.request.get('json')\n if json_string == '':\n self.out(message=\"No json data.\")\n return\n\n try:\n json_data = json.loads(json_string)\n except (ValueError, TypeError):\n self.out(message=\"Invalid json syntax.\")\n return\n\n if type(json_data) is not dict:\n self.out(message=\"JSON data must be a dictionary.\")\n return\n\n if u'pid' not in json_data:\n self.out(message=\"Missing pid.\")\n return\n\n try:\n pid = json_data[u'pid']\n p = Participant.get_by_id(pid)\n except (db.BadArgumentError, db.BadKeyError, db.KindError,\n InvalidIdError, EntityDNEError) as e:\n self.exception(e)\n return\n\n # mix the incoming data with existing data, overwriting existing keys\n for k, v in json_data.items():\n p.attr_dict(k, v)\n p.put()\n\n self.out(success=True)\n\n def post(self):\n \"\"\"Same as the get method above, but for POST. 
No json involved.\"\"\"\n keys = self.request.arguments()\n if u'pid' not in keys:\n self.out(message=\"pid required\")\n return\n pid = self.request.get(u'pid')\n try:\n pid = self.request.get(u'pid')\n p = Participant.get_by_id(pid)\n except (db.BadArgumentError, db.BadKeyError, db.KindError,\n InvalidIdError, EntityDNEError) as e:\n self.exception(e)\n return\n for key in keys:\n p.attr_dict(key, self.request.get(key))\n p.put()\n self.out(success=True)\n\n\nclass ApiGetHandler(ApiHandler):\n def get(self, ids):\n entities = []\n for id in ids.split(','):\n try:\n entity = Model.get_by_id(id)\n except (db.BadKeyError, InvalidIdError, EntityDNEError) as e:\n self.exception(e)\n return\n if not entity.active:\n self.out(message=\"This entity is inactive.\")\n return\n else:\n entities.append(entity)\n\n if len(entities) == 1:\n to_return = entities[0].dict()\n else:\n to_return = [e.dict() for e in entities]\n\n self.out(success=True, data=to_return)\n\n\nclass ApiUpdateHandler(ApiHandler):\n \"\"\"Allows primitive data types of entities to be modified.\"\"\"\n updatableTypes = [\n types.BooleanType,\n types.IntType,\n types.StringType,\n types.UnicodeType,\n ]\n\n def get(self, id):\n try:\n entity = Model.get_by_id(id)\n except (db.BadKeyError, InvalidIdError, EntityDNEError) as e:\n self.exception(e)\n return\n if not entity.active:\n self.out(message=\"This entity is inactive.\")\n return\n\n changes_made = False\n for key in self.request.arguments():\n if not hasattr(entity, key):\n self.out(message=\"Property does not exist: {}.\".format(key))\n return\n prop_type = type(getattr(entity, key))\n if prop_type in self.updatableTypes:\n coerced_value = prop_type(self.request.get(key))\n setattr(entity, key, coerced_value)\n changes_made = True\n else:\n self.out(message=\"Cannot update non-primitive property: {}.\".format(key))\n return\n\n if changes_made:\n entity.put()\n\n self.out(success=True, data=entity.dict())\n\n\n# @todo: make all the exception handling like this\nclass ApiListHandler(ApiHandler):\n def get(self, kind, id=None):\n r = Researcher.get_current()\n if kind == 'tasks' or kind == 'response_sets':\n self.out(success=True, data=[o.dict() for o in getattr(r, kind)()])\n elif kind == 'task_participants':\n try:\n t = Task.get_by_id(id)\n except (db.BadArgumentError, db.BadKeyError, db.KindError,\n InvalidIdError, EntityDNEError) as e:\n self.exception(e)\n return\n self.out(success=True, data=[p.dict() for p in t.participants()])\n elif kind == 'response_set_participants':\n try:\n rs = ResponseSet.get_by_id(id)\n except (db.BadArgumentError, db.BadKeyError, db.KindError) as e:\n self.exception(e)\n return\n self.out(success=True, data=[p.dict() for p in rs.participants()])\n else:\n self.out(message=\"Bad kind to get: {}.\".format(kind))\n\n\n# @todo: make a generic csv handler that this responseset- and task-specific\n# one uses\nclass ApiCsvHandler(ApiHandler):\n def get(self, id):\n try:\n entity = Model.get_by_id(id)\n except (db.BadKeyError, InvalidIdError, EntityDNEError):\n self.out(message=\"Invalid id: {}.\".format(id))\n return\n if not entity.active:\n self.out(message=\"The entity is inactive.\")\n return\n if not hasattr(entity, 'csv'):\n self.out(message=\"Entity of class {} has no csv method.\".format(\n entity.__class__.__name__))\n return\n\n # from here assume everything is valid\n data = entity.csv()\n filename = '{}_{}_{}.csv'.format(\n entity.__class__.__name__, entity.name, date.today())\n\n # Set up headers for browser to correctly recognize the 
file\n self.response.headers['Content-Type'] = 'text/csv'\n c_dip = 'attachment; filename=\"{}\"'.format(filename)\n self.response.headers['Content-Disposition'] = c_dip\n\n # write the csv to a file like string\n csv_file = cStringIO.StringIO()\n csv_writer = csv.writer(csv_file)\n\n # add data\n for row in data:\n csv_writer.writerow(row)\n\n # Emit the files directly to HTTP response stream\n self.write(csv_file.getvalue())\n\n\nclass ApiCreateHandler(ApiHandler):\n def get(self, kind):\n if kind == 'task':\n url = self.request.get('url')\n name = self.request.get('name')\n if name == '' or url == '':\n self.out(message=\"Invalid parameters.\")\n return\n\n t = Task.create(name=name, url=url)\n self.out(success=True, data=t.dict())\n\n elif kind == 'response_set':\n name = self.request.get('name')\n task_id = self.request.get('task_id')\n if name == '' or task_id == '':\n self.out(message=\"Invalid parameters.\")\n return\n\n try:\n t = Task.get_by_id(task_id)\n except (db.BadKeyError, db.KindError, InvalidIdError,\n EntityDNEError):\n self.out(message=\"Invalid task_id: {}.\".format(task_id))\n return\n\n if not t.active:\n self.out(message=\"Task is inactive.\")\n return\n\n rs = ResponseSet.create(name=name, task=t)\n self.out(success=True, data=rs.dict())\n\n elif kind == 'participant':\n rs_id = self.request.get('response_set_id')\n if rs_id == '':\n self.out(message=\"Invalid parameters.\")\n return\n\n try:\n rs = ResponseSet.get_by_id(rs_id)\n except (db.BadKeyError, db.KindError, InvalidIdError,\n EntityDNEError):\n self.out(message=\"Invalid response_set_id: {}.\".format(rs_id))\n return\n\n if rs.active:\n p = Participant.create(response_set=rs)\n self.out(success=True, data=p.dict())\n else:\n self.out(message=\"Response set is inactive.\")\n\n else:\n self.out(message=\"Invalid kind to create: {}.\".format(kind))\n\n\nclass ApiDeleteHandler(ApiHandler):\n def get(self, id):\n try:\n entity = Model.get_by_id(id)\n except (db.BadKeyError, InvalidIdError, EntityDNEError):\n self.out(message=\"Invalid id: {}.\".format(id))\n return\n\n Klass = entity.__class__\n if Klass in [Task, ResponseSet, Participant]:\n Klass.delete(entity)\n success = True\n message = ''\n else:\n success = False\n message = \"\"\"Only Tasks, ResponseSets, and Participants allowed.\n Provided id was of class {}.\"\"\".format(Klass.__name__)\n\n self.out(success=success, message=message)\n\n\nclass ApiShareHandler(ApiHandler):\n \"\"\"Manage permissions of various tasks and response sets.\"\"\"\n def get(self, add_or_remove, share_what_id, share_with_id_or_email):\n add = add_or_remove == 'share'\n remove = add_or_remove == 'unshare'\n\n # load the entity to be shared\n try:\n entity = Model.get_by_id(share_what_id)\n except (db.BadKeyError, db.KindError, InvalidIdError,\n EntityDNEError):\n self.out(message=\"Invalid id: {}.\".format(share_what_id))\n return\n\n # figure out what kind of entity it is\n if entity.__class__ == Task:\n task = entity\n response_sets = task.response_sets()\n elif entity.__class__ == ResponseSet:\n task = None\n response_sets = [entity]\n else:\n self.out(message=\"Only tasks and response sets can be shared.\")\n return\n\n # figure out the reseracher to share with\n try:\n if '@' in share_with_id_or_email:\n message = \"No researcher with email {} could be found.\".format(\n share_with_id_or_email)\n share_with_r = Researcher.get_by_email(share_with_id_or_email)\n else:\n message = \"Invalid id: {}.\".format(share_with_id_or_email)\n share_with_r = 
Researcher.get_by_id(share_with_id_or_email)\n except (db.BadKeyError, db.KindError, InvalidIdError,\n EntityDNEError):\n self.out(message=message)\n return\n\n # make sure the thing being shared is owned by the current user,\n # i.e. has rights to share\n if task:\n if task.owner != Researcher.get_current():\n message = \"You do not own this task and cannot share it.\"\n self.out(success=False, message=message)\n return\n else:\n if response_sets[0].owner != Researcher.get_current():\n message = \"You do not own this response set and cannot share it.\"\n self.out(success=False, message=message)\n return\n\n # everything is ok, actually do the sharing (or unsharing)\n if task:\n if add:\n share_with_r.add_task(task)\n if remove:\n share_with_r.remove_task(task)\n for rs in response_sets:\n if add:\n share_with_r.add_response_set(rs)\n if remove:\n share_with_r.remove_response_set(rs)\n share_with_r.put()\n\n # I would re-fetch the entity to get it's updated version, but the\n # gae datastore doesn't respond that fast. So a custom, in-memory\n # version is actually the most accurate\n data = entity.dict()\n r_id = share_with_r.id()\n if add and r_id not in data['assc_researcher_ids']:\n data['assc_researcher_ids'].append(share_with_r.id())\n if remove and r_id in data['assc_researcher_ids']:\n data['assc_researcher_ids'].remove(share_with_r.id())\n self.out(success=True, message='', data=data)\n\n\napp = webapp2.WSGIApplication([\n ('/api/get/(.*)', ApiGetHandler),\n ('/api/list/([^/]*)$', ApiListHandler),\n ('/api/list/(.*?)/(.*)', ApiListHandler),\n ('/api/update/(.*)', ApiUpdateHandler),\n ('/api/csv/(.*)', ApiCsvHandler),\n ('/api/create/(.*)', ApiCreateHandler),\n ('/api/delete/(.*)', ApiDeleteHandler),\n ('/api/record?.*', ApiRecordParticipantHandler),\n ('/api/(share|unshare)/(.*?)/(.*)', ApiShareHandler),\n], debug=True)\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 14191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "util.Handler", "line_number": 37, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "entities.Participant.get_by_id", "line_number": 74, "usage_type": "call"}, {"api_name": "entities.Participant", "line_number": 74, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadArgumentError", "line_number": 75, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 75, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 75, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 75, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 76, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 76, "usage_type": "name"}, {"api_name": "entities.Participant.get_by_id", "line_number": 96, "usage_type": "call"}, {"api_name": "entities.Participant", "line_number": 96, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadArgumentError", "line_number": 97, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 97, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 97, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 97, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 98, "usage_type": "name"}, 
{"api_name": "entities.EntityDNEError", "line_number": 98, "usage_type": "name"}, {"api_name": "entities.Model.get_by_id", "line_number": 112, "usage_type": "call"}, {"api_name": "entities.Model", "line_number": 112, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 113, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 113, "usage_type": "name"}, {"api_name": "entities.InvalidIdError", "line_number": 113, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 113, "usage_type": "name"}, {"api_name": "entities.append", "line_number": 120, "usage_type": "call"}, {"api_name": "types.BooleanType", "line_number": 133, "usage_type": "attribute"}, {"api_name": "types.IntType", "line_number": 134, "usage_type": "attribute"}, {"api_name": "types.StringType", "line_number": 135, "usage_type": "attribute"}, {"api_name": "types.UnicodeType", "line_number": 136, "usage_type": "attribute"}, {"api_name": "entities.Model.get_by_id", "line_number": 141, "usage_type": "call"}, {"api_name": "entities.Model", "line_number": 141, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 142, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 142, "usage_type": "name"}, {"api_name": "entities.InvalidIdError", "line_number": 142, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 142, "usage_type": "name"}, {"api_name": "entities.Researcher.get_current", "line_number": 172, "usage_type": "call"}, {"api_name": "entities.Researcher", "line_number": 172, "usage_type": "name"}, {"api_name": "entities.Task.get_by_id", "line_number": 177, "usage_type": "call"}, {"api_name": "entities.Task", "line_number": 177, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadArgumentError", "line_number": 178, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 178, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 178, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 178, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 179, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 179, "usage_type": "name"}, {"api_name": "entities.ResponseSet.get_by_id", "line_number": 185, "usage_type": "call"}, {"api_name": "entities.ResponseSet", "line_number": 185, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadArgumentError", "line_number": 186, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 186, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 186, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 186, "usage_type": "attribute"}, {"api_name": "entities.Model.get_by_id", "line_number": 199, "usage_type": "call"}, {"api_name": "entities.Model", "line_number": 199, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 200, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 200, "usage_type": "name"}, {"api_name": "entities.InvalidIdError", "line_number": 200, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 200, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.date", 
"line_number": 214, "usage_type": "name"}, {"api_name": "cStringIO.StringIO", "line_number": 222, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 223, "usage_type": "call"}, {"api_name": "entities.Task.create", "line_number": 242, "usage_type": "call"}, {"api_name": "entities.Task", "line_number": 242, "usage_type": "name"}, {"api_name": "entities.Task.get_by_id", "line_number": 253, "usage_type": "call"}, {"api_name": "entities.Task", "line_number": 253, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 254, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 254, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 254, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 254, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 255, "usage_type": "name"}, {"api_name": "entities.ResponseSet.create", "line_number": 263, "usage_type": "call"}, {"api_name": "entities.ResponseSet", "line_number": 263, "usage_type": "name"}, {"api_name": "entities.ResponseSet.get_by_id", "line_number": 273, "usage_type": "call"}, {"api_name": "entities.ResponseSet", "line_number": 273, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 274, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 274, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 274, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 274, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 275, "usage_type": "name"}, {"api_name": "entities.Participant.create", "line_number": 280, "usage_type": "call"}, {"api_name": "entities.Participant", "line_number": 280, "usage_type": "name"}, {"api_name": "entities.Model.get_by_id", "line_number": 292, "usage_type": "call"}, {"api_name": "entities.Model", "line_number": 292, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 293, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 293, "usage_type": "name"}, {"api_name": "entities.InvalidIdError", "line_number": 293, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 293, "usage_type": "name"}, {"api_name": "entities.Task", "line_number": 298, "usage_type": "name"}, {"api_name": "entities.ResponseSet", "line_number": 298, "usage_type": "name"}, {"api_name": "entities.Participant", "line_number": 298, "usage_type": "name"}, {"api_name": "entities.Model.get_by_id", "line_number": 318, "usage_type": "call"}, {"api_name": "entities.Model", "line_number": 318, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 319, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 319, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 319, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 319, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 320, "usage_type": "name"}, {"api_name": "entities.Task", "line_number": 325, "usage_type": "name"}, {"api_name": "entities.ResponseSet", "line_number": 328, "usage_type": "name"}, {"api_name": "entities.Researcher.get_by_email", "line_number": 340, "usage_type": "call"}, {"api_name": "entities.Researcher", "line_number": 340, 
"usage_type": "name"}, {"api_name": "entities.Researcher.get_by_id", "line_number": 343, "usage_type": "call"}, {"api_name": "entities.Researcher", "line_number": 343, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.BadKeyError", "line_number": 344, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 344, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.KindError", "line_number": 344, "usage_type": "attribute"}, {"api_name": "entities.InvalidIdError", "line_number": 344, "usage_type": "name"}, {"api_name": "entities.EntityDNEError", "line_number": 345, "usage_type": "name"}, {"api_name": "entities.Researcher.get_current", "line_number": 352, "usage_type": "call"}, {"api_name": "entities.Researcher", "line_number": 352, "usage_type": "name"}, {"api_name": "entities.Researcher.get_current", "line_number": 357, "usage_type": "call"}, {"api_name": "entities.Researcher", "line_number": 357, "usage_type": "name"}, {"api_name": "webapp2.WSGIApplication", "line_number": 387, "usage_type": "call"}]} +{"seq_id": "332850097", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nimport kindlepush\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept(IOError, ImportError):\n with open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name='kindlepush',\n version=kindlepush.__version__,\n description='Automatically send your doc to your kindle without clicking the deliver button for 3G device',\n long_description=long_description,\n url='https://github.com/lord63/kindledxpush',\n author='lord63',\n author_email='lord63.j@gmail.com',\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n keywords='kindle push automatically 3G',\n packages=['kindlepush'],\n install_requires=['requests', 'beautifulsoup4', 'terminal'],\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'kindlepush=kindlepush.kindle:main',\n ],\n },\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pypandoc.convert", "line_number": 10, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "kindlepush.__version__", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "18916201", "text": "import httplib\nimport unittest\n\nimport mock\n\nfrom abstract_test_case import AbstractTestCase\nfrom fake_requests import mock_response\n\n\nclass TestThermostat(unittest.TestCase, AbstractTestCase):\n def setUp(self):\n self.build_client()\n\n @mock.patch('netatmo_client.client.requests')\n def test_get_thermostat_data(self, fake_requests):\n fake_requests.get = mock_response('https://api.netatmo.net/api/getthermostatsdata',\n 'get',\n None,\n httplib.OK,\n None,\n 'api', 'getthermostatsdata', 'GET.json')\n result = self.client.thermostat.get_thermostat_data()\n self.assertEqual(result['user']['mail'], 'someone@somewhere.com')\n self.assertEqual(len(result['devices']), 2)\n\n @mock.patch('netatmo_client.client.requests')\n def test_get_thermostat_data_by_device(self, fake_requests):\n device_id = 
'test-device-id'\n\n def _check_data(**kwargs):\n self.assertIsNotNone(kwargs.get('params'))\n self.assertEqual(kwargs.get('params').get('device_id'), device_id)\n\n fake_requests.get = mock_response('https://api.netatmo.net/api/getthermostatsdata',\n 'get',\n _check_data,\n httplib.OK,\n None,\n 'api', 'getthermostatsdata', 'GET_{id}.json')\n\n result = self.client.thermostat.get_thermostat_data(device_id=device_id)\n self.assertEqual(result['user']['mail'], 'someone@somewhere.com')\n self.assertEqual(len(result['devices']), 1)\n\n @mock.patch('netatmo_client.client.requests')\n def test_create_schedule(self, fake_requests):\n device_id = 'test-device-id'\n module_id = 'test-module-id'\n name = 'test-name-id'\n\n def _check_form(**kwargs):\n self.assertIsNotNone(kwargs.get('data'))\n form = kwargs.get('data')\n self.assertEqual(form.get('device_id'), device_id)\n self.assertEqual(form.get('module_id'), module_id)\n self.assertEqual(form.get('name'), name)\n self.assertIsNotNone(form.get('zones'))\n self.assertIsNotNone(form.get('timetable'))\n\n fake_requests.post = mock_response('https://api.netatmo.net/api/createnewschedule',\n 'post',\n _check_form,\n httplib.OK,\n None,\n 'api', 'createnewschedule', 'POST.json')\n\n result = self.client.thermostat.create_new_schedule(device_id=device_id,\n module_id=module_id,\n name=name,\n zones=[],\n timetable=[])\n self.assertEqual(result['schedule_id'], '53a056ba55ee4f57198b4569')\n\n @mock.patch('netatmo_client.client.requests')\n def test_set_therm_point(self, fake_requests):\n device_id = 'test-device-id'\n module_id = 'test-module-id'\n setpoint_mode = 'manual'\n setpoint_endtime = 666\n setpoint_temp = 19\n\n def _check_form(**kwargs):\n self.assertIsNotNone(kwargs.get('data'))\n form = kwargs.get('data')\n self.assertEqual(form.get('device_id'), device_id)\n self.assertEqual(form.get('module_id'), module_id)\n self.assertEqual(form.get('setpoint_mode'), setpoint_mode)\n self.assertEqual(form.get('setpoint_endtime'), setpoint_endtime)\n self.assertEqual(form.get('setpoint_temp'), setpoint_temp)\n\n fake_requests.post = mock_response('https://api.netatmo.net/api/setthermpoint',\n 'post',\n _check_form,\n httplib.OK,\n None,\n 'api', 'setthermpoint', 'POST.json')\n\n self.client.thermostat.set_therm_point(device_id=device_id,\n module_id=module_id,\n setpoint_mode=setpoint_mode,\n setpoint_endtime=setpoint_endtime,\n setpoint_temp=setpoint_temp)\n\n @mock.patch('netatmo_client.client.requests')\n def test_switch_schedule(self, fake_requests):\n device_id = 'test-device-id'\n module_id = 'test-module-id'\n schedule_id = 'test-schedule-id'\n\n def _check_form(**kwargs):\n self.assertIsNotNone(kwargs.get('data'))\n form = kwargs.get('data')\n self.assertEqual(form.get('device_id'), device_id)\n self.assertEqual(form.get('module_id'), module_id)\n self.assertEqual(form.get('schedule_id'), schedule_id)\n\n fake_requests.post = mock_response('https://api.netatmo.net/api/switchschedule',\n 'post',\n _check_form,\n httplib.OK,\n None,\n 'api', 'switchschedule', 'POST.json')\n\n self.client.thermostat.switch_schedule(device_id=device_id,\n module_id=module_id,\n schedule_id=schedule_id)\n\n @mock.patch('netatmo_client.client.requests')\n def test_sync_schedule(self, fake_requests):\n device_id = 'test-device-id'\n module_id = 'test-module-id'\n\n def _check_form(**kwargs):\n self.assertIsNotNone(kwargs.get('data'))\n form = kwargs.get('data')\n self.assertEqual(form.get('device_id'), device_id)\n self.assertEqual(form.get('module_id'), module_id)\n 
self.assertIsNotNone(form.get('zones'))\n self.assertIsNotNone(form.get('timetable'))\n\n fake_requests.post = mock_response('https://api.netatmo.net/api/syncschedule',\n 'post',\n _check_form,\n httplib.OK,\n None,\n 'api', 'syncschedule', 'POST.json')\n\n self.client.thermostat.sync_schedule(device_id=device_id,\n module_id=module_id,\n zones=[],\n timetable=[])\n", "sub_path": "test/test_thermostat.py", "file_name": "test_thermostat.py", "file_ext": "py", "file_size_in_byte": 7069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "abstract_test_case.AbstractTestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "fake_requests.get", "line_number": 16, "usage_type": "attribute"}, {"api_name": "fake_requests.mock_response", "line_number": 16, "usage_type": "call"}, {"api_name": "httplib.OK", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 14, "usage_type": "call"}, {"api_name": "fake_requests.get", "line_number": 34, "usage_type": "attribute"}, {"api_name": "fake_requests.mock_response", "line_number": 34, "usage_type": "call"}, {"api_name": "httplib.OK", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 26, "usage_type": "call"}, {"api_name": "fake_requests.post", "line_number": 60, "usage_type": "attribute"}, {"api_name": "fake_requests.mock_response", "line_number": 60, "usage_type": "call"}, {"api_name": "httplib.OK", "line_number": 63, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 45, "usage_type": "call"}, {"api_name": "fake_requests.post", "line_number": 91, "usage_type": "attribute"}, {"api_name": "fake_requests.mock_response", "line_number": 91, "usage_type": "call"}, {"api_name": "httplib.OK", "line_number": 94, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 74, "usage_type": "call"}, {"api_name": "fake_requests.post", "line_number": 117, "usage_type": "attribute"}, {"api_name": "fake_requests.mock_response", "line_number": 117, "usage_type": "call"}, {"api_name": "httplib.OK", "line_number": 120, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 104, "usage_type": "call"}, {"api_name": "fake_requests.post", "line_number": 141, "usage_type": "attribute"}, {"api_name": "fake_requests.mock_response", "line_number": 141, "usage_type": "call"}, {"api_name": "httplib.OK", "line_number": 144, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "65301975", "text": "#!/usr/bin/env python\n'''\nWe will see\n\n'''\n\nimport rospy\nimport array\nfrom std_msgs.msg import Int64, Float64MultiArray, Float64, Float32MultiArray, Bool\n\nfrom simple_pid import PID\n\n# from camera_radar_msg.msg import fused_data\n# from camera_radar_msg.msg import fused_data_\nimport sys\nimport time\n\nclass Controls(object):\n\n\n\tdef __init__(self):\n\t\t#Initialize ROS node\n\t\trospy.init_node('test_controls', anonymous=True)\n\n\t\t#Steering\n\t\tself.BodyAngle = 0.0\n\t\tself.RadianAngle = 0.0\n\t\tself.PrevRadianAngle = 1.0\n\t\tself.RadianFactor = 57.3248\n\t\tself.MotorToBodyFactor = 36\n\t\t\n\t\t#Objects for PID class\n\t\t# self.LK2 = PID(self.Accel2Fact, self.Accel2Param, self.Accel2Constrain)\n\t\t# self.steer = PID(self.SteerFact,self.SteerParam, self.SteerConstrain)\n\t\t# self.Brake1 = PID(self.Brake1Fact, self.Brake1Param, 
self.Brake1Constrain)\n\t\t\n\t\t#Motor Speed \n\t\tself.AccelMotorSpeed = 1\n\t\tself.BrakeMotorSpeed = 2\n\t\tself.SteerMotorSpeed = 1\n\t\tself.ClutchMotorSpeed = 1\n\t\t\n\t\t#Speed related \n\t\tself.ObjectPresent = False\n\t\tself.ReferenceSpeed = 0\n\t\tself.CurrentSum = 0\n\t\tself.index = 0\n\t\tself.CurrentReadings = [0,0,0,0,0,0,0,0,0,0,0]\n\t\tself.CurrentValue = 0\n\n\n\t\tself.BrakeAngle = 0.0\n\t\tself.ClutchAngle = 0.0\n\t\tself.CurrentVelocity = 0.0\n\t\tself.EmergencyBrakeMotorSpeed = 0.05\n\t\tself.EmergencyBrakeAngle = 129\n\t\t# self.STOPBoardDistance = 15.0\n\n\t\t#Limiting Variables\n\t\tself.AccelMax = 70\n\t\tself.BrakeMax = 135\t\t#100 \n\t\tself.ClutchMax = 330\n\t\tself.halfClutch = 210 \t#210-200\n\n\t\tself.NormalAccel = 1\n\t\tself.ThrottlePos = 1\n\t\t\n\t\tself.Brake = Float32MultiArray()\n\t\tself.Accel = Float32MultiArray()\n\t\tself.Steer = Float32MultiArray()\n\t\tself.Clutch = Float32MultiArray()\n\t\t\n\t\tself.BrakeEmergency = Float32MultiArray()\n\n\t\tself.FBSteeringAngle = 0\n\t\tself.CurrentSteeringAngle = 0\n\t\tself.i =0\n\t\tself.CurrentSteeringAngleBuf = []\n\t\tfor self.i in range(0,100):\n\t\t\tself.CurrentSteeringAngleBuf.append(0)\n\t\tself.i = 0\n\n\t\t#Accel PID Parameters\n\t\tself.AccelKp = 6.0\n\t\tself.AccelKi = 1.2\n\t\tself.AccelKd = 1.5\n\t\tself.Accelsetpoint = self.ReferenceSpeed\n\n\t\tself.AccelPID = PID(Kp = self.AccelKp, Ki = self.AccelKi, Kd = self.AccelKd, setpoint = self.Accelsetpoint)\n\t\tself.AccelPID.output_limits = (0, 50)\n\n\t\t#Brake PID Parameters\n\t\tself.BrakeKp = -8.0 \n\t\tself.BrakeKi = -0.0\n\t\tself.BrakeKd = -5.0\n\t\tself.Brakesetpoint = self.ReferenceSpeed\n\n\t\tself.BrakePID = PID(Kp = self.BrakeKp, Ki = self.BrakeKi, Kd = self.BrakeKd, setpoint = self.Brakesetpoint)\n\t\tself.BrakePID.output_limits = (0, self.BrakeMax)\n\t\t# self.BrakePID.proportional_on_measurement = True\n\n\n\t\tif not rospy.is_shutdown():\n\t\t\t#Subscribers\n\t\t\t# self.OffsetValue = rospy.Subscriber(\"Vision/Lane_Offset\", Int64, self.cal_angle)\n\t\t\trospy.Subscriber(\"Input/Inputs\", Float64MultiArray, self.get_data , queue_size=1)\n\t\t\t# rospy.Subscriber(\"/fused_data\", fused_data_, self.update_dist)\n\t\t\trospy.Subscriber(\"/manual_speed\", Float64, self.call_manualSPeed)\n\t\t\t# rospy.Subscriber(\"/object_dis\", Float64, self.get_obj_dis)\n\t\t\t# rospy.Subscriber(\"/speedCommand\", Float64, self.get_MPCspeed, queue_size=1)\n\t\t\t# rospy.Subscriber(\"/steerCommand\", Float64, self.get_MPCsteer, queue_size=1)\n\t\t\t# rospy.Subscriber(\"/planning/gideon/emergencyFlag\", Bool, self.get_emergency, queue_size=1)\n\t\t\t#rospy.Subscriber(\"/Controls/debug\", , self.get_emergency, queue_size=1)\n\n\t\t\t#Publisher\n\t\t\tself.SteerPub = rospy.Publisher('/Output/Steering_Angle', Float32MultiArray, queue_size=1) # For steering motor\n\t\t\tself.BrakePub = rospy.Publisher('/Output/Brake_Angle', Float32MultiArray, queue_size=1) # For brake motor\n\t\t\tself.ClutchPub = rospy.Publisher('/Output/Clutch_Angle', Float32MultiArray, queue_size=1) # For brake motor\t\t\t\n\t\t\tself.AccelPub = rospy.Publisher('/Output/Speed_Angle', Float32MultiArray, queue_size=1) # For accelerator motor\n\t\t\tself.SpeedPub = rospy.Publisher('/OBD', Float64, queue_size=1) # For accelerator motor\n\n\n\tdef call_manualSPeed(self, data):\n\t\tself.ReferenceSpeed = data.data\n\t\tprint(\"manual_speed\",self.ReferenceSpeed)\n\n\tdef get_MPCspeed(self, msg):\n\t\tif (self.ObjectPresent == False):\n\t\t\tself.ReferenceSpeed = (msg.data) #Data is 
converted from m/s to km/h\n\t\t#\tprint(\"Velocity Status:\",self.ReferenceSpeed, self.CurrentVelocity)\n\t\telse:\n\t\t\tself.ReferenceSpeed = 0;\n\tdef get_MPCsteer(self, msg):\n\t\tself.RadianAngle = msg.data\n\n\tdef get_emergency(self, msg):\n\t\t# self.ReferenceSpeed = 0\n\t\tself.ObjectPresent = msg.data\n\t\tif self.ObjectPresent == True:\n\t\t\tself.ReferenceSpeed = 0\n\n\tdef get_data(self, data):\n\t\t# self.CurrentVelocity = data.data[5]\n\t\tself.CurrentSum = self.CurrentSum - self.CurrentReadings[self.index]\n\t\tself.CurrentValue = data.data[5]\n\t\tself.CurrentReadings[self.index] = self.CurrentValue\n\t\tself.CurrentSum = self.CurrentSum + self.CurrentValue\n\t\tself.index = (self.index+1)%10\n\t\tself.CurrentVelocity = (self.CurrentSum/10)/2\n\n\t\tself.ThrottlePos = data.data[11]\n\t\tself.BrakePot = data.data[4]\n\t\tself.OBD = Float64()\n\t\tself.OBD.data = self.CurrentVelocity / 3.6\n\t\tself.SpeedPub.publish(self.OBD)\n\n\tdef EmergencyBraking(self):\n\t\tself.Accel.data = [1, 1]\n\t\tself.AccelPub.publish(self.Accel)\n\t\ttime.sleep(0.05)\n\t\t\n\t\tprint(\"Obstacle Present, Emergency Braking ! ! !\")\n\t\tself.BrakeEmergency.data = [self.EmergencyBrakeAngle, self.EmergencyBrakeMotorSpeed]\n\t\tself.BrakePub.publish(self.BrakeEmergency)\n\t\ttime.sleep(0.05)\n\t\t\n\n\tdef cruizer(self):\n\n\t\tprint(self.ObjectPresent,self.CurrentVelocity,self.ReferenceSpeed,self.NormalAccel,int(self.BrakeAngle),self.ClutchAngle)\n\t\tif(self.ObjectPresent):\n\t\t\tprint(\"Going to Emergency \")\n\t\t\tself.EmergencyBraking()\n\t\t\t\n\t\telse:\n\t\t\tif(self.ReferenceSpeed >= 100):\t\n\t\t\t\tself.BrakeAngle = 1\n\t\t\t\tself.BrakeMotorSpeed = 1\n\t\t\t\tself.Brake.data = [self.BrakeAngle, self.BrakeMotorSpeed]\n\t\t\t\tself.BrakePub.publish(self.Brake)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\tself.NormalAccel = 1\n\t\t\t\tself.AccelMotorSpeed = 1\n\t\t\t\tself.Accel.data = [self.NormalAccel, self.AccelMotorSpeed]\n\t\t\t\tself.AccelPub.publish(self.Accel)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\tself.ClutchAngle = 1\n\t\t\t\tself.ClutchMotorSpeed = 1\n\t\t\t\tself.Clutch.data = [self.ClutchAngle, self.ClutchMotorSpeed]\n\t\t\t\tself.ClutchPub.publish(self.Clutch)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\treturn 0\n\t\n\t\t\telif(self.ReferenceSpeed <= 0):\n\t\t\t\tself.BrakeAngle = self.BrakeMax\n\t\t\t\tself.BrakeMotorSpeed = 1\n\t\t\t\tself.Brake.data = [self.BrakeAngle, self.BrakeMotorSpeed]\n\t\t\t\tself.BrakePub.publish(self.Brake)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\tself.NormalAccel = 1\n\t\t\t\tself.AccelMotorSpeed = 1\n\t\t\t\tself.Accel.data = [self.NormalAccel, self.AccelMotorSpeed]\n\t\t\t\tself.AccelPub.publish(self.Accel)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\tself.ClutchAngle = self.ClutchMax\n\t\t\t\tself.ClutchMotorSpeed = 1\n\t\t\t\tself.Clutch.data = [self.ClutchAngle, self.ClutchMotorSpeed]\n\t\t\t\tself.ClutchPub.publish(self.Clutch)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.BrakePID.auto_mode = True\n\t\t\t\tself.AccelPID.auto_mode = True\n\t\t\t\t\t\n\t\t\t\tself.AccelPID.setpoint = self.ReferenceSpeed\n\t\t\t\tself.BrakePID.setpoint = self.ReferenceSpeed\n\t\t\t\n\t\t\t\tif(1):\n\t\t\t\t\tif(self.CurrentVelocity < 8):\n\t\t\t\t\t\tself.AccelPID.output_limits = (0, 30)\n\t\t\t\t\t\tself.BrakePID.output_limits = (0, self.BrakeMax)\t\t\n\t\t\t\t\t\tself.ClutchAngle = self.halfClutch\n\t\t\t\t\t\tself.ClutchMotorSpeed = 1\n\t\t\t\t\t\tself.Clutch.data = [self.ClutchAngle, 
self.ClutchMotorSpeed]\n\t\t\t\t\t\tself.ClutchPub.publish(self.Clutch)\n\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.AccelPID.output_limits = (0, 60)\n\t\t\t\t\t\tself.ClutchAngle = 130\n\t\t\t\t\t\tself.ClutchMotorSpeed = 1\n\t\t\t\t\t\tself.Clutch.data = [self.ClutchAngle, self.ClutchMotorSpeed]\n\t\t\t\t\t\tself.ClutchPub.publish(self.Clutch)\n\t\t\t\t\tprint(\"Accelerating\", self.NormalAccel)\n\t\t\t\t\t\n\t\t\t\t\t# self.NormalAccel = 17 + self.AccelPID(self.CurrentVelocity)\t\t\t\t\t\n\t\t\t\t\t# if(self.NormalAccel > self.AccelMax):\n\t\t\t\t\t# \tself.NormalAccel = self.AccelMax\n\t\t\t\t\t# self.AccelMotorSpeed = 1\t\t\t\t\t\n\t\t\t\t\t# self.Accel.data = [self.NormalAccel, self.AccelMotorSpeed]\n\t\t\t\t\t# self.AccelPub.publish(self.Accel)\n\t\t\t\t\t# time.sleep(0.05)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tself.BrakeAngle = 100 + self.BrakePID(self.CurrentVelocity)\t\t\t\t\t\n\t\t\t\t\tif(self.BrakeAngle > self.BrakeMax):\n\t\t\t\t\t\tself.BrakeAngle = self.BrakeMax\n\t\t\t\t\tself.BrakeMotorSpeed = 1\t\t\t\t\t\n\t\t\t\t\tself.Brake.data = [int(self.BrakeAngle), self.BrakeMotorSpeed]\n\t\t\t\t\tself.BrakePub.publish(self.Brake)\n\t\t\t\t\ttime.sleep(0.05)\t\t\t\t\t\t\t\t\t\n\ndef main(args):\n\ttry:\n\t\tcontrol = Controls()\n\t\t# rate = rospy.Rate(10)\n\t\twhile (not rospy.is_shutdown()):\n\t\t\t# rate.sleep()\n\t\t\tcontrol.cruizer()\n\t\t\t\n\texcept KeyboardInterrupt():\n\t\tprint(\"Shutting down..\")\n\t\trospy.shutdown()\n\nif __name__ == '__main__':\n\tmain(sys.argv)", "sub_path": "test_controls_new.py", "file_name": "test_controls_new.py", "file_ext": "py", "file_size_in_byte": 8432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rospy.init_node", "line_number": 23, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 68, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 69, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 70, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 71, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 73, "usage_type": "call"}, {"api_name": "simple_pid.PID", "line_number": 89, "usage_type": "call"}, {"api_name": "simple_pid.PID", "line_number": 98, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 103, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 106, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float64MultiArray", "line_number": 106, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 108, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float64", "line_number": 108, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 116, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 116, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 117, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 117, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 118, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 118, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 119, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 119, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 120, "usage_type": 
"call"}, {"api_name": "std_msgs.msg.Float64", "line_number": 120, "usage_type": "argument"}, {"api_name": "std_msgs.msg.Float64", "line_number": 153, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 181, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 186, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 191, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 199, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 204, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 209, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 250, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 256, "usage_type": "call"}, {"api_name": "rospy.shutdown", "line_number": 262, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 265, "usage_type": "attribute"}]} +{"seq_id": "626541379", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/24 14:48\n\nimport requests\nimport json,time\nimport os\n\n\nclass Spider():\n\n def __init__(self):\n self.url1 = \"https://{}.talk-cloud.net/ClientAPI/getconfig\"\n self.url2 = \"https://global.talk-cloud.net/ClientAPI/getserverarea\"\n self.data = {\"serial\":\"981527802\",\"userrole\":0,\"coursename\": None}\n self.server = \"2\" #输入集群几\n\n def getserver(self):\n res = requests.get(self.url2)\n res_json = json.loads(res.text)\n json_serverarealist = res_json.get(\"serverarealist\")\n coursename_list = []\n msg = \"\"\n for i in json_serverarealist:\n chinesedesc = i.get(\"chinesedesc\")\n servernames = i.get(\"serverareaname\")\n # name_server = chinesedesc+\" \"+servernames\n url_ping = servernames+\"{}.talk-cloud.net\".format(self.server)\n re_replase = str(os.popen(\"ping -n 1 %s\"%url_ping).read())\n # print(re_replase)\n ip = re_replase[(re_replase.find(\"[\") + 1):re_replase.find(\"]\")]\n msg += ip\n msg += '\\n'\n with open(\"lines\", 'w', encoding='utf-8') as f:\n f.write(msg)\n coursename_list.append(servernames)\n return coursename_list\n #\n def getconfig(self):\n msg = \"\"\n for i in self.getserver():\n time.sleep(1)\n self.data['coursename'] = i\n res = requests.get(self.url1.format(i), self.data)\n time.sleep(1)\n new_json = json.loads(res.text)\n newcour_seaddrs = new_json.get(\"newcourseaddr\")[0]\n change = newcour_seaddrs.get(\"change\")\n print(change)\n\nif __name__ == '__main__':\n res_lines = Spider()\n res_lines.getserver()\n res_lines.getconfig()\n", "sub_path": "xianlu.py", "file_name": "xianlu.py", "file_ext": "py", "file_size_in_byte": 1822, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "393268969", "text": "import configparser\nfrom pprint import pprint\n\ndef main():\n\n iniReader = configparser.ConfigParser(interpolation=None) # Notice! raw output\n\n iniReader.optionxform = str # Notice! 
upper/lower asis\n # iniReader.read('../etc/ini/directmode.ini')\n iniReader.read('../etc/summary.txt')\n\n contents = {}\n\n for section in iniReader.sections():\n contents[section] = dict(iniReader.items(section))\n\n\n pprint(contents)\n\n\n return\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "test and sample/iniReader.py", "file_name": "iniReader.py", "file_ext": "py", "file_size_in_byte": 492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "configparser.ConfigParser", "line_number": 6, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "42557037", "text": "import torch\n\n\ndef get_all_preds(model, loader, device):\n\twith torch.no_grad():\n\t\tall_preds = torch.tensor([])\n\n\t\tfor batch_idx, (data, target) in enumerate(loader):\n\t\t\tdata, target = data.to(device), target.to(device)\n\n\t\t\tpreds = model(data)\n\t\t\tall_preds = torch.cat((all_preds, preds.data.cpu()), dim=0)\n\n\t\treturn all_preds\n", "sub_path": "helpers/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.no_grad", "line_number": 5, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "376351853", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\n\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nclassifiers = '''\\\nEnvironment :: Console\nDevelopment Status :: 4 - Beta\nIntended Audience :: Developers\nLicense :: OSI Approved :: License :: OSI Approved :: BSD License\nOperating System :: OS Independent\nProgramming Language :: Python\nTopic :: Multimedia :: Graphics\n'''\n\n#sys.argv.append('build_ext')\nsys.argv.extend(['sdist','--formats=gztar,zip'])\n#sys.argv.append('bdist_wininst')\n\nsetup(\n name = 'svgplotlib',\n version = '0.2',\n description = 'SVG plotting library',\n long_description = '''\\\n**svgplotlib** is a lightweight python package for creating SVG\ngraphs and charts.\n\nThe TEX package and freetype extension have been ported from matplotlib.\nCompared to matplotlib the dependency om numpy have been removed.\n\n**Highlights**\n\n * Pie chart\n * Bar chart\n * Gantt chart\n * XY plot\n * date plot\n * Support a subset of TEX syntax similar to matplotlib.\n * Inlines font glyps in SVG file for consistent results.\n * General SVG support.\n''',\n classifiers = [value for value in classifiers.split(\"\\n\") if value],\n \n author='Runar Tenfjord',\n author_email = 'runar.tenfjord@tenko.no',\n license = 'BSD',\n download_url='http://pypi.python.org/pypi/svgplotlib/',\n url = 'http://code.google.com/p/svgplotlib/',\n \n platforms = ['any'],\n requires = ['pyparsing'],\n \n ext_modules=[\n Extension(\"svgplotlib.freetype\",\n sources=[\"svgplotlib/freetype.pyx\"],\n depends=[\"svgplotlib/freetypeLib.pxd\"],\n include_dirs = ['svgplotlib','svgplotlib/include'],\n library_dirs = ['svgplotlib'],\n libraries=['freetype']),\n ],\n \n cmdclass = {'build_ext': build_ext},\n packages=['svgplotlib', 'svgplotlib.TEX'],\n package_data={'svgplotlib': ['svgplotlib.cfg', 'fonts/*.*']},\n)", "sub_path": "pypi_install_script/svgplotlib-0.2.tar/setup.py", "file_name": "setup.py", "file_ext": "py", 
"file_size_in_byte": 2001, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.argv.extend", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "distutils.core.setup", "line_number": 24, "usage_type": "call"}, {"api_name": "distutils.extension.Extension", "line_number": 58, "usage_type": "call"}, {"api_name": "Cython.Distutils.build_ext", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "278449720", "text": "import random\nimport math\n\nfrom base_search_class import SearchAlgo\nimport config\n\n\n# Temperature decrease hyper-parameters\nDEFAULT_LIN_COOLING = 1 \nDEFAULT_GEOM_COOLING = 0.9\nDEFAULT_SLOW_COOLING = 2\n\n#Number of Iterations hyper-parameters\nDEFAULT_CONST_ITERS = 1\nDEFAULT_INC_ITERS = 100\n\n#Simulated annealing hyper-parameters\nACCEPTANCE_PARAMETER = config.ACCEPT_PROBABILITY_FACTOR\n\nclass CoolingType:\n \"\"\"\n Describes how temperature decreases with the number of steps\n completed in the algorithm.\n The new temperature depends on the current temperature.\n \"\"\"\n\n def __init__(self, type_name=\"linear\", type_factor_dict={}):\n \"\"\"\n Initialize the object\n\n Args:\n type_name : Type of function chosen.\n type_factor_dict: Hyper-parameters for the function\n \"\"\"\n\n # Shows the types of functions available\n self.dict_types = { \n \"linear\": self.cooling_linear,\n \"geometric\": self.cooling_geometric,\n \"slow_decrease\": self.cooling_slow_decr\n }\n \n self.type_fn = self.dict_types[type_name]\n self.type_factor_dict = type_factor_dict\n \n def get_new_temp(self, cur_temp):\n \"\"\"\n Calculates new temperature based on current temperature using the\n function type and parameters chosen while creating the object.\n\n Args:\n cur_temp: Current Temperature\n\n Returns new temperature.\n \"\"\"\n\n return self.type_fn(cur_temp)\n\n ## Private Functions\n\n def cooling_linear(self, cur_temp):\n \"\"\"\n Linear temperature decrease.\n\n Args:\n cur_temp: Current Temperature\n\n Returns new temperature\n \"\"\"\n\n lin_cooling = self.type_factor_dict.get('lin_cooling',\n DEFAULT_LIN_COOLING)\n return cur_temp - lin_cooling\n\n def cooling_geometric(self, cur_temp):\n \"\"\"\n Geometric temperature decrease.\n\n Args:\n cur_temp: Current Temperature\n\n Returns new temperature\n \"\"\"\n\n geom_cooling = self.type_factor_dict.get('geom_cooling',\n DEFAULT_GEOM_COOLING)\n return cur_temp*geom_cooling\n\n def cooling_slow_decr(self, cur_temp):\n \"\"\"\n Slow temperature decrease. Usually paired with 1 iteration\n per temperature. 
The motivation is to keep the number of\n iterations per temperature low, but decrease the temperature\n slowly to allow exploration of the state space.\n\n Args:\n cur_temp: Current Temperature\n\n Returns new temperature\n \"\"\"\n\n slow_cooling = self.type_factor_dict.get('slow_cooling', DEFAULT_SLOW_COOLING)\n return float(cur_temp) / (1 + (float(1)/(slow_cooling*cur_temp)))\n\nclass NumItersPerTempType:\n \"\"\"\n Describes how the number of iterations per temperature varies\n with the number of steps completed in the algorithm.\n The number of iterations depends on the current temperature.\n \"\"\"\n\n def __init__(self, type_name=\"constant\", type_factor_dict={}):\n \"\"\"\n Initialize the object\n\n Args:\n type_name : Type of function chosen.\n type_factor_dict: Hyper-parameters for the function.\n \"\"\"\n\n # Shows the types of functions available.\n self.dict_types = {\n \"constant\": self.num_iters_constant,\n \"increasing\": self.num_iters_inc\n }\n\n self.type_fn = self.dict_types[type_name]\n self.type_factor_dict = type_factor_dict\n\n\n def get_num_iters(self, cur_temp):\n \"\"\"\n Returns number of iterations to be performed per temperature value.\n\n Args:\n cur_temp: Current Temperature\n\n Returns number of iterations at the temperature.\n \"\"\"\n\n return self.type_fn(cur_temp)\n\n ##Private Functions\n\n def num_iters_constant(self, cur_temp):\n \"\"\"\n Constant number of iterations at any temperature.\n\n Args:\n cur_temp: Current Temperature\n\n Returns number of iterations at the temperature.\n \"\"\"\n\n const_iter = self.type_factor_dict.get('const_iter',\n DEFAULT_CONST_ITERS)\n return const_iter\n\n def num_iters_inc(self, cur_temp):\n \"\"\"\n Increasing number of iterations at any temperature.\n No. of iterations = (Proportionality constant) / (current temperature)\n\n Args:\n cur_temp: Current Temperature\n\n Returns number of iterations at the temperature.\n \"\"\"\n\n inc_iter_factor = self.type_factor_dict.get('inc_iter', \n DEFAULT_INC_ITERS)\n return int(inc_iter_factor/cur_temp)\n\n\nclass SimulatedAnnealingAlgo(SearchAlgo):\n \"\"\"\n Executes the Simulated Annealing algorithm.\n \"\"\"\n\n def __init__(self, all_features, obj_fn, start_temp=100, final_temp=0,\n num_iter_per_temp_fn=NumItersPerTempType(),\n cooling_fn=CoolingType()):\n \"\"\"\n Initialize the algorithm object.\n\n Args:\n all_features : Argument to the base class - SearchAlgo\n obj_fn : Argument to the base class - SearchAlgo\n start_temp : Starting temperature\n final_temp : Stopping temperature\n num_iter_per_temp_fn: NumItersPerTempType object\n cooling_fn : CoolingType object\n \"\"\"\n\n super(SimulatedAnnealingAlgo, self).__init__(all_features, obj_fn)\n\n # Temperature parameters\n self.start_temp = start_temp\n self.final_temp = final_temp\n self.cooling_fn = cooling_fn\n self.num_iter_per_temp_fn = num_iter_per_temp_fn\n\n # Variables for the algorithm\n self.cur_temp = None\n self.cur_state = None #encoded subset of features.\n self.cur_state_energy = None\n\n def run(self):\n \"\"\"\n Executes the simulated annealing algorithm.\n\n Returns the decoded final state (subset of features) and its score.\n \"\"\"\n\n self.initialize()\n\n # Loop executes till final_temp reached.\n self.energy_list = []\n self.temp_list = []\n self.best_energy_temp = -10\n self.best_energy = -1000\n self.best_energy_state = None\n\n # counter for printing\n counter = 0\n while(True):\n # Print every 1000 temperature iterations\n counter += 1\n if counter%1000 == 0:\n print(\"At temperature 
\", self.cur_temp,\n \", Current Energy: \", self.cur_state_energy,\n \" Best Energy: \", self.best_energy)\n\n if self.cur_temp <= self.final_temp:\n break\n\n # Iterations per temperature\n for cur_iter in range(self.num_iter_per_temp_fn.get_num_iters(self.cur_temp)):\n\n # Generate random neighbour\n neighbour_state = self.create_random_neighbour()\n neighbour_state_energy = self.getScore(neighbour_state)\n\n if(neighbour_state_energy > self.cur_state_energy):\n # Jump to neighbouring state if it has more energy.\n self.cur_state = neighbour_state\n self.cur_state_energy = neighbour_state_energy\n else:\n # The neighbouring state is worse-off.\n energy_diff = neighbour_state_energy - self.cur_state_energy\n \n # Jump to neighbouring state with probability.\n if random.random() < math.exp(float(energy_diff)/(ACCEPTANCE_PARAMETER*self.cur_temp)):\n self.cur_state = neighbour_state\n self.cur_state_energy = neighbour_state_energy\n\n #Get the best energy state\n if self.cur_state_energy > self.best_energy:\n self.best_energy_temp = self.cur_temp\n self.best_energy = self.cur_state_energy\n self.best_energy_state = self.cur_state\n\n if neighbour_state_energy > -0.1:\n self.energy_list.append(self.cur_state_energy)\n self.temp_list.append(self.cur_temp)\n\n self.cur_temp = self.cooling_fn.get_new_temp(self.cur_temp)\n\n print(\"Ans: \",self.cur_state_energy)\n print(\"Best: \", self.best_energy, self.best_energy_temp)\n\n return self.decodeFeatures(self.cur_state), self.cur_state_energy\n\n def get_graph(self):\n \"\"\"\n Prints the graph\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n x = self.temp_list\n y = self.energy_list\n plt.plot(x,y)\n plt.xlim(self.start_temp, 0)\n plt.xlabel(\"Temperature\")\n plt.ylabel(\"Final energy value of state\")\n plt.title(\"Simulated Annealing Algo\")\n plt.legend()\n plt.show()\n\n #Private Functions\n\n def initialize(self):\n \"\"\"\n Initializes the variables of the algorithm.\n \"\"\"\n\n self.cur_temp = self.start_temp\n self.cur_state = self.create_random_initial_state()\n self.cur_state_energy = self.getScore(self.cur_state)\n\n\n def create_random_initial_state(self):\n \"\"\"\n Creates random state (random subset of features).\n\n Returns the generated random state.\n \"\"\"\n\n state = []\n\n for elem in self.all_features:\n if random.choice([True, False]):\n state.append(elem)\n\n return self.encodeFeatures(state)\n\n def create_random_neighbour(self):\n \"\"\"\n Creates random neighbour state using the current state.\n\n Returns the generated neighbouring state.\n \"\"\"\n \n current_state = self.cur_state\n\n #randint includes both endpoints.\n feature_to_flip = random.randint(0,len(current_state)-1)\n \n #generating the neighbour.\n neighbour = ''\n for i in range(len(current_state)):\n if i == feature_to_flip:\n neighbour += str((int(current_state[feature_to_flip]) + 1) % 2)\n else: \n neighbour += current_state[i]\n\n return neighbour\n\n", "sub_path": "featureselection/simulated_annealing.py", "file_name": "simulated_annealing.py", "file_ext": "py", "file_size_in_byte": 10453, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "config.ACCEPT_PROBABILITY_FACTOR", "line_number": 18, "usage_type": "attribute"}, {"api_name": "base_search_class.SearchAlgo", "line_number": 175, "usage_type": "name"}, {"api_name": "random.random", "line_number": 253, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 253, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 312, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 327, "usage_type": "call"}]} +{"seq_id": "613675276", "text": "import hashlib\nfrom typing import Union\n\n\ndef hash_160(x: bytes) -> bytes:\n return ripemd160(sha256(x))\n\ndef sha256(x: Union[bytes, str]) -> bytes:\n x = to_bytes(x, 'utf8')\n return bytes(hashlib.sha256(x).digest())\n\ndef ripemd160(x):\n try:\n md = hashlib.new('ripemd160')\n md.update(x)\n return md.digest()\n except BaseException:\n from . import ripemd\n md = ripemd.new(x)\n return md.digest()\n\ndef to_bytes(something, encoding='utf8') -> bytes:\n \"\"\"\n cast string to bytes() like object, but for python2 support it's bytearray copy\n \"\"\"\n if isinstance(something, bytes):\n return something\n if isinstance(something, str):\n return something.encode(encoding)\n elif isinstance(something, bytearray):\n return bytes(something)\n else:\n raise TypeError(\"Not a string or bytes like object\")\n\ndef base_58_encode(v: bytes) -> str:\n __b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n long_value = 0\n power_of_base = 1\n for c in v[::-1]:\n # naive but slow variant: long_value += (256**i) * c\n long_value += power_of_base * c\n power_of_base <<= 8\n result = bytearray()\n while long_value >= 58:\n div, mod = divmod(long_value, 58)\n result.append(__b58chars[mod])\n long_value = div\n result.append(__b58chars[long_value])\n # Bitcoin does a little leading-zero-compression:\n # leading 0-bytes in the input become leading-1s\n nPad = 0\n for c in v:\n if c == 0x00:\n nPad += 1\n else:\n break\n result.extend([__b58chars[0]] * nPad)\n result.reverse()\n return result.decode('ascii')\n", "sub_path": "bch/crypto.py", "file_name": "crypto.py", "file_ext": "py", "file_size_in_byte": 1705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "typing.Union", "line_number": 8, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 10, "usage_type": "call"}, {"api_name": "hashlib.new", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "423097514", "text": "# -*- coding: utf-8 -*-\n\nfrom datetime import date\nfrom datetime import timedelta\nimport calendar\n\ndef qt_dias_uteis(data, diasfrente):\n\tdias = 0;\n\tfor x in range(diasfrente):\n\t\tif(data.weekday() != 5 and data.weekday() != 6):\n\t\t\tdias = 
dias+1\n\t\tdata = data + timedelta(days=1)\n\treturn dias\n\n\nv_hora = float(input(\"Informe o valor da sua hora trabalhada: \"))\n\ndata_atual = date.today()\ndata_mes = date(year=data_atual.year, month=data_atual.month, day=1)\nultimo_dia_mes = calendar.monthrange(data_mes.year, data_mes.month)[1]\nsalario = v_hora * 8 * qt_dias_uteis(data_mes, ultimo_dia_mes)\n\nprint(\"Seu salário esse mês é R$ %f\" %salario)\n", "sub_path": "exercicios/estrutura_sequencial/exercicio_008.py", "file_name": "exercicio_008.py", "file_ext": "py", "file_size_in_byte": 640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.timedelta", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 19, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "505551113", "text": "from flask import flash\n\nfrom flask_app.config.mysqlconnection import connectToMySQL\n\n\nfrom flask_app.models.user import User\nclass Recipe():\n \n def __init__(self,data):\n self.id = data['id']\n self.name = data['name']\n self.date = data['date']\n self.description= data['description']\n self.instruction = data['instruction']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.minutes = data['minutes']\n self.users_id = data['users_id']\n self.user = None\n \n \n @classmethod\n def create_recipe(cls, data):\n query = \"INSERT INTO recipes (name, date, description, instruction, created_at, updated_at, minutes, users_id) VALUES (%(name)s, %(date)s, %(description)s, %(instruction)s, NOW(), NOW(), %(minutes)s, %(users_id)s);\"\n \n result = connectToMySQL('belt_schema').query_db(query,data)\n \n @classmethod\n def get_all_recipes(cls):\n query = 'SELECT * FROM recipes JOIN users ON recipes.users_id = users.id;'\n \n results = connectToMySQL ('belt_schema').query_db(query)\n \n recipes = []\n \n for item in results:\n recipe = cls(item)\n user_data = {\n 'id' : item['users_id'],\n 'first_name' : item['first_name'],\n 'last_name' : item['last_name'],\n 'email' : item ['email'],\n 'password' : item['password'],\n 'created_at' : item['users.created_at'],\n 'updated_at' : item ['users.updated_at']\n }\n recipe.user = User(user_data) \n recipes.append(recipe)\n return recipes\n \n @classmethod\n def get_recipe_by_id(cls, data):\n query = 'SELECT * FROM recipes JOIN users ON recipes.users_id = users.id WHERE recipes.id = %(id)s;'\n \n result = connectToMySQL('belt_schema').query_db(query, data)\n \n recipe = cls(result[0])\n user_data = {\n 'id' : result[0]['users_id'],\n 'first_name' : result[0]['first_name'],\n 'last_name' : result[0]['last_name'],\n 'email' : result[0]['email'],\n 'password' : result[0]['password'],\n 'created_at' : result[0]['users.created_at'],\n 'updated_at' : result[0]['users.updated_at']\n }\n recipe.user = User(user_data)\n return recipe\n \n @classmethod\n def update_recipe(cls, data):\n query = 'UPDATE recipes SET name = %(name)s, description = %(description)s, instruction = %(instruction)s, date= %(date)s WHERE id = %(id)s;'\n connectToMySQL('belt_schema').query_db(query, data)\n \n @classmethod\n def delete_recipe(cls, data):\n query = 'DELETE FROM recipes WHERE id = %(id)s;'\n connectToMySQL('belt_schema').query_db(query, data)\n \n @staticmethod\n def 
validate_recipe(data):\n is_valid = True\n \n if len(data['name']) < 3:\n flash(\"Firstname should be 3 to 32 characters\")\n is_valid = False\n \n \n if len(data['description']) < 3:\n flash(\"Description should be 3 to 32 characters\")\n is_valid = False\n \n if len(data['instruction']) < 3:\n flash(\"Instruction should be 3 to 32 characters\")\n is_valid = False\n \n return is_valid\n ", "sub_path": "flask_app/models/recipe.py", "file_name": "recipe.py", "file_ext": "py", "file_size_in_byte": 3440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 32, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 55, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 67, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 73, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "178519064", "text": "\"\"\"I CONTROL THE WORLD. MWAHAHAHAHA.\"\"\"\n\nimport itertools\nimport logging\nimport time\n\nfrom django.utils import simplejson \nfrom model import player as player_model\nfrom model import world as world_model\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nfrom google.appengine.ext import webapp\n\n\nclass WorldHandler(webapp.RequestHandler):\n\n def get(self):\n self._OutputWorld(self._GetWorld())\n \n def put(self):\n logging.debug(\"World put request.\")\n def txn(request_json):\n logging.debug(\"World update transaction.\")\n world = self._GetWorld()\n if self._IsRecent(request_json, world):\n world_dict = self._GetWorldDict(world)\n world_dict = self._UpdateWorldDict(world_dict)\n self._PutWorldDict(world, world_dict)\n else:\n # Status 409 Conflict\n logging.info(\"Invalid game state updated, returning conflict.\")\n self.response.set_status(409)\n return world\n\n world = db.run_in_transaction(txn, self._GetRequestJson())\n self._OutputWorld(world)\n \n def _GetRequestJson(self):\n return simplejson.loads(self.request.body)\n\n def _GetWorld(self):\n w = world_model.World.get_by_key_name(\"1\")\n if w is None:\n w = world_model.World(key_name=\"1\")\n\n assert len(w.keys) == len(w.values)\n if len(w.keys) == 0:\n w.keys.append(\"timestamp\")\n w.values.append(str(int(time.time() * 1000)))\n\n w.put()\n return w\n \n def _GetWorldDict(self, w):\n d = {}\n for key, value in itertools.izip(w.keys, w.values):\n d[key] = value\n logging.debug(\"Constructed world dictionary: %s\" % d)\n return d\n \n def _PutWorldDict(self, world, world_dict):\n logging.debug(\"_PutWorldDict - Updating world to: %s\" % world_dict)\n world.keys = []\n world.values = []\n \n for key in world_dict:\n world.keys.append(key)\n world.values.append(world_dict[key])\n \n world.put()\n \n def _IsRecent(self, request_json, world):\n \"\"\"Return true if the incoming request's payload has a timestamp that's\n equal to the timestamp of the 
world.\"\"\"\n world_dict = self._GetWorldDict(world)\n payload = simplejson.loads(self.request.body)\n valid = \"timestamp\" in payload and \\\n payload[\"timestamp\"] >= int(world_dict[\"timestamp\"])\n if not valid:\n logging.debug(\"Invalid request timestamp. Payload: %s; World: %s\" %\n (payload, world_dict))\n else:\n logging.debug(\"Request passed world timestamp check. Payload: %s; \"\n \"World: %s\" % (payload, world_dict))\n return valid\n \n def _UpdateWorldDict(self, world_dict):\n \"\"\"In-place updates the given world with the request's body.\"\"\"\n logging.debug(\"Updating world.\")\n payload = self._GetRequestJson()\n \n for key in payload:\n value = payload[key]\n logging.debug(\"Updating attribute '%s' to '%s'\" % (key, value))\n if value is None:\n logging.debug(\"Deleting attribute '%s'\" % key)\n del world_dict[key]\n else:\n dumped = simplejson.dumps(value)\n logging.debug(\"Writing value '%s'\" % dumped)\n world_dict[key] = dumped\n\n timestamp = str(int(time.time() * 1000))\n logging.debug(\"Updating world timestamp to %s\" % timestamp)\n world_dict[\"timestamp\"] = timestamp\n \n return world_dict\n \n def _OutputWorld(self, world):\n assert world is not None\n assert len(world.keys) == len(world.values)\n \n self.response.headers[\"Content-Type\"] = \"application/json; charset=utf-8\"\n \n self.response.out.write(\"{\")\n \n elements = []\n for key, value in itertools.izip(world.keys, world.values):\n elements.append(\"\\\"%s\\\": %s\" % (key, value))\n \n players = player_model.Player.all().fetch(1000)\n p_list = []\n for player in players:\n p_d = {\"email\": player.user.email(), \n \"nickname\": player.user.nickname(),\n \"id\": int(player.user.user_id())}\n if player.location is not None:\n p_d[\"lat\"] = player.location.lat\n p_d[\"lon\"] = player.location.lon\n p_list.append(p_d)\n elements.append(\"\\\"%s\\\": %s\" % (\"players\", simplejson.dumps(p_list)))\n \n self.response.out.write(\",\".join(elements))\n \n self.response.out.write(\"}\")", "sub_path": "controller/api/world.py", "file_name": "world.py", "file_ext": "py", "file_size_in_byte": 4230, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 15, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 15, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.run_in_transaction", "line_number": 35, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 35, "usage_type": "name"}, {"api_name": "django.utils.simplejson.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 39, "usage_type": "name"}, {"api_name": "model.world.World.get_by_key_name", "line_number": 42, "usage_type": "call"}, {"api_name": "model.world.World", "line_number": 42, "usage_type": "attribute"}, {"api_name": "model.world", "line_number": 42, "usage_type": "name"}, {"api_name": "model.world.World", "line_number": 44, "usage_type": "call"}, {"api_name": "model.world", "line_number": 44, "usage_type": "name"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "itertools.izip", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.debug", 
"line_number": 58, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 62, "usage_type": "call"}, {"api_name": "django.utils.simplejson.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 76, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 96, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 99, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 99, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 100, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 104, "usage_type": "call"}, {"api_name": "itertools.izip", "line_number": 118, "usage_type": "call"}, {"api_name": "model.player.Player.all", "line_number": 121, "usage_type": "call"}, {"api_name": "model.player.Player", "line_number": 121, "usage_type": "attribute"}, {"api_name": "model.player", "line_number": 121, "usage_type": "name"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 131, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "488118504", "text": "\nimport os\nimport urllib\nimport zipfile\nimport errno\nimport pandas as pd\n#\nfrom . import url\n\ndef load_raw(year):\n \"\"\"\n Downloads the data from eCO2mix if it is not saved locally\n then reads the downloaded dataframes.\n \n :param year: The year of the data to load\n :type year: int\n :return: The selected eCO2mix data\n :rtype: pd.DataFrame\n \"\"\"\n\n fname_xls = url.fname_xls.format(year = year)\n fpath_xls = os.path.join(url.folder_raw,\n fname_xls,\n )\n try:\n if not os.path.isfile(fpath_xls): # try to download\n os.makedirs(url.folder_raw, exist_ok = True)\n fname_zip = url.fname_zip.format(year = year)\n fpath_zip = os.path.join(url.folder_raw,\n fname_zip,\n )\n url_zip = os.path.join(url.website,\n fname_zip,\n )\n urllib.request.urlretrieve(url_zip,\n fpath_zip,\n )\n with zipfile.ZipFile(fpath_zip, 'r') as zipObj:\n zipObj.extractall(os.path.dirname(fpath_xls))\n assert os.path.isfile(fpath_xls)\n df = pd.read_csv(fpath_xls,\n header = 0,\n index_col = False, \n sep = '\\t', \n encoding = 'latin-1',\n na_values = ['ND'],\n skipinitialspace = True,\n low_memory = False,\n )\n except FileNotFoundError: \n raise FileNotFoundError(\n errno.ENOENT,\n '\\nFile not found : {0}\\n'\n 'It can sbe downloaded from \\n'\n '{1}\\n'\n 'and stored in\\n'\n '{2}'.format(fname_xls,\n url.website,\n url.folder_raw,\n ))\n return df", "sub_path": "pub_data_visualization/load/load/eco2mix/load_raw/load_raw.py", "file_name": "load_raw.py", "file_ext": "py", "file_size_in_byte": 2097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 35, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "192299847", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom flask import Flask, request, render_template\nimport time, datetime, os\n\norderDate = datetime.date.today() - datetime.timedelta(days=7)\n\ndef check_if_elem_exist_by_id (driver, e_id):\n try:\n driver.find_element_by_id(e_id)\n except NoSuchElementException:\n return False\n else:\n return True\ndef check_if_elem_exist_by_class_name (driver, e_class_name):\n try:\n driver.find_element_by_class_name(e_class_name)\n except NoSuchElementException:\n return False\n else:\n return True\ndef check_if_elem_exist_by_xpath (driver, e_xpath):\n try:\n driver.find_element_by_xpath(e_xpath)\n except NoSuchElementException:\n return False\n else:\n return True\n\nchromedriver = \"/usr/bin/chromedriver\"\n\ndef get_finalCode_routine(nbCodes=1):\n finalCodes = []\n\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n driver = webdriver.Chrome(executable_path=chromedriver, chrome_options=options)\n \n i = 0\n while i < int(nbCodes):\n time.sleep(0.1)\n driver.get(\"https://www.bkvousecoute.fr/\")\n\n while 1:\n time.sleep(0.1)\n\n # Get the form\n if check_if_elem_exist_by_id(driver, \"surveyEntryForm\"):\n bk_form = driver.find_element_by_id(\"surveyEntryForm\")\n elif check_if_elem_exist_by_id(driver, \"surveyForm\"):\n bk_form = driver.find_element_by_id(\"surveyForm\")\n elif check_if_elem_exist_by_class_name(driver, \"ValCode\"):\n finalCodes.append(driver.find_element_by_class_name(\"ValCode\").text)\n i += 1\n break\n \n # Fill it out\n if check_if_elem_exist_by_id(driver, \"SurveyCode\"):\n elem = driver.find_element_by_id(\"SurveyCode\")\n elem.send_keys(\"22373\") # Code restaurant = Tourcoing\n\n if check_if_elem_exist_by_id(driver, \"InputDay\"):\n if orderDate.day < 10:\n formatDay = \"0\" + str(orderDate.day)\n else:\n formatDay = str(orderDate.day)\n select = Select(driver.find_element_by_id(\"InputDay\"))\n select.select_by_value(formatDay) # Jour de la commande\n\n if check_if_elem_exist_by_id(driver, \"InputMonth\"):\n if orderDate.month < 10:\n formatMonth = \"0\" + str(orderDate.month)\n else:\n formatMonth = str(orderDate.month)\n select = Select(driver.find_element_by_id(\"InputMonth\"))\n select.select_by_value(formatMonth) # Mois de la commande\n\n if check_if_elem_exist_by_id(driver, \"InputYear\"):\n formatYear = str(orderDate.year)[-2:]\n select = Select(driver.find_element_by_id(\"InputYear\"))\n 
select.select_by_value(formatYear) # Année de la commande ex: 18\n\n if check_if_elem_exist_by_id(driver, \"InputHour\"):\n select = Select(driver.find_element_by_id(\"InputHour\"))\n select.select_by_value(\"13\") # Heure de la commande\n\n if check_if_elem_exist_by_id(driver, \"InputMinute\"):\n select = Select(driver.find_element_by_id(\"InputMinute\"))\n select.select_by_value(\"42\") # Minute de la commande\n\n if check_if_elem_exist_by_xpath(driver, '//*[text()=\\'Merci de sélectionner votre type de commande\\']'):\n radios = driver.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[0].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n \n if check_if_elem_exist_by_xpath(driver, '//*[text()=\\'Combien de personnes étiez-vous ?\\']'):\n radios = driver.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[2].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n\n if check_if_elem_exist_by_class_name(driver, \"InputRowOdd\"):\n for line in driver.find_elements_by_class_name(\"InputRowOdd\"):\n radios = line.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[1].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n\n if check_if_elem_exist_by_class_name(driver, \"InputRowEven\"):\n for line in driver.find_elements_by_class_name(\"InputRowEven\"):\n radios = line.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[0].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n\n if check_if_elem_exist_by_xpath(driver, '//*[text()=\\'Combiende fois êtes-vous venus chez BURGER KING\\']'):\n radios = driver.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[0].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n\n if check_if_elem_exist_by_xpath(driver, '//*[text()=\\'Parmi les propositions suivantes, laquelle décrit le mieux la raison de votre visite chez BURGER KING\\']'):\n radios = driver.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[0].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n\n if check_if_elem_exist_by_xpath(driver, '//*[text()=\\'Parmi les enseignes de restauration rapide suivantes, dans laquelle allez-vous le plus souvent ?\\']'):\n radios = driver.find_elements_by_xpath('//*[@type=\\'radio\\']')\n radios[0].find_element_by_xpath('..').find_element_by_class_name(\"radioBranded\").click()\n\n if check_if_elem_exist_by_xpath(driver, '//*[text()=\\'Afin de nous aider à mieux analyser vos réponses et mieux vous comprendre, merci de répondre à ces dernières questions\\']'):\n for elem in driver.find_elements_by_tag_name(\"select\"):\n select = Select(elem)\n select.select_by_index(2)\n\n # Submit the form\n bk_form.submit()\n\n driver.close()\n return finalCodes\n\ndef test(nbCodes=1):\n i = 0\n while i < 10:\n try:\n return get_finalCode_routine(nbCodes)\n except ConnectionResetError:\n i += 1\n continue\n else:\n break\n \n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n@app.route('/', methods=['POST'])\ndef getCodes():\n return render_template('codes.html', codes=test(request.form['nbCodes']))\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6832, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.date.today", 
"line_number": 8, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 8, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 37, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 39, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 82, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 86, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 90, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "44747828", "text": "from celery import shared_task\nimport time\nfrom .models import Snake\nfrom django.core.mail import send_mail\nimport logging\nlogger = logging.getLogger(__name__)\n\n@shared_task # декоратор shared_task превращает функцию в задачу\ndef save_snakes():\n time.sleep(2)\n snakes = Snake.objects.all()\n with open('result.txt', 'w', encoding='utf-8') as f:\n for item in snakes:\n f.write(item.name + '\\n')\n\n@shared_task\ndef send_mail_task(subject, text, email):\n # time.sleep(30)\n logger.info(f\"Вызван метод send_mail_task с параметрами:{subject}, {text_message}\")\n send_mail(subject, text,\n 'pikulev.l.v@gmail.com',\n [email],\n fail_silently=False)", "sub_path": "L26_django_users/serp/serpentarium/snakes/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Snake.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Snake.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Snake", "line_number": 11, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 8, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 20, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "372981924", "text": "from 
django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\n \nfrom linebot import LineBotApi, WebhookParser\nfrom linebot.exceptions import InvalidSignatureError, LineBotApiError\nfrom linebot.models import (MessageEvent, TextSendMessage,ImageSendMessage,TemplateSendMessage,ButtonsTemplate,MessageTemplateAction,PostbackEvent,PostbackTemplateAction)\n\nfrom .scraper import stock\nfrom datetime import date\n \nline_bot_api = LineBotApi(settings.LINE_CHANNEL_ACCESS_TOKEN)\nparser = WebhookParser(settings.LINE_CHANNEL_SECRET)\n\n\n \n@csrf_exempt\ndef callback(request):\n \n if request.method == 'POST':\n signature = request.META['HTTP_X_LINE_SIGNATURE']\n body = request.body.decode('utf-8')\n \n try:\n events = parser.parse(body, signature) # 傳入的事件\n print(events)\n \n except InvalidSignatureError:\n return HttpResponseForbidden()\n \n except LineBotApiError:\n return HttpResponseBadRequest()\n \n for event in events:\n \n if isinstance(event, MessageEvent): # 如果有訊息��件\n \n if '~' not in event.message.text : \n \n global stock_search\n stock_search = stock(event.message.text) ## 導入yahoo finance api\n stock_search.stock_name()\n\n\n if stock_search.stock_name == 'AttributeError' : \n\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text = '錯誤股票代號,請重新輸入')\n )\n\n else : \n\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text = '輸入查詢期間(格式:1900-01-01~1900-01-30)')\n )\n \n else : \n ##擷取二次event內容\n start_time = event.message.text.split('~')[0]\n end_time = event.message.text.split('~')[1]\n \n try : \n date.fromisoformat(str(start_time))\n date.fromisoformat(str(end_time))\n \n stock_search.stock_pic(start_time, end_time)\n\n line_bot_api.reply_message(\n event.reply_token,\n [TextSendMessage(\n text = str(stock_search.stock_id) + stock_search.stock_name + '走勢圖'),\n ImageSendMessage(original_content_url = stock_search.pic_link,\n preview_image_url = stock_search.pic_link)\n ]## 文字與圖片回應\n )\n \n except :\n \n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text = '日期格式或值錯誤,請重新輸入')\n )\n \n \n return HttpResponse()\n else:\n return HttpResponseBadRequest()\n \n \n \n \n \n \n \n''' \n \n'''", "sub_path": "Python/LineBot/mylinebot/TWstocklinebot/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "linebot.LineBotApi", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.settings.LINE_CHANNEL_ACCESS_TOKEN", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "linebot.WebhookParser", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.settings.LINE_CHANNEL_SECRET", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "linebot.exceptions.InvalidSignatureError", "line_number": 31, "usage_type": "name"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 32, "usage_type": "call"}, {"api_name": "linebot.exceptions.LineBotApiError", "line_number": 34, "usage_type": "name"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 35, "usage_type": "call"}, {"api_name": "linebot.models.MessageEvent", 
"line_number": 39, "usage_type": "argument"}, {"api_name": "scraper.stock", "line_number": 44, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 52, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.date.fromisoformat", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 68, "usage_type": "name"}, {"api_name": "datetime.date.fromisoformat", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 69, "usage_type": "name"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 75, "usage_type": "call"}, {"api_name": "linebot.models.ImageSendMessage", "line_number": 77, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 86, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 90, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 92, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "127006911", "text": "from bs4 import BeautifulSoup\n\nwith open(\"Index.html\",'r') as html_file:\n content=html_file.read()\n # print(content)\n\n soup=BeautifulSoup(content, 'lxml')\n # print(soup.prettify())\n # tags=soup.find_all('h5')\n # print(tags)\n\n # courses_html_tags=soup.find_all('h5')\n # for course in courses_html_tags:\n # print(course.text)\n\n course_cards=soup.find_all('div', class_='card')\n for course in course_cards:\n course_name=course.h5.text\n course_price=course.a.text.split()[-1]\n print(f'{course_name} and price is {course_price}')\n\n", "sub_path": "ScrapingRevision.py", "file_name": "ScrapingRevision.py", "file_ext": "py", "file_size_in_byte": 584, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "141426906", "text": "import re\r\nimport time\r\nimport requests\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nre_ip = re.compile(r'\\d+\\.\\d+\\.\\d+\\.\\d+$')\r\ndomain_list = ['download.xboxlive.com', 'assets1.xboxlive.com', 'assets2.xboxlive.com']\r\nip_list_file = 'data/ip.list'\r\n\r\ndef get_ip_list_of(domain):\r\n ip_list = []\r\n url = 'http://site.ip138.com/{}/'.format(domain)\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\r\n }\r\n response = requests.get(url, headers=headers)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n curadress_tag = soup.find(id='curadress')\r\n for tag in curadress_tag.next_siblings:\r\n if tag.name == 'p':\r\n ip = _get_ip_from_a(tag.a)\r\n if ip:\r\n ip_list.append(ip)\r\n return ip_list\r\n\r\ndef _get_ip_from_a(a):\r\n if not a.string:\r\n return ''\r\n a_string = a.string.strip()\r\n match = re_ip.match(a_string)\r\n if(match is None):\r\n return ''\r\n return a_string\r\n\r\ndef main():\r\n ip_list = []\r\n for domain in domain_list:\r\n ip_list.extend(get_ip_list_of(domain))\r\n time.sleep(1)\r\n ip_list = list(set(ip_list))\r\n with open(ip_list_file, 'w') as file:\r\n file.write('\\n'.join(ip_list))\r\n\r\nif __name__ == \"__main__\":\r\n print('updating ip list...')\r\n main()\r\n print('update success. 
ip list file: {}'.format(ip_list_file))\r\n", "sub_path": "update_ip_list.py", "file_name": "update_ip_list.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "258733678", "text": "from rest_framework.exceptions import ParseError\n\nfrom rest_framework.mixins import (CreateModelMixin, UpdateModelMixin,\n DestroyModelMixin)\nfrom rest_framework.viewsets import GenericViewSet\n\n\nfrom posts.models import Post\nfrom .models import Comment\nfrom .serializers import CommentSerializer\n\nfrom common.permissions import ItemOwnerOrCreateOrReadOnly\nfrom common.view_mixins import VoteAPIViewMixin\n\n\nclass CommentViewSet(VoteAPIViewMixin, CreateModelMixin, UpdateModelMixin,\n DestroyModelMixin, GenericViewSet):\n model = Comment\n queryset = Comment.objects.all()\n serializer_class = CommentSerializer\n permission_classes = (ItemOwnerOrCreateOrReadOnly, )\n\n def perform_create(self, serializer):\n try:\n parent = serializer.validated_data.pop('_parent')\n except KeyError:\n raise ParseError(detail=\"Parent URL is required.\")\n \n extra = dict(sender=self.request.user)\n\n if isinstance(parent, Post):\n extra.update({\n 'post': parent,\n 'parent': None\n })\n elif isinstance(parent, Comment):\n extra.update({\n 'post': parent.post,\n 'parent': parent\n })\n\n serializer.save(**extra)\n", "sub_path": "django-angular_notreddit/comments/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "common.view_mixins.VoteAPIViewMixin", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.mixins.DestroyModelMixin", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Comment.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 19, "usage_type": "name"}, {"api_name": "serializers.CommentSerializer", "line_number": 20, "usage_type": "name"}, {"api_name": "common.permissions.ItemOwnerOrCreateOrReadOnly", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 27, "usage_type": "call"}, {"api_name": "posts.models.Post", "line_number": 31, "usage_type": "argument"}, {"api_name": "models.Comment", "line_number": 36, "usage_type": "argument"}]} +{"seq_id": "601659832", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport DB_Handler as db\nimport Convocations as con\nimport Users as us\nimport Teams as t\nfrom Commons import log_it\nfrom telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove, InlineKeyboardMarkup, InlineKeyboardButton, Bot, ParseMode)\nfrom telegram.ext import Updater, 
CommandHandler, MessageHandler, Filters, ConversationHandler, RegexHandler, CallbackQueryHandler\nimport CON_PROP as prop\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO, handlers=[\n logging.FileHandler(\"TEL_BOT.log\"),\n logging.StreamHandler()])\n\ntb = Bot(token=prop.bot_code)\ntb_up = Updater(tb.token)\n\ndb.update_VARS()\n\ndef listner(bot, update):\n\tlog_it(update.message.chat_id, update.message.text, \"listner\")\n\tupdate.message.reply_text(\"I'm not sure what this means, maybe the conversation timed out.\")\n\ndef start(bot, update):\n\tlog_it(update.message.chat_id, update.message.text, \"start\")\n\tif(update.message.chat_id not in db.USERS.keys()):\n\t\tupdate.message.reply_text(\"Hola,\\n veo que aun no estas registrado, puedes mandarme tu nombre y apellido en un mismo mensaje?\")\n\t\treturn 1\n\telif(update.message.chat_id in db.PENDING_USERS):\n\t\tupdate.message.reply_text(\"Hola,\\n aun no te han aceptado. No te preocupes, seguro que no tardaran mucho.\")\n\telse:\n\t\tupdate.message.reply_text(\"Hola de nuevo!\")\n\t\tprint(str(update.message.chat))\n\treturn ConversationHandler.END\n\ndef create_user(bot, update):\n\tlog_it(update.message.chat_id, update.message.text, \"create_user\")\n\tif(db.check_user_name(update.message.text)):\n\t\tupdate.message.reply_text(\"Lo siento, \"+update.message.text+\", este nombre ya esta en uso, puedes enviarnos otro?\")\n\t\treturn 1\n\telse:\n\t\tupdate.message.reply_text(\"Genial, \"+update.message.text+\" te avisare cuando te hayan aceptado!\")\n\t\tdb.insert_user(update.message.chat_id, update.message.text)\n\t\treturn ConversationHandler.END\n\ndef help(bot, update):\n\tlog_it(update.message.chat_id, update.message.text, \"help\")\n\tCOMMON = \"\"\"Hello,\nHere you have a list with a short descripton of all the commands you are able to use:\n\nCOMMONS:\n/help - Shows the help message.\n/menu - Shows the menu.\n/cancel - Use it to cancel a conversation.\n/user_check - Shows your user information.\n/team_check - Shows the Team information and members.\n\nCONVOCATIONS:\n[ACCEPT ✅] - Use this button to accept the convocation.\n[DENY ❌] - Use this button to deny the convocation.\n\"\"\"\n\n\tCAPTAIN=\"\"\"/convocations - Use this command to start the convocation conversation.\n[⏪ or ⏩] - Use this button to move between the last ten convocations.\n[📤]- Use this button to share the current convocation status with all the team members.\n[🔄] - Use this button to refresh the information.\n[💪] - Use this button to link a training type to the current convocation.\n[CONFIRM ✅] - Use this button to confirm the convocation.\n[CANCEL ❌] - Use this button to cancel the convocation.\n[CLOSE] - Use this button to end the conversation end close the buttons.\n\n\"\"\"\n\tADMIN=\"\"\"USER ADMINISTRATION:\n/u_accept - Use this command to accept new registered users.\n/u_add - Use this command to add a user to a team.\n/user_check_others - Use this command to view information about other users.\n/user_delete - Use this command to delete users\n\nTEAM ADMINISTRATION:\n/t_create - Use this command to create new teams.\n/t_del_member - Use this command to delete a team member.\n/t_mod - Use this command to modify a team description/captain.\n/team_delete - Use this command to delete teams.\n\n\"\"\"\n\tEND='\\n Thanks for using the bot.\\n Feel free to contact @LorenzoHG for futher information.'\n\n\tmessage = COMMON\n\tif(update.message.chat_id in db.USER_ROLE):\n\t\tmessage = message + 
CAPTAIN\n\t\tif(db.USER_ROLE[update.message.chat_id] != 3):\n\t\t\tmessage = message + ADMIN\n\tmessage = message + END\n\t\n\tupdate.message.reply_text(message, parse_mode=ParseMode.HTML)\n\nSTART_MENU_ALL = [[InlineKeyboardButton(\"CHECK USER INFO\", callback_data='/user_check'),\n\t\t\tInlineKeyboardButton(\"CHECK TEAM INFO\", callback_data='/team_check')]]\nSTART_MENU_CAP = [[InlineKeyboardButton(\"CHECK USER INFO\", callback_data='/user_check'),\n\t\t\tInlineKeyboardButton(\"CHECK TEAM INFO\", callback_data='/team_check')],\n\t\t\t[InlineKeyboardButton(\"MANAGE CONVOCATIONS\", callback_data='/convocations')]]\nSTART_MENU_FULL = [[InlineKeyboardButton(\"CHECK USER INFO\", callback_data='/user_check'),\n\t\t\tInlineKeyboardButton(\"CHECK TEAM INFO\", callback_data='/team_check')],\n\t\t\t[InlineKeyboardButton(\"MANAGE CONVOCATIONS\", callback_data='/convocations')],\n\t\t\t[InlineKeyboardButton(\"ACCEPT USERS\", callback_data='/u_accept'),\n\t\t\tInlineKeyboardButton(\"DELETE USERS\", callback_data='/user_delete')],\n\t\t\t[InlineKeyboardButton(\"CHECK USERS\", callback_data='/user_check_others')],\n\t\t\t[InlineKeyboardButton(\"ADD USER TO TEAM\", callback_data='/u_add'),\n\t\t\tInlineKeyboardButton(\"KICK USER FROM TEAM\", callback_data='/t_del_member')],\n\t\t\t[InlineKeyboardButton(\"CREATE TEAM\", callback_data='/t_create'),\n\t\t\tInlineKeyboardButton(\"MODIFY TEAM\", callback_data='/t_mod')],\n\t\t\t[InlineKeyboardButton(\"DELETE TEAM\", callback_data='/team_delete')]]\nSTART = [[InlineKeyboardButton(\"COMMONS\", callback_data='/user_check')],\n\t\t[InlineKeyboardButton(\"CONVOCATIONS\", callback_data='/user_check')],\n\t\t[InlineKeyboardButton(\"USER ADMINISTRATION\", callback_data='/user_check')],\n\t\t[InlineKeyboardButton(\"TEAM ADMINISTRATION\", callback_data='/user_check')]]\n\ndef menu(bot, update):\n\tlog_it(update.message.chat_id, update.message.text, \"menu\")\n\tkeyboard = START_MENU_ALL\n\tif(update.message.chat_id in db.USER_ROLE):\n\t\tif(db.USER_ROLE[update.message.chat_id] != 1):\n\t\t\tkeyboard= START_MENU_FULL\n\t\telse:\n\t\t\tkeyboard= START_MENU_CAP\n\tupdate.message.reply_text('What would you like to do?',\n\t\t\t\t\t\t\treply_markup=InlineKeyboardMarkup(keyboard))\n\treturn 1\n\ndef menu_handler(bot, update):\n\tquery = update.callback_query\n\tlog_it(query.from_user.id, query.data, \"menu_handler\")\n\tquery.answer()\n\treply_keyboard = [[str(query.data)]]\n\tquery.message.delete()\n\tbot.sendMessage(chat_id=query.from_user.id, text='Click the following button:', reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n\treturn ConversationHandler.END\n\n\ntb_up.dispatcher.add_handler(con.CONV_HANDLER)\ntb_up.dispatcher.add_handler(ConversationHandler(entry_points=[CommandHandler('start', start)],\n\t\t\t\t\t\t\t\t\t\t\t\tstates={\n\t\t\t\t\t\t\t\t\t\t\t\t1: [MessageHandler(Filters.text, create_user)]},\n\t\t\t\t\t\t\t\t\t\t\t\tfallbacks=[CommandHandler('cancel', con.cancel)],\n\t\t\t\t\t\t\t\t\t\t\t\tconversation_timeout=300))\ntb_up.dispatcher.add_handler(ConversationHandler(entry_points=[CommandHandler('menu', menu)],\n\t\t\t\t\t\t\t\t\t\t\t\tstates={\n\t\t\t\t\t\t\t\t\t\t\t\t1: [CallbackQueryHandler(menu_handler)]},\n\t\t\t\t\t\t\t\t\t\t\t\tfallbacks=[CommandHandler('cancel', con.cancel)],\n\t\t\t\t\t\t\t\t\t\t\t\tconversation_timeout=300))\ntb_up.dispatcher.add_handler(CommandHandler('help', 
help))\ntb_up.dispatcher.add_handler(con.CONV_B_HANDLER)\ntb_up.dispatcher.add_handler(us.U_HANDLER)\ntb_up.dispatcher.add_handler(us.U_2_HANDLER)\ntb_up.dispatcher.add_handler(us.U_3_HANDLER)\ntb_up.dispatcher.add_handler(us.U_4_HANDLER)\ntb_up.dispatcher.add_handler(us.U_5_HANDLER)\ntb_up.dispatcher.add_handler(t.T_HANDLER)\ntb_up.dispatcher.add_handler(t.T_2_HANDLER)\ntb_up.dispatcher.add_handler(t.T_3_HANDLER)\ntb_up.dispatcher.add_handler(t.T_4_HANDLER)\ntb_up.dispatcher.add_handler(t.T_5_HANDLER)\ntb_up.dispatcher.add_handler(MessageHandler(Filters.text, listner))\n\ntb_up.start_polling()\ntb_up.idle()\n", "sub_path": "Telegram ChatBot/TB.py", "file_name": "TB.py", "file_ext": "py", "file_size_in_byte": 7518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "telegram.Bot", "line_number": 16, "usage_type": "call"}, {"api_name": "CON_PROP.bot_code", "line_number": 16, "usage_type": "attribute"}, {"api_name": "telegram.ext.Updater", "line_number": 17, "usage_type": "call"}, {"api_name": "DB_Handler.update_VARS", "line_number": 19, "usage_type": "call"}, {"api_name": "Commons.log_it", "line_number": 22, "usage_type": "call"}, {"api_name": "Commons.log_it", "line_number": 26, "usage_type": "call"}, {"api_name": "DB_Handler.USERS.keys", "line_number": 27, "usage_type": "call"}, {"api_name": "DB_Handler.USERS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "DB_Handler.PENDING_USERS", "line_number": 30, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler.END", "line_number": 35, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 35, "usage_type": "name"}, {"api_name": "Commons.log_it", "line_number": 38, "usage_type": "call"}, {"api_name": "DB_Handler.check_user_name", "line_number": 39, "usage_type": "call"}, {"api_name": "DB_Handler.insert_user", "line_number": 44, "usage_type": "call"}, {"api_name": "telegram.ext.ConversationHandler.END", "line_number": 45, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 45, "usage_type": "name"}, {"api_name": "Commons.log_it", "line_number": 48, "usage_type": "call"}, {"api_name": "DB_Handler.USER_ROLE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "DB_Handler.USER_ROLE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode.HTML", "line_number": 96, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 96, "usage_type": "name"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 98, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 99, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 100, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 101, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 102, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 103, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 104, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", 
"line_number": 105, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 106, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 107, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 108, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 109, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 110, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 111, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 112, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 113, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 114, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 115, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 116, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 117, "usage_type": "call"}, {"api_name": "Commons.log_it", "line_number": 120, "usage_type": "call"}, {"api_name": "DB_Handler.USER_ROLE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "DB_Handler.USER_ROLE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 128, "usage_type": "call"}, {"api_name": "Commons.log_it", "line_number": 133, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardMarkup", "line_number": 137, "usage_type": "call"}, {"api_name": "telegram.ext.ConversationHandler.END", "line_number": 138, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 138, "usage_type": "name"}, {"api_name": "Convocations.CONV_HANDLER", "line_number": 141, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 142, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 142, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 144, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 144, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 144, "usage_type": "name"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 145, "usage_type": "call"}, {"api_name": "Convocations.cancel", "line_number": 145, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 147, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 147, "usage_type": "call"}, {"api_name": "telegram.ext.CallbackQueryHandler", "line_number": 149, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 150, "usage_type": "call"}, {"api_name": "Convocations.cancel", "line_number": 150, "usage_type": "attribute"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 152, "usage_type": "call"}, {"api_name": "Convocations.CONV_B_HANDLER", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Users.U_HANDLER", "line_number": 154, "usage_type": "attribute"}, {"api_name": "Users.U_2_HANDLER", "line_number": 155, "usage_type": "attribute"}, {"api_name": "Users.U_3_HANDLER", "line_number": 156, "usage_type": "attribute"}, {"api_name": "Users.U_4_HANDLER", "line_number": 157, "usage_type": "attribute"}, {"api_name": "Users.U_5_HANDLER", "line_number": 158, "usage_type": "attribute"}, {"api_name": 
"Teams.T_HANDLER", "line_number": 159, "usage_type": "attribute"}, {"api_name": "Teams.T_2_HANDLER", "line_number": 160, "usage_type": "attribute"}, {"api_name": "Teams.T_3_HANDLER", "line_number": 161, "usage_type": "attribute"}, {"api_name": "Teams.T_4_HANDLER", "line_number": 162, "usage_type": "attribute"}, {"api_name": "Teams.T_5_HANDLER", "line_number": 163, "usage_type": "attribute"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 164, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 164, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 164, "usage_type": "name"}]} +{"seq_id": "499686616", "text": "from __future__ import print_function # Python 2/3 compatibility\nimport boto3\nimport json\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\n\n# Helper class to convert a DynamoDB item to JSON.\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\n\ndynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url=\"https://dynamodb.us-west-2.amazonaws.com\")\n\ntable = dynamodb.Table('Music')\n\nCompany = \"1\"\nJobTitle = \"CEO\"\n\ntry:\n response = table.get_item(\n Key={\n 'Company': Company,\n 'JobTitle': JobTitle\n }\n )\nexcept ClientError as e:\n print(e.response['Error']['Message'])\nelse:\n if 'Item' in response:\n print(len(response))\n print(len(response['Item']))\n item = response['Item']\n print(\"GetItem succeeded:\")\n print(json.dumps(item, indent=4, cls=DecimalEncoder))\n else:\n print('Item not found')\n\n", "sub_path": "sandbox/boto_sandbox/boto_read.py", "file_name": "boto_read.py", "file_ext": "py", "file_size_in_byte": 1176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "json.JSONEncoder", "line_number": 9, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 11, "usage_type": "attribute"}, {"api_name": "boto3.resource", "line_number": 19, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 33, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "466524111", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport datetime\n#matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom dateutil import parser\n\ndf_ferrara = pd.read_csv('ferrara_270615.csv')\ndf_milano = pd.read_csv('milano_270615.csv')\ndf_mantova = pd.read_csv('mantova_270615.csv')\ndf_ravenna = pd.read_csv('ravenna_270615.csv')\ndf_torino = pd.read_csv('torino_270615.csv')\ndf_asti = pd.read_csv('asti_270615.csv')\ndf_bologna = pd.read_csv('bologna_270615.csv')\ndf_piacenza = pd.read_csv('piacenza_270615.csv')\ndf_cesena = pd.read_csv('cesena_270615.csv')\ndf_faenza = pd.read_csv('faenza_270615.csv')\n\n# 取出我们要分析的温度和日期数据\ny1 = df_milano['temp']\nx1 = df_milano['day']\n#print(x1)\n#把日期数据转换成 datetime 的格式\nday_milano = [parser.parse(x) for x in x1]\nprint(day_milano)\n# 调用 subplot 函数\nfig, ax = plt.subplots()\nplt.xticks(rotation=70)\nhours = mdates.DateFormatter('%H:%M')\nax.xaxis.set_major_formatter(hours)\nax.plot(day_milano ,y1, 'r',linewidth=4)\nplt.xlabel(\"Time\")\nplt.ylabel(\"Temperature\")\nplt.legend([\"milano\"])\n# 读取温度和日期数据\ny1 = 
df_ravenna['temp']\nx1 = df_ravenna['day']\ny2 = df_faenza['temp']\nx2 = df_faenza['day']\ny3 = df_cesena['temp']\nx3 = df_cesena['day']\ny4 = df_milano['temp']\nx4 = df_milano['day']\ny5 = df_asti['temp']\nx5 = df_asti['day']\ny6 = df_torino['temp']\nx6 = df_torino['day']\n\n# 把日期从 string 类型转化为标准的 datetime 类型\nday_ravenna = [parser.parse(x) for x in x1]\nday_faenza = [parser.parse(x) for x in x2]\nday_cesena = [parser.parse(x) for x in x3]\ndat_milano = [parser.parse(x) for x in x4]\nday_asti = [parser.parse(x) for x in x5]\nday_torino = [parser.parse(x) for x in x6]\n\n# 调用 subplots() 函数,重新定义 fig, ax 变量\nfig, ax = plt.subplots(1,1)\nplt.xticks(rotation=70)\n\nhours = mdates.DateFormatter('%H:%M')\nax.xaxis.set_major_formatter(hours)\n\n#这里需要画出三根线,所以需要三组参数, 'g'代表'green'\nax.plot(day_ravenna,y1,'r',day_faenza,y2,'#00FF00',day_cesena,y3,'#8B0000',linewidth=2)\nax.plot(dat_milano,y4,'#F08080',day_asti,y5,'#4B0082',day_torino,y6,'y',linewidth=2)\nplt.xlabel(\"Time\")\nplt.ylabel(\"Temperature\")\nplt.legend([\"ravenna\",\"faenza\",\"cesena\",\"milano\",\"asti\",\"torino\"])\n#plt.savefig('power.png', dpi=75)\nplt.show()", "sub_path": "601-607/Pic9-9.py", "file_name": "Pic9-9.py", "file_ext": "py", "file_size_in_byte": 2329, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 27, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 53, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 53, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 54, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 54, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", 
"line_number": 55, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 55, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 56, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 56, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 57, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 57, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 58, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "239508765", "text": "from django.urls import path\n\nfrom . import views\n# imported all the functions written in views.py\n\napp_name = \"polls\" # useful whenever we have more than one apps\n# but if our project contains only one app then it's not that useful\n# but for demostration purpose we have written it over here\n\n\nurlpatterns = [\n\tpath('',views.index,name='polls-index'),\t# matched the index function from views.py with '' url;\n\n\tpath(\"/\",views.details,name = \"details\"),\n\n\tpath(\"/results/\",views.results,name=\"results\"),\n\n\tpath(\"/vote/\",views.votes,name=\"vote\"),\n\n]", "sub_path": "polls_project/polls_project/polls/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 599, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "642185228", "text": "import requests\nimport json\nimport pandas as pd\nimport datetime as dt\n# noinspection SpellCheckingInspection\n\n\nclass ReadDB:\n\n def __init__(self):\n \"\"\"This object was created to extract data from API XM\"\"\"\n\n self.url = \"http://servapibi.xm.com.co/hourly\"\n self.connection = None\n self.request = ''\n self.inventario_metricas = \\\n {'Gene': [(0, 'Generacion Real', 'Sistema', 'Horaria'),\n (1, 'Generacion Real por Recurso', 'Recurso', 'Horaria')],\n 'DemaCome': [(0, 'Demanda Comercial', 'Sistema', 'Horaria'),\n (1, 'Demanda Comercial por Agente', 'Agente', 'Horaria')],\n 'AporEner': [(0, 'Aportes Energia', 'Sistema', 'Diaria'),\n (1, 
'Aportes Energia por Rio', 'Rio', 'Diaria')],\n 'PrecEscaAct': [(0, 'Precio de Escasez de Activacion', 'Sistema', 'Diaria')],\n 'ConsCombustibleMBTU': [(0, 'Cons. Comb. Recursos DC', 'Recurso', 'Horaria')],\n 'PrecOferDesp': [(0, 'Precio de Oferta del Despacho', 'Recurso', 'Horaria')],\n 'PrecBolsNaci': [(0, 'Precio de Bolsa Nacional', 'Sistema', 'Horaria')],\n 'MaxPrecOferNal': [(0, 'Máximo Precio de Oferta Nacional', 'Sistema', 'Horaria')],\n 'RestAliv': [(0, 'Restricciones Aliviadas', 'Sistema', 'Horaria')],\n 'GeneIdea': [(0, 'Generacion Ideal', 'Sistema', 'Horaria'),\n (1, 'Generacion Ideal', 'Recurso', 'Horaria')],\n 'VoluUtilDiarEner': [(0, 'Volumen Util Diario', 'Sistema', 'Diaria'),\n (1, 'Volumen Util Diario por Embalse', 'Embalse', 'Diaria')],\n 'RemuRealIndiv': [(0, 'RRID', 'Sistema', 'Diaria')],\n 'CapEfecNeta': [(0, 'Listado de recursos térmicos CEN por mes', 'Sistema', 'Anual'),\n (1, 'Listado Recursos Generación', 'Recurso', 'Diaria')],\n 'VentContEner': [(0, 'Ventas en Contratos Energía', 'Sistema', 'Horaria'),\n (1, 'Ventas en Contratos Energía por Agente', 'Agente', 'Horaria')],\n 'CompContEner': [(0, 'Compras en Contrato Energía', 'Sistema', 'Horaria'),\n (1, 'Compras en Contrato Energía por Agente', 'Agente', 'Horaria')],\n 'CompBolsNaciEner': [(0, 'Compras en Bolsa Nacional Energía', 'Sistema', 'Horaria'),\n (1, 'Compras en Bolsa Nacional Energía por Agente', 'Agente', 'Horaria')],\n 'PrecPromContRegu': [(0, 'Precio Promedio Contratos Regulado', 'Sistema', 'Diaria')],\n 'PrecPromContNoRegu': [(0, 'Precio Promedio Contratos No Regulado', 'Sistema', 'Diaria')],\n 'ConsCombAprox': [(0, 'Consumo Comb Aprox.', 'RecursoComb', 'Horaria')],\n 'EmisionesCO2': [(0, 'Emisiones CO2', 'RecursoComb', 'Horaria')],\n 'EmisionesCH4': [(0, 'Emisiones CH4', 'RecursoComb', 'Horaria')],\n 'EmisionesN2O': [(0, 'Emisiones N2O', 'RecursoComb', 'Horaria')],\n 'EmisionesCO2Eq': [(0, 'Emisiones CO2e', 'Recurso', 'Horaria')],\n 'factorEmisionCO2e': [(0, 'factor emision CO2e', 'Sistema', 'Horaria')],\n 'ImpoEner': [(0, 'Importaciones Energía', 'Sistema', 'Horaria')],\n 'DemaOR': [(0, 'Demanda por OR', 'Agente', 'Horaria')],\n 'PerdidasEner': [(0, 'Perdidas en Energía', 'Sistema', 'Horaria')],\n 'DemaSIN': [(0, 'Demanda del SIN', 'Sistema', 'Diaria')],\n 'DemaNoAtenProg': [(0, 'Demanda No Atendida Programada por Área', 'Area', 'Diaria'),\n (1, 'Demanda No Atendida Programada por Subárea', 'Subarea', 'Diaria')],\n 'DemaNoAtenNoProg': [(0, 'Demanda No Atendida No Programada por Área', 'Area', 'Diaria'),\n (1, 'Demanda No Atendida No Programada por Subárea', 'Subarea', 'Diaria')],\n 'CapaUtilDiarEner': [(0, 'Capacidad Util Diario', 'Sistema', 'Diaria'),\n (1, 'Capacidad Util Diario por Embalse', 'Embalse', 'Diaria')],\n 'AporEnerMediHist': [(0, 'Media Historica Aportes', 'Sistema', 'Diaria'),\n (1, 'Media Historica Aportes por Rio', 'Rio', 'Diaria')],\n 'GeneSeguridad': [(0, 'Generación Seguridad', 'Recurso', 'Horaria')],\n 'GeneFueraMerito': [(0, 'Generación Fuera de Merito', 'Recurso', 'Horaria')],\n 'ObligEnerFirme': [(0, 'Obligaciones de Energía Firme', 'Recurso', 'Diaria')],\n 'FAZNI': [(0, 'Recaudo FAZNI', 'Sistema', 'Diaria')],\n 'PRONE': [(0, 'Recaudo PRONE', 'Sistema', 'Diaria')],\n 'FAER': [(0, 'Recaudo FAER', 'Sistema', 'Diaria')]\n }\n\n def get_collections(self, coleccion):\n\n return self.inventario_metricas[coleccion]\n\n def request_data(self, coleccion, metrica, start_date, end_date):\n \"\"\" request public server data from XM by the API\n Args:\n coleccion: one of the set of 
variables availables at self.get_collections()\n metrica:one of this variables \"DemaCome\", \"Gene\", \"GeneIdea\", \"PrecBolsNaci\", \"RestAliv\"\n start_date: start date consult data\n end_date: end date consult data\n Returns: DataFrame with the raw Data\n \"\"\"\n if coleccion not in self.inventario_metricas.keys():\n print('No existe la colección {}'.format(coleccion))\n return pd.DataFrame()\n if metrica > len(self.inventario_metricas[coleccion]):\n print('No existe la metrica')\n return pd.DataFrame()\n\n if self.inventario_metricas[coleccion][metrica][3] == 'Horaria':\n\n end = end_date\n condition = True\n aux = True\n data = None\n while condition:\n if (start_date - end_date).days < 30:\n end = start_date + dt.timedelta(29)\n if end > end_date:\n end = end_date\n self.request = {\"MetricId\": coleccion,\n \"StartDate\": \"{}\".format(str(start_date)),\n \"EndDate\": \"{}\".format(str(end)),\n 'Entity': self.inventario_metricas[coleccion][metrica][2]}\n\n self.connection = requests.post(self.url, json=self.request)\n\n data_json = json.loads(self.connection.content)\n\n temporal_data = pd.json_normalize(data_json['Items'], 'HourlyEntities', 'Date', sep='_')\n\n if data is None:\n data = temporal_data.copy()\n else:\n data = data.append(temporal_data, ignore_index=True)\n start_date = start_date + dt.timedelta(30)\n\n if end == end_date:\n aux = False\n condition = ((end - start_date).days > 30 | (end - end_date).days != 0) | aux\n elif self.inventario_metricas[coleccion][metrica][3] == 'Diaria' and coleccion == 'CapEfecNeta':\n end = end_date\n condition = True\n aux = True\n data = None\n while condition:\n if (start_date - end_date).days < 1:\n end = start_date + dt.timedelta(0)\n if end > end_date:\n end = end_date\n self.request = {\"MetricId\": coleccion,\n \"StartDate\": \"{}\".format(str(start_date)),\n \"EndDate\": \"{}\".format(str(end)),\n 'Entity': self.inventario_metricas[coleccion][metrica][2]}\n self.url = self.url.replace('hourly', 'daily')\n self.connection = requests.post(self.url, json=self.request)\n\n data_json = json.loads(self.connection.content)\n\n temporal_data = pd.json_normalize(data_json['Items'], 'DailyEntities', 'Date', sep='_')\n\n if data is None:\n data = temporal_data.copy()\n else:\n data = data.append(temporal_data, ignore_index=True)\n start_date = start_date + dt.timedelta(1)\n\n if end == end_date:\n aux = False\n condition = ((end - start_date).days > 1 | (end - end_date).days != 0) | aux\n elif self.inventario_metricas[coleccion][metrica][3] == 'Diaria':\n end = end_date\n condition = True\n aux = True\n data = None\n while condition:\n if (start_date - end_date).days < 30:\n end = start_date + dt.timedelta(29)\n if end > end_date:\n end = end_date\n\n self.request = {\"MetricId\": coleccion,\n \"StartDate\": \"{}\".format(str(start_date)),\n \"EndDate\": \"{}\".format(str(end)),\n 'Entity': self.inventario_metricas[coleccion][metrica][2]}\n self.url = self.url.replace('hourly', 'daily')\n self.connection = requests.post(self.url, json=self.request)\n data_json = json.loads(self.connection.content)\n temporal_data = pd.json_normalize(data_json['Items'], 'DailyEntities', 'Date', sep='_')\n if data is None:\n data = temporal_data.copy()\n else:\n data = data.append(temporal_data, ignore_index=True)\n\n start_date = start_date + dt.timedelta(30)\n if end == end_date:\n aux = False\n condition = ((end - start_date).days > 29 | (end - end_date).days != 0) | aux\n\n elif self.inventario_metricas[coleccion][metrica][3] == 'Anual':\n\n end = 
end_date\n condition = True\n aux = True\n data = None\n while condition:\n if (start_date - end_date).days < 366:\n end = start_date + dt.timedelta(365)\n if end > end_date:\n end = end_date\n\n self.request = {\"MetricId\": coleccion,\n \"StartDate\": \"{}\".format(str(start_date)),\n \"EndDate\": \"{}\".format(str(end)),\n 'Entity': self.inventario_metricas[coleccion][metrica][2]}\n self.url = self.url.replace('hourly', 'annual')\n self.connection = requests.post(self.url, json=self.request)\n data_json = json.loads(self.connection.content)\n temporal_data = pd.json_normalize(data_json['Items'], 'AnnualEntities', 'Code', sep='_')\n if data is None:\n data = temporal_data.copy()\n else:\n data = data.append(temporal_data, ignore_index=True)\n\n start_date = start_date + dt.timedelta(366)\n if end == end_date:\n aux = False\n condition = ((end - start_date).days > 365 | (end - end_date).days != 0) | aux\n\n return data\n\n\nif __name__ == \"__main__\":\n consult = ReadDB()\n df1 = consult.request_data(\"Gene\", 1, dt.date(2020, 7, 1), dt.date(2020, 9, 10))", "sub_path": "pydataxm.py", "file_name": "pydataxm.py", "file_ext": "py", "file_size_in_byte": 11312, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 98, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 106, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 108, "usage_type": "call"}, {"api_name": "pandas.json_normalize", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 128, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 136, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.json_normalize", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 158, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 167, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.json_normalize", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 188, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 197, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 198, "usage_type": "call"}, {"api_name": "pandas.json_normalize", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 205, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "436224228", "text": "from matplotlib import pyplot, cm\nimport numpy as np\nimport taichi as ti\nfrom mpl_toolkits.mplot3d import Axes3D ##New Library required for projected 3d plots\n\nnx = 81\nny = 81\nnt = 100\nc = 1\n\ndx = 2 / (nx - 1)\ndy = 2 / (ny - 1)\nsigma = .2\ndt = sigma * dx\n\nx = np.linspace(0, 2, nx)\ny = np.linspace(0, 2, ny)\nX, Y = np.meshgrid(x, y) \n\n\nu = ti.Vector.field(2, dtype=float, shape=(ny, nx))\nun = ti.Vector.field(2, dtype=float, shape=(ny, nx))\n\n\n@ti.kernel\ndef 
init():\n for i, j in ti.grouped(u):\n if i >= int(.5 / dx) and i < int(1 / dx + 1) and j >= int(.5 / dy) and j < int(1 / dy + 1):\n u[i, j] = 2\n else:\n u[i, j] = 1\n un [i, j] = u[i, j]\n\n@ti.kernel\ndef propagate():\n for i, j in ti.grouped(u):\n un[i, j] = u[i, j]\n\n for j, i in ti.ndrange((1, ny), (1, nx)): \n if j == 0 or j == ny - 1 or i == 0 or i == nx - 1:\n u[j, i] = [1, 1]\n else:\n u[j, i] = (un[j, i] - \n (un[j, i] * c * dt / dx * (un[1:, 1:] - un[1:, :-1])) -\n vn[1:, 1:] * c * dt / dy * (un[1:, 1:] - un[:-1, 1:]))\n\ndef plot(ax, pause_time=0.005):\n ax.cla()\n surf = ax.plot_surface(X, Y, u.to_numpy()[:], cmap=cm.viridis)\n pyplot.pause(pause_time)\n\n# gui = ti.GUI(\"2D Convection\", res=(ny, nx))\ndef main():\n t = 0\n fig = pyplot.figure(figsize=(11, 7), dpi=100)\n ax = fig.gca(projection='3d')\n fig.show()\n pyplot.ion()\n\n while True:\n if t == 0 or t > nt:\n init()\n t = 0\n plot(ax)\n # gui.set_image(u)\n # gui.show()\n propagate()\n t += 1\n\nif __name__ == '__main__':\n main()\n", "sub_path": "taichi/step6.py", "file_name": "step6.py", "file_ext": "py", "file_size_in_byte": 1684, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.linspace", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 18, "usage_type": "call"}, {"api_name": "taichi.Vector.field", "line_number": 21, "usage_type": "call"}, {"api_name": "taichi.Vector", "line_number": 21, "usage_type": "attribute"}, {"api_name": "taichi.Vector.field", "line_number": 22, "usage_type": "call"}, {"api_name": "taichi.Vector", "line_number": 22, "usage_type": "attribute"}, {"api_name": "taichi.grouped", "line_number": 27, "usage_type": "call"}, {"api_name": "taichi.kernel", "line_number": 25, "usage_type": "attribute"}, {"api_name": "taichi.grouped", "line_number": 36, "usage_type": "call"}, {"api_name": "taichi.ndrange", "line_number": 39, "usage_type": "call"}, {"api_name": "taichi.kernel", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.cm.viridis", "line_number": 49, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "459412349", "text": "import configparser\nimport os\n\n\nscript_dir = os.path.dirname(__file__)\nconnections_cfg_path = os.path.join(script_dir, 'connections.cfg')\nparser = configparser.ConfigParser()\n\n \ndef get_mongo_connection_str(): \n parser.read(connections_cfg_path)\n connection_str = parser.get('mongo_db', 'connection_str')\n \n return connection_str\n\n\ndef get_postgres_connection_str():\n parser.read(connections_cfg_path)\n connection_str = parser.get('postgres_db', 'connection_str')\n\n return connection_str\n\ndef get_twiiter_credentials():\n parser.read(connections_cfg_path)\n\n consumer_key = parser.get('twitter', 'consumer_key')\n consumer_secret = parser.get('twitter', 'consumer_secret')\n access_key = parser.get('twitter', 'access_key')\n 
access_secret = parser.get('twitter', 'access_secret')\n stream_keyword = parser.get('twitter', 'stream_keyword')\n \n return consumer_key, consumer_secret, access_key, access_secret, stream_keyword\n\n\ndef get_kafka_server():\n parser.read(connections_cfg_path)\n\n return parser.get('kafka', 'host')\n", "sub_path": "backend/common/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 1072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "66204709", "text": "# -*- coding: utf-8 -*-\n\n# In your terminal, install several dash libraries.\n# These libraries are under active development, so install and upgrade frequently.\n# Python 2 and 3 are supported.\n#\n# (for Unix shell) to install Dash please run the below command in your terminal:\n# pip install dash==1.14.0\n\n# Run this app with `python app.py` and\n# visit http://127.0.0.1:8050/ in your web browser.\n\n# more info about logic:\n# https://dash.plotly.com/urls\n# https://dash.plotly.com/layout\n\nimport dash\nimport dash_bootstrap_components as dbc\n\n# https://community.plotly.com/t/dash-v1-12-0-release-pattern-matching-callbacks-fixes-shape-drawing-new-datatable-conditional-formatting-options-prevent-initial-call-and-more/38867\napp = dash.Dash(__name__,\n external_stylesheets=[dbc.themes.UNITED],\n prevent_initial_callbacks=True)\napp.config.suppress_callback_exceptions = True\nserver = app.server", "sub_path": "wse-dash/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "dash.Dash", "line_number": 21, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "373791736", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport MySQLdb\n\n# 打开数据库连接\ndb = MySQLdb.connect(\"localhost\", \"root\", \"123456\", \"test\")\n\n# 使用cursor()方法获取操作游标\ncur = db.cursor()\n\n# SQL 插入语句\nsql = \"UPDATE xiao SET AGE = AGE + 1 WHERE SEX = '%s'\" % ('男')\ntry:\n cur.execute(sql)\n db.commit()\n print(\"123\")\nexcept:\n db.rollback()\ndb.close()", "sub_path": "z_python-stu1/数据库/添加.py", "file_name": "添加.py", "file_ext": "py", "file_size_in_byte": 379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "MySQLdb.connect", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "287336905", "text": "import base64\nfrom PIL import Image\n\ndef dump_datetime(value):\n \"\"\"Deserialize datetime object into string form for JSON processing.\"\"\"\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]\n\n\n\ndef username_to_base64_avatar(userName):\n\n filename = 'avatars/{0}_avatar.jpg'.format(userName) \n \n with open(filename, 'rb') as f:\n imgdata = f.read()\n img_base64 = base64.encodebytes(imgdata)\n\n return img_base64\n\ndef path_to_base64_avatar(filename):\n\n with open(filename, 'rb') as f:\n imgdata = f.read()\n img_base64 = 
base64.encodebytes(imgdata)\n \n return img_base64\n\ndef resize_image(filename):\n \n img = Image.open(filename)\n img.thumbnail((200, 200), Image.ANTIALIAS)\n img.save(filename, 'JPEG')\n\n print(filename, ' resized')\n\n", "sub_path": "api/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "base64.encodebytes", "line_number": 18, "usage_type": "call"}, {"api_name": "base64.encodebytes", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 32, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 33, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "76358892", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"crawler.settings\")\nimport django\ndjango.setup()\n\nfrom best_comment.models import Article\n\n\ndef comment_crawl():\n driver = webdriver.Chrome(\n '/Users/ton/OnedgeLee/OnedgeLeeTools/chromedriver')\n driver.implicitly_wait(3)\n driver.get(\n 'https://m.sports.naver.com/kbaseball/news/index.nhn?isPhoto=N&type=popular')\n article_a = driver.find_elements_by_xpath(\n '//ul[@class=\"article_lst\"]/li/a')\n article = list()\n\n for i in range(10):\n article.append({'href': article_a[i].get_attribute('href')})\n\n for i in range(10):\n driver.get(article[i]['href'])\n article[i]['title'] = (driver.find_element_by_class_name(\n 'media_end_head_headline').text)\n upvotes = driver.find_elements_by_class_name('u_cbox_cnt_recomm')\n elupvote = upvotes[0]\n maxupvote = int(upvotes[0].text)\n for upvote in upvotes:\n if maxupvote <= int(upvote.text):\n maxupvote = int(upvote.text)\n elupvote = upvote\n bc = elupvote.find_element_by_xpath(\n './ancestor::div[@class=\"u_cbox_area\"]')\n article[i]['writer'] = bc.find_element_by_class_name(\n 'u_cbox_nick').text\n article[i]['comment'] = bc.find_element_by_class_name(\n 'u_cbox_contents').text\n article[i]['upvote'] = maxupvote\n article[i]['downvote'] = int(\n bc.find_element_by_class_name('u_cbox_cnt_unrecomm').text)\n driver.close()\n return article\n\n\nif __name__ == '__main__':\n articles = comment_crawl()\n for article in articles:\n Article(article_href=article['href'], article_title=article['title'], article_writer=article['writer'],\n article_comment=article['comment'], article_upvote=article['upvote'], article_downvote=article['downvote']).save()\n", "sub_path": "crawler.py", "file_name": "crawler.py", "file_ext": "py", "file_size_in_byte": 1966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.environ.setdefault", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "best_comment.models.Article", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "522329194", "text": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport collections.abc\nimport warnings\n\nfrom nvtabular.columns import ColumnSelector, Schema\nfrom nvtabular.ops import LambdaOp, Operator, internal\nfrom nvtabular.ops.internal.concat_columns import ConcatColumns\nfrom nvtabular.ops.internal.subset_columns import SubsetColumns\n\n\nclass WorkflowNode:\n \"\"\"A WorkflowNode is a group of columns that you want to apply the same transformations to.\n WorkflowNode's can be transformed by shifting operators on to them, which returns a new\n WorkflowNode with the transformations applied. This lets you define a graph of operations\n that makes up your workflow\n\n Parameters\n ----------\n selector: ColumnSelector\n Defines which columns to select from the input Dataset using column names and tags.\n \"\"\"\n\n def __init__(self, selector=None):\n self.parents = []\n self.children = []\n self.dependencies = []\n\n self.op = None\n self.input_schema = None\n self.output_schema = None\n\n if isinstance(selector, list):\n warnings.warn(\n 'The `[\"a\", \"b\", \"c\"] >> ops.Operator` syntax for creating a `ColumnGroup` '\n \"has been deprecated in NVTabular 21.09 and will be removed in a future version.\",\n FutureWarning,\n )\n selector = ColumnSelector(selector)\n\n if selector and not isinstance(selector, ColumnSelector):\n raise TypeError(\"The selector argument must be a list or a ColumnSelector\")\n\n self._selector = selector\n\n @property\n def selector(self):\n return self._selector\n\n @selector.setter\n def selector(self, sel):\n if isinstance(sel, list):\n sel = ColumnSelector(sel)\n\n self._selector = sel\n\n def compute_schemas(self, root_schema):\n # If parent is an addition node, we may need to propagate grouping\n # unless we're a node that already has a selector\n if not self.selector:\n if (\n len(self.parents) == 1\n and isinstance(self.parents[0].op, internal.ConcatColumns)\n and self.parents[0].selector\n and (self.parents[0].selector.names)\n ):\n\n self.selector = self.parents[0].selector\n\n # If we have a selector, apply it to upstream schemas from nodes/dataset\n if self.selector:\n upstream_schema = root_schema + _combine_schemas(self.parents_with_dep_nodes)\n self.input_schema = upstream_schema.apply(self.selector)\n else:\n # If we don't have a selector but we're an addition node,\n if isinstance(self.op, ConcatColumns):\n upstream_selector = _combine_selectors(self.parents)\n upstream_selector += _combine_selectors(self.dependencies)\n\n if upstream_selector.names:\n self.selector = upstream_selector\n\n # For addition nodes, some of the operands are parents and\n # others are dependencies so grab schemas from both\n upstream_schema = root_schema + _combine_schemas(self.parents_with_dep_nodes)\n self.input_schema = upstream_schema.apply(self.selector)\n\n # If we're a subtraction node, we have to do some gymnastics to compute\n # the schema, because operands may be in the parents or the dependencies\n # or both\n elif isinstance(self.op, SubsetColumns):\n operands = self.parents + self.dependencies\n left_operand = operands.pop(0)\n\n left_operand_schema = 
_combine_schemas([left_operand])\n\n operands_schema = _combine_schemas(operands)\n\n self.input_schema = left_operand_schema - operands_schema\n\n # If none of the above apply, then we don't have a selector\n # and we're not an add or sub node, so our input is just the\n # parents output\n else:\n self.input_schema = _combine_schemas(self.parents)\n\n # Then we delegate to the op (if there is one) to compute this node's\n # output schema. If there's no op, then outputs are just the inputs\n if self.op:\n self.output_schema = self.op.compute_output_schema(self.input_schema, self.selector)\n else:\n self.output_schema = self.input_schema\n\n def __rshift__(self, operator):\n \"\"\"Transforms this WorkflowNode by applying an Operator\n\n Parameters\n -----------\n operators: Operator or callable\n\n Returns\n -------\n WorkflowNode\n \"\"\"\n if isinstance(operator, type) and issubclass(operator, Operator):\n # handle case where an operator class is passed\n operator = operator()\n elif callable(operator):\n # implicit lambdaop conversion.\n operator = LambdaOp(operator)\n\n if not isinstance(operator, Operator):\n raise ValueError(f\"Expected operator or callable, got {operator.__class__}\")\n\n child = WorkflowNode()\n child.parents = [self]\n self.children.append(child)\n child.op = operator\n\n dependencies = operator.dependencies()\n\n if dependencies:\n if not isinstance(dependencies, collections.abc.Sequence):\n dependencies = [dependencies]\n\n for dependency in dependencies:\n if isinstance(dependency, WorkflowNode):\n dependency.children.append(child)\n child.parents.append(dependency)\n elif not isinstance(dependency, ColumnSelector):\n dependency = ColumnSelector(dependency)\n child.dependencies.append(dependency)\n\n return child\n\n def __add__(self, other):\n \"\"\"Adds columns from this WorkflowNode with another to return a new WorkflowNode\n\n Parameters\n -----------\n other: WorkflowNode or str or list of str\n\n Returns\n -------\n WorkflowNode\n \"\"\"\n if isinstance(self.op, internal.ConcatColumns):\n child = self\n else:\n # Create a child node\n child = WorkflowNode()\n child.op = internal.ConcatColumns(label=\"+\")\n\n # Add self as a parent\n self.children.append(child)\n child.parents.append(self)\n\n # The right operand becomes a dependency\n if isinstance(other, list):\n other = _strs_to_selectors(other)\n elif not isinstance(other, (ColumnSelector, WorkflowNode)):\n other = ColumnSelector(other)\n\n # If the other node is a `+` node, we want to collapse it into this `+` node to\n # avoid creating a cascade of repeated `+`s that we'd need to optimize out by\n # re-combining them later in order to clean up the graph\n if isinstance(other, WorkflowNode) and isinstance(other.op, internal.ConcatColumns):\n child.dependencies += other.parents + other.dependencies\n else:\n child.dependencies.append(other)\n\n return child\n\n # handle the \"column_name\" + WorkflowNode case\n __radd__ = __add__\n\n def __sub__(self, other):\n \"\"\"Removes columns from this WorkflowNode with another to return a new WorkflowNode\n\n Parameters\n -----------\n other: WorkflowNode or str or list of str\n Columns to remove\n\n Returns\n -------\n WorkflowNode\n \"\"\"\n\n if isinstance(self.op, internal.SubsetColumns):\n child = self\n else:\n # Create a child node\n child = WorkflowNode()\n child.op = internal.SubsetColumns(label=\"-\")\n\n # Add self as a parent\n self.children.append(child)\n child.parents.append(self)\n\n # The right operand becomes a dependency\n if not 
isinstance(other, (ColumnSelector, WorkflowNode)):\n other = ColumnSelector(other)\n\n child.dependencies.append(other)\n\n return child\n\n def __rsub__(self, other):\n # Create a child node\n child = WorkflowNode()\n child.op = internal.SubsetColumns(label=\"-\")\n\n # The left operand becomes a dependency\n if not isinstance(other, (ColumnSelector, WorkflowNode)):\n other = ColumnSelector(other)\n\n # Add self as a dependency\n child.dependencies.append(other)\n child.dependencies.append(self)\n\n return child\n\n def __getitem__(self, columns):\n \"\"\"Selects certain columns from this WorkflowNode, and returns a new Columngroup with only\n those columns\n\n Parameters\n -----------\n columns: str or list of str\n Columns to select\n\n Returns\n -------\n WorkflowNode\n \"\"\"\n col_selector = ColumnSelector(columns)\n child = WorkflowNode(col_selector)\n child.parents = [self]\n self.children.append(child)\n child.op = internal.SubsetColumns(label=str(list(columns)))\n return child\n\n def __repr__(self):\n output = \" output\" if not self.children else \"\"\n return f\"\"\n\n @property\n def parents_with_dep_nodes(self):\n return self.parents + self.dependency_nodes\n\n @property\n def input_columns(self):\n if self.input_schema is None:\n raise RuntimeError(\n \"The input columns aren't computed until the workflow \"\n \"is fit to a dataset or input schema.\"\n )\n\n if self.selector:\n # To maintain column groupings\n return self.selector\n else:\n return ColumnSelector(self.input_schema.column_names)\n\n @property\n def output_columns(self):\n if self.output_schema is None:\n raise RuntimeError(\n \"The output columns aren't computed until the workflow \"\n \"is fit to a dataset or input schema.\"\n )\n\n return ColumnSelector(self.output_schema.column_names)\n\n @property\n def dependency_schema(self):\n return _combine_schemas(self.dependencies)\n\n @property\n def dependency_columns(self):\n return _combine_selectors(self.dependency_selectors)\n\n @property\n def dependency_nodes(self):\n return _filter_by_type(self.dependencies, WorkflowNode)\n\n @property\n def dependency_selectors(self):\n return _filter_by_type(self.dependencies, ColumnSelector)\n\n @property\n def label(self):\n if self.op and hasattr(self.op, \"label\"):\n return self.op.label\n elif self.op:\n return str(type(self.op))\n elif not self.parents:\n return f\"input cols=[{self._cols_repr}]\"\n else:\n return \"??\"\n\n @property\n def _cols_repr(self):\n if self.input_schema:\n columns = self.input_schema.column_names\n elif self.selector:\n columns = self.selector.names\n else:\n columns = []\n\n cols_repr = \", \".join(map(str, columns[:3]))\n if len(columns) > 3:\n cols_repr += \"...\"\n\n return cols_repr\n\n @property\n def graph(self):\n return _to_graphviz(self)\n\n\ndef iter_nodes(nodes):\n queue = nodes[:]\n while queue:\n current = queue.pop()\n yield current\n # TODO: deduplicate nodes?\n for parent in current.parents:\n queue.append(parent)\n\n for dep in current.dependency_nodes:\n queue.append(dep)\n\n\ndef _filter_by_type(elements, type_):\n results = []\n\n for elem in elements:\n if isinstance(elem, type_):\n results.append(elem)\n elif isinstance(elem, list):\n results += _filter_by_type(elem, type_)\n\n return results\n\n\ndef _combine_schemas(elements):\n combined = Schema()\n for elem in elements:\n if isinstance(elem, WorkflowNode):\n combined += elem.output_schema\n elif isinstance(elem, ColumnSelector):\n combined += Schema(elem.names)\n elif isinstance(elem, list):\n combined += 
_combine_schemas(elem)\n return combined\n\n\ndef _combine_selectors(elements):\n combined = ColumnSelector()\n for elem in elements:\n if isinstance(elem, WorkflowNode):\n combined += ColumnSelector(elem.output_schema.column_names)\n elif isinstance(elem, ColumnSelector):\n combined += elem\n elif isinstance(elem, list):\n combined += ColumnSelector(subgroups=_combine_selectors(elem))\n return combined\n\n\ndef _to_selector(value):\n if not isinstance(value, (ColumnSelector, WorkflowNode)):\n return ColumnSelector(value)\n else:\n return value\n\n\ndef _strs_to_selectors(elements):\n return [_to_selector(elem) for elem in elements]\n\n\ndef _to_graphviz(workflow_node):\n \"\"\"Converts a WorkflowNode to a GraphViz DiGraph object useful for display in notebooks\"\"\"\n from graphviz import Digraph\n\n graph = Digraph()\n\n # get all the nodes from parents of this columngroup\n # and add edges between each of them\n allnodes = list(set(iter_nodes([workflow_node])))\n node_ids = {v: str(k) for k, v in enumerate(allnodes)}\n for node, nodeid in node_ids.items():\n graph.node(nodeid, node.label)\n for parent in node.parents_with_dep_nodes:\n graph.edge(node_ids[parent], nodeid)\n\n full_selector = ColumnSelector()\n\n if node.selector and not node.parents:\n full_selector += node.selector\n full_selector += sum(node.dependency_selectors, full_selector)\n\n if full_selector.names:\n selector_id = f\"{nodeid}_selector\"\n graph.node(selector_id, str(full_selector.names))\n graph.edge(selector_id, nodeid)\n\n # add a single 'output' node representing the final state\n output_node_id = str(len(allnodes))\n output_string = \"output cols\"\n if workflow_node._cols_repr:\n output_string += f\"=[{workflow_node._cols_repr}]\"\n graph.node(output_node_id, output_string)\n graph.edge(node_ids[workflow_node], output_node_id)\n return graph\n\n\ndef _convert_col(col):\n if isinstance(col, (str, tuple)):\n return col\n elif isinstance(col, list):\n return tuple(col)\n else:\n raise ValueError(f\"Invalid column value for WorkflowNode: {col}\")\n", "sub_path": "nvtabular/workflow/node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 14972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "warnings.warn", "line_number": 47, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 52, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 54, "usage_type": "argument"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 66, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal.ConcatColumns", "line_number": 76, "usage_type": "attribute"}, {"api_name": "nvtabular.ops.internal", "line_number": 76, "usage_type": "name"}, {"api_name": "nvtabular.ops.internal.concat_columns.ConcatColumns", "line_number": 89, "usage_type": "argument"}, {"api_name": "nvtabular.ops.internal.subset_columns.SubsetColumns", "line_number": 104, "usage_type": "argument"}, {"api_name": "nvtabular.ops.Operator", "line_number": 138, "usage_type": "argument"}, {"api_name": "nvtabular.ops.LambdaOp", "line_number": 143, "usage_type": "call"}, {"api_name": "nvtabular.ops.Operator", "line_number": 145, "usage_type": "argument"}, {"api_name": "collections.abc.abc", "line_number": 156, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 156, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 163, "usage_type": "argument"}, 
{"api_name": "nvtabular.columns.ColumnSelector", "line_number": 164, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal.ConcatColumns", "line_number": 180, "usage_type": "attribute"}, {"api_name": "nvtabular.ops.internal", "line_number": 180, "usage_type": "name"}, {"api_name": "nvtabular.ops.internal.ConcatColumns", "line_number": 185, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal", "line_number": 185, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 194, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 195, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal.ConcatColumns", "line_number": 200, "usage_type": "attribute"}, {"api_name": "nvtabular.ops.internal", "line_number": 200, "usage_type": "name"}, {"api_name": "nvtabular.ops.internal.SubsetColumns", "line_number": 223, "usage_type": "attribute"}, {"api_name": "nvtabular.ops.internal", "line_number": 223, "usage_type": "name"}, {"api_name": "nvtabular.ops.internal.SubsetColumns", "line_number": 228, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal", "line_number": 228, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 235, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 236, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal.SubsetColumns", "line_number": 245, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal", "line_number": 245, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 248, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 249, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 270, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal.SubsetColumns", "line_number": 274, "usage_type": "call"}, {"api_name": "nvtabular.ops.internal", "line_number": 274, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 297, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 307, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 323, "usage_type": "argument"}, {"api_name": "nvtabular.columns.Schema", "line_number": 382, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 386, "usage_type": "argument"}, {"api_name": "nvtabular.columns.Schema", "line_number": 387, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 394, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 397, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 398, "usage_type": "argument"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 401, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 406, "usage_type": "name"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 407, "usage_type": "call"}, {"api_name": "graphviz.Digraph", "line_number": 420, "usage_type": "call"}, {"api_name": "nvtabular.columns.ColumnSelector", "line_number": 431, "usage_type": "call"}]} +{"seq_id": "513359728", "text": "# Copyright © 2014 Zack Weinberg\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# 
http://www.apache.org/licenses/LICENSE-2.0\n# There is NO WARRANTY.\n\n\"\"\"Merge several URL databases into one. The -d option specifies the\n destination database (which should not yet exist); give source\n databases as non-option arguments.\"\"\"\n\ndef setup_argp(ap):\n ap.add_argument(\"sources\", metavar=\"SOURCE\", nargs=\"+\",\n help=\"Source databases to merge into the database \"\n \"specified with the -d option.\")\n\ndef run(args):\n merger = Merger(args)\n Monitor(merger, banner=\"Merging databases\")\n merger.dump()\n\nfrom collections import defaultdict\nfrom shared import url_database\nfrom shared.monitor import Monitor\n\nclass argshim:\n def __init__(self, db):\n self.database = db\n\nclass Merger:\n def __init__(self, args):\n self.args = args\n self.tables_processed = set()\n\n def dump(self):\n pass\n\n def __call__(self, mon, thr):\n self.mon = mon\n\n srcdbs = { src: url_database.reconnect_to_database(argshim(src))\n for src in self.args.sources }\n\n destdb = url_database.ensure_database(self.args)\n\n self.uidmap = self.merge_url_strings(destdb, srcdbs)\n self.oidmap = self.merge_origins(destdb, srcdbs)\n self.cidmap = self.merge_canon_statuses(destdb, srcdbs)\n\n self.merge_urls(destdb, srcdbs)\n self.merge_canon_urls(destdb, srcdbs)\n self.merge_anomalies(destdb, srcdbs)\n self.merge_ancillary(destdb, srcdbs)\n\n def merge_url_strings(self, destdb, srcdbs):\n # It is expected to be more efficient to merge the strings in\n # memory.\n stringmap = defaultdict(dict)\n uidmap = {}\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Reading URL strings ({})...\"\n .format(tag))\n self.mon.maybe_pause_or_stop()\n scur = sdb.cursor()\n n = 0\n for row in url_database.fetch_iter(\n scur.execute(\"SELECT * FROM url_strings\")):\n stringmap[row['url']][tag] = row['id']\n n = max(n, row['id'])\n self.mon.report_status(\"Reading URL strings ({})... {}\"\n .format(tag, n))\n self.mon.maybe_pause_or_stop()\n uidmap[tag] = [None]*(n+1)\n\n self.mon.report_status(\"Sorting URL strings...\")\n self.mon.maybe_pause_or_stop()\n merged = sorted(stringmap.keys())\n self.mon.report_status(\"Writing URL strings...\")\n self.mon.maybe_pause_or_stop()\n destdb.executemany(\"INSERT INTO url_strings VALUES(?,?)\",\n enumerate(merged, 1))\n self.mon.report_status(\"Writing URL strings (commit)...\")\n self.mon.maybe_pause_or_stop()\n destdb.commit()\n self.mon.report_status(\"Constructing uidmap...\")\n self.mon.maybe_pause_or_stop()\n for uid, url in enumerate(merged, 1):\n smap = stringmap[url]\n self.mon.report_status(\"Constructing uidmap... {}\".format(uid))\n self.mon.maybe_pause_or_stop()\n for tag in srcdbs.keys():\n sid = smap.get(tag, None)\n if sid is not None:\n uidmap[tag][sid] = uid\n\n self.tables_processed.add(\"url_strings\")\n return uidmap\n\n def merge_origins(self, destdb, srcdbs):\n # The logic here is essentially the same as for URL strings,\n # but the origins lists are much, much shorter.\n\n stringmap = defaultdict(dict)\n oidmap = {}\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Reading origin list ({})...\"\n .format(tag))\n scur = sdb.cursor()\n n = 0\n for row in url_database.fetch_iter(\n scur.execute(\"SELECT * FROM origins\")):\n stringmap[row['label']][tag] = row['id']\n n += 1\n self.mon.report_status(\"Reading origin list ({})... 
{}\"\n .format(tag, n))\n oidmap[tag] = [None]*(n+1)\n\n self.mon.report_status(\"Sorting origin list...\")\n merged = sorted(stringmap.keys())\n self.mon.report_status(\"Writing origin list...\")\n destdb.executemany(\"INSERT INTO origins VALUES(?,?)\",\n enumerate(merged, 1))\n self.mon.report_status(\"Writing origins (commit)...\")\n destdb.commit()\n self.mon.report_status(\"Constructing oidmap...\")\n for oid, label in enumerate(merged, 1):\n smap = stringmap[label]\n for tag in srcdbs.keys():\n sid = smap.get(tag, None)\n if sid is not None:\n oidmap[tag][sid] = oid\n\n self.tables_processed.add(\"origins\")\n return oidmap\n\n def merge_canon_statuses(self, destdb, srcdbs):\n # Similarly.\n\n stringmap = defaultdict(dict)\n cidmap = {}\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Reading canon status list ({})...\"\n .format(tag))\n scur = sdb.cursor()\n n = 0\n for row in url_database.fetch_iter(\n scur.execute(\"SELECT * FROM canon_statuses\")):\n stringmap[row['label']][tag] = row['id']\n n += 1\n self.mon.report_status(\"Reading canon_status list ({})... {}\"\n .format(tag, n))\n cidmap[tag] = [None]*(n+1)\n\n self.mon.report_status(\"Sorting canon status list...\")\n merged = sorted(stringmap.keys())\n self.mon.report_status(\"Writing canon status list...\")\n destdb.executemany(\"INSERT INTO canon_statuses VALUES(?,?)\",\n enumerate(merged, 1))\n self.mon.report_status(\"Writing canon_statuses (commit)...\")\n destdb.commit()\n self.mon.report_status(\"Constructing cidmap...\")\n for oid, label in enumerate(merged, 1):\n smap = stringmap[label]\n for tag in srcdbs.keys():\n sid = smap.get(tag, None)\n if sid is not None:\n cidmap[tag][sid] = oid\n\n self.tables_processed.add(\"canon_statuses\")\n return cidmap\n\n def merge_urls(self, destdb, srcdbs):\n\n writer = destdb.cursor()\n write_batch = []\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Merging URLs ({})...\"\n .format(tag))\n self.mon.maybe_pause_or_stop()\n\n scur = sdb.cursor()\n omap = self.oidmap[tag]\n umap = self.uidmap[tag]\n for row in url_database.fetch_iter(\n scur.execute(\"SELECT * FROM urls\")):\n m_origin = omap[row['origin']]\n m_origin_id = row['origin_id']\n m_url = umap[row['url']]\n write_batch.append((m_origin, m_origin_id, m_url))\n self.mon.report_status(\"Merging URLs ({})... {}.{}\"\n .format(tag, m_origin, m_origin_id))\n self.mon.maybe_pause_or_stop()\n\n if len(write_batch) > 10000:\n self.mon.report_status(\"Merging URLs ({})... writing\"\n .format(tag))\n writer.executemany(\"INSERT INTO urls VALUES(?,?,?)\",\n write_batch)\n write_batch = []\n destdb.commit()\n self.mon.maybe_pause_or_stop()\n\n self.mon.report_status(\"Merging URLs ({})... writing\"\n .format(tag))\n writer.executemany(\"INSERT INTO urls VALUES(?,?,?)\",\n write_batch)\n destdb.commit()\n self.tables_processed.add(\"urls\")\n\n def merge_canon_urls(self, destdb, srcdbs):\n writer = destdb.cursor()\n write_batch = []\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Merging canon URLs ({})...\"\n .format(tag))\n self.mon.maybe_pause_or_stop()\n\n scur = sdb.cursor()\n umap = self.uidmap[tag]\n cmap = self.cidmap[tag]\n for row in url_database.fetch_iter(\n scur.execute(\"SELECT * FROM canon_urls\")):\n m_url = umap[row['url']]\n m_canon = umap[row['canon']]\n m_status = cmap[row['status']]\n write_batch.append((m_url, m_canon, m_status))\n self.mon.report_status(\"Merging canon URLs ({})... 
{}.{}\"\n .format(tag, m_url))\n self.mon.maybe_pause_or_stop()\n\n if len(write_batch) > 10000:\n self.mon.report_status(\"Merging canon URLs ({})... writing\"\n .format(tag))\n writer.executemany(\"INSERT INTO canon_urls VALUES(?,?,?)\",\n write_batch)\n write_batch = []\n destdb.commit()\n self.mon.maybe_pause_or_stop()\n\n self.mon.report_status(\"Merging canon URLs ({})... writing\"\n .format(tag))\n writer.executemany(\"INSERT INTO canon_urls VALUES(?,?,?)\",\n write_batch)\n destdb.commit()\n self.tables_processed.add(\"canon_urls\")\n\n def merge_anomalies(self, destdb, srcdbs):\n writer = destdb.cursor()\n write_batch = []\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Merging anomalies ({})...\"\n .format(tag))\n self.mon.maybe_pause_or_stop()\n\n scur = sdb.cursor()\n umap = self.uidmap[tag]\n cmap = self.cidmap[tag]\n for row in url_database.fetch_iter(\n scur.execute(\"SELECT * FROM anomalies\")):\n m_url = umap[row['url']]\n m_status = cmap[row['status']]\n write_batch.append((m_url, m_status, row['response']))\n self.mon.report_status(\"Merging anomalies ({})... {}.{}\"\n .format(tag, m_url))\n self.mon.maybe_pause_or_stop()\n\n if len(write_batch) > 10000:\n self.mon.report_status(\"Merging anomalies ({})... writing\"\n .format(tag))\n writer.executemany(\"INSERT INTO anomalies VALUES(?,?,?)\",\n write_batch)\n write_batch = []\n destdb.commit()\n self.mon.maybe_pause_or_stop()\n\n self.mon.report_status(\"Merging anomalies ({})... writing\"\n .format(tag))\n writer.executemany(\"INSERT INTO anomalies VALUES(?,?,?)\",\n write_batch)\n destdb.commit()\n self.tables_processed.add(\"anomalies\")\n\n def merge_ancillary(self, destdb, srcdbs):\n # The ancillary table merge currently assumes that we don't\n # need to do any ID fixups. This is accurate for the present\n # data set just because each database-to-be-merged has only one\n # origin in it, but may become a problem later.\n\n writer = destdb.cursor()\n already_created = set(row[0] for row in writer.execute(\n \"SELECT name FROM sqlite_master WHERE name NOT LIKE 'sqlite_%'\"\n ).fetchall())\n\n for tag, sdb in srcdbs.items():\n self.mon.report_status(\"Merging ancillary tables ({})...\"\n .format(tag))\n self.mon.maybe_pause_or_stop()\n scur = sdb.cursor()\n for name, sql in scur.execute(\n \"SELECT name, sql FROM sqlite_master \"\n \"WHERE type = 'table' \"\n \"AND name NOT LIKE 'sqlite_%'\").fetchall():\n if name in self.tables_processed:\n continue\n\n self.mon.report_status(\"Merging ancillary tables ({})... {}\"\n .format(tag, name))\n self.mon.maybe_pause_or_stop()\n\n if name not in already_created:\n writer.executescript(sql)\n for isql in scur.execute(\n \"SELECT sql FROM sqlite_master \"\n \"WHERE type = 'index' \"\n \"AND tbl_name = ?\", (name,)).fetchall():\n writer.executescript(isql[0])\n already_created.add(name)\n\n # This is the least bad available way to find out how\n # many columns the table has. It doesn't work if the\n # table is empty, but if the table is empty, we don't\n # need to do anything.\n row = scur.execute(\n \"SELECT * FROM \\\"\"+name+\"\\\" LIMIT 1\").fetchone()\n if row is None:\n continue\n\n cols = len(row)\n insertion = (\"INSERT INTO \\\"\"+name+\"\\\" VALUES(\" +\n \",\".join([\"?\"]*cols) + \")\")\n selection = \"SELECT * FROM \\\"\"+name+\"\\\"\"\n # sqlite3.Row objects cannot be passed directly to execute().\n # Feh. 
Feh, I say.\n for row in url_database.fetch_iter(scur.execute(selection)):\n writer.execute(insertion, tuple(row))\n destdb.commit()\n", "sub_path": "lib/url_sources/s_merge.py", "file_name": "s_merge.py", "file_ext": "py", "file_size_in_byte": 13903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "shared.url_database.reconnect_to_database", "line_number": 41, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 41, "usage_type": "name"}, {"api_name": "shared.url_database.ensure_database", "line_number": 44, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 44, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 58, "usage_type": "call"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 67, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 67, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 104, "usage_type": "call"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 112, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 112, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 141, "usage_type": "call"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 149, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 149, "usage_type": "name"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 188, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 188, "usage_type": "name"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 226, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 226, "usage_type": "name"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 264, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 264, "usage_type": "name"}, {"api_name": "shared.url_database.fetch_iter", "line_number": 340, "usage_type": "call"}, {"api_name": "shared.url_database", "line_number": 340, "usage_type": "name"}]} +{"seq_id": "452503078", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\nblack = (0, 0, 0)\nblue = (0.149, 0.365, 0.670)\norange = (0.875, 0.361, 0.141)\ngreen = (0.20, 0.592, 0.282)\npink = (0.898, 0.071, 0.435)\nbrown = (0.616, 0.447, 0.165)\npurple = (0.533, 0.337, 0.654)\nyellow = (0.780, 0.706, 0.180)\nred = (0.796, 0.125, 0.152)\ngrey = (0.827, 0.827, 0.827)\ndarkgrey = (0.745, 0.745, 0.745)\n\n\ndef export_plot(figure, directory, tight=False):\n print(\"exported an image to: \", directory)\n if tight:\n return figure.savefig(directory, dpi=600, bbox_inches='tight')\n else:\n return figure.savefig(directory, dpi=600)\n\n\ndef plot_legend(ax1, ax2=None, ax3=None, figsize=None):\n \"\"\"plot the legend in a separate figure. axes should be an iterable, also if there is only one\"\"\"\n if not figsize:\n figsize = [1.5, 1]\n fig_legend, axi = plt.subplots(figsize=(figsize[0], figsize[1]))\n # get the handles and labels. 
If there are more axes, create one list with all the handles and labels\n handles, labels = ax1.get_legend_handles_labels()\n if ax2:\n handles_ax2, labels_ax2 = ax2.get_legend_handles_labels()\n handles = handles + handles_ax2\n labels = labels + labels_ax2\n if ax3:\n handles_ax3, labels_ax3 = ax3.get_legend_handles_labels()\n handles = handles + handles_ax3\n labels = labels + labels_ax3\n axi.legend(handles, labels, loc='center')\n # remove x axis\n axi.xaxis.set_visible(False)\n # remove y axes\n axi.yaxis.set_visible(False)\n # remove spines\n axi.spines['right'].set_visible(False)\n axi.spines['left'].set_visible(False)\n axi.spines['top'].set_visible(False)\n axi.spines['bottom'].set_visible(False)\n # fig_legend.tight_layout()\n return fig_legend\n\n\ndef linear_regression(x, y):\n mask = ~np.isnan(x) & ~np.isnan(y) # create mask to remove NANs\n slope, intercept, r_value, p_value, std_err = stats.linregress(x[mask], y[mask])\n return slope, intercept, r_value ** 2\n\n\ndef add_trendline(x, y, ax):\n slope, intercept, r_squared = linear_regression(x, y)\n print(f'slope: {slope}, intercept:{intercept}, r squared: {r_squared}')\n ax.plot(x, x * slope + intercept,\n c=black,\n linewidth=0.5)\n return ax\n\n\ndef data_for_trendline(df, x, y, outliers = None):\n \"\"\"outliers should be list of two items: first column name and then list of values to remove\"\"\"\n df = df.dropna(subset=[y])\n if outliers:\n df = df[~df[outliers[0]].isin(outliers[1])]\n return df[x].values, df[y].values", "sub_path": "scripts/plot_methods.py", "file_name": "plot_methods.py", "file_ext": "py", "file_size_in_byte": 2561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "197998552", "text": " # -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 10 15:11:11 2018\r\n\r\n@author: ds-02\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nhochR=[]\r\nhochC=[]\r\nhochH=[]\r\nmaxR=[]\r\nmax05=[]\r\nbegin=[]\r\nend=[]\r\n\r\nfor x in range(1,6):\r\n hochR.append(np.load(\"hochR\"+str(x)+\".npy\"))\r\n hochH.append(np.load(\"hochH\"+str(x)+\".npy\"))\r\n hochC.append(np.load(\"hochC\"+str(x)+\".npy\"))\r\nfor i in range(5): \r\n maxR.append(np.max(hochR[i][10000:200000,]))\r\n max05.append(maxR[i]*0.05)\r\n \r\nfor j in range(5):\r\n for z in range(5000,50000):\r\n if(hochR[j][z] >= max05[j]):\r\n begin.append(z)\r\n end.append(z+44100)\r\n break\r\nplt.plot(hochR[2])\r\n\r\nprint(begin)\r\nprint(end)\r\n", "sub_path": "Versuch4/V4a.py", "file_name": "V4a.py", "file_ext": "py", "file_size_in_byte": 720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": 
"607464511", "text": "from bottle import run, get , post, request , delete\r\n\r\nanimals = [{'name' : 'Ellie', 'type' :'Elephant'},\r\n {'name' : 'Python', 'type' : 'Snake'},\r\n {'name' : 'Zed', 'type' : 'Zebra'}]\r\n\r\n@get('/animal')\r\ndef getAll():\r\n return {'animals' : animals}\r\n\r\n@get('/animal/')\r\ndef getOne(name):\r\n the_animal = [animal for animal in animals if animal['name'] == name]\r\n return {'animal' : the_animal[0]}\r\n\r\n@post('/animal')\r\ndef addOne():\r\n new_animal = {'name' : request.json.get('name'), 'type' : request.json.get('type')}\r\n animals.append(new_animal)\r\n return {'animals' : animals}\r\n\r\n@post('/age')\r\ndef age():\r\n name = dict()\r\n age_calc = 0\r\n name.add = {'name': request.json.get('name'), 'yob': request.json.get('yob')}\r\n age_calc = 2018-int(name['yob'])\r\n return {'name': name['name'], 'age': age_calc}\r\n\r\n\r\n@delete('/animal/')\r\ndef removeOne(name):\r\n the_animal = [animal for animal in animals if animal['name'] == name]\r\n animals.remove(the_animal[0])\r\n return {'animals' : animals}\r\n\r\nrun(reloader=True, debug=True)", "sub_path": "zoo.py", "file_name": "zoo.py", "file_ext": "py", "file_size_in_byte": 1092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "bottle.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bottle.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bottle.request.json.get", "line_number": 18, "usage_type": "call"}, {"api_name": "bottle.request.json", "line_number": 18, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 18, "usage_type": "name"}, {"api_name": "bottle.post", "line_number": 16, "usage_type": "call"}, {"api_name": "bottle.request.json.get", "line_number": 26, "usage_type": "call"}, {"api_name": "bottle.request.json", "line_number": 26, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 26, "usage_type": "name"}, {"api_name": "bottle.post", "line_number": 22, "usage_type": "call"}, {"api_name": "bottle.delete", "line_number": 31, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "237621914", "text": "from rest_framework.authtoken.models import Token\n\n\ndef user_construct(user):\n fields = ['id', 'email', 'first_name', 'last_name', 'date_joined', 'avatar']\n token, created = Token.objects.get_or_create(user=user)\n\n return {\n 'user': {field: getattr(user, field) for field in fields},\n 'token': token.key\n }\n", "sub_path": "api/chat/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rest_framework.authtoken.models.Token.objects.get_or_create", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "164881595", "text": "import datetime\nimport uuid\n\nimport pytest\n\nfrom lots_admin.models import Application\nfrom lots_client.views import advance_if_ppf_and_eds_submitted\n\n\n@pytest.mark.django_db\n@pytest.mark.parametrize('eds_received,ppf_received', [\n (False, False),\n (True, False),\n (False, True),\n (True, True)\n])\ndef test_advance_to_step_8(django_db_setup,\n eds_received,\n ppf_received):\n\n application = 
Application.objects.get(applicationstatus__current_step__step=7)\n application.eds_sent = True\n application.save()\n\n setattr(application, 'eds_received', eds_received)\n setattr(application, 'ppf_received', ppf_received)\n\n if all([eds_received, ppf_received]):\n step = 8\n else:\n step = 7\n\n advance_if_ppf_and_eds_submitted(application)\n\n assert application.applicationstatus_set.first().current_step.step == step\n\n@pytest.mark.django_db\ndef test_view_requires_tracking_id(django_db_setup,\n client,\n application):\n\n rv = client.get('/principal-profile-form/')\n\n assert 'Oops!' in str(rv.content)\n\n app = application.build()\n rv = client.get('/principal-profile-form/{}/'.format(app.tracking_id))\n\n assert 'Instructions' in str(rv.content)\n\n@pytest.mark.django_db\n@pytest.mark.parametrize('ppf_type', ['individual', 'organization'])\ndef test_individual_ppf_submission(django_db_setup,\n client,\n application,\n ppf_blob,\n ppf_type):\n\n if ppf_type == 'individual':\n app = application.build()\n data = ppf_blob.build(app)\n\n elif ppf_type == 'organization':\n app = application.build(organization_confirmed=True,\n organization='The Peacock Company')\n\n data = ppf_blob.build(app, home_address_street='456 Feather Lane')\n\n rv = client.post(\n '/principal-profile-form/{}/'.format(app.tracking_id),\n data=data,\n )\n\n assert rv.status_code == 200\n assert 'Success!' in str(rv.content)\n\n app.refresh_from_db()\n assert app.ppf_received == True\n\n principal_profiles = app.principalprofile_set.all()\n assert len(principal_profiles) == 2\n\n primary_ppf = principal_profiles.first()\n\n if ppf_type == 'organization':\n assert primary_ppf.address == '456 Feather Lane, Chicago, IL 60609'\n\n elif ppf_type == 'individual':\n assert primary_ppf.address == '5000 S ELIZABETH ST, Chicago, IL 60609'\n\n related_person = app.relatedperson_set.get()\n related_ppf = principal_profiles.last()\n\n assert 'Petmore Dogs' in str(related_person)\n assert related_ppf.address == '4539 N Paulina, Chicago, IL 60624'\n", "sub_path": "tests/lots_client/test_ppf.py", "file_name": "test_ppf.py", "file_ext": "py", "file_size_in_byte": 2817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "lots_admin.models.Application.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "lots_admin.models.Application.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "lots_admin.models.Application", "line_number": 21, "usage_type": "name"}, {"api_name": "lots_client.views.advance_if_ppf_and_eds_submitted", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "73824757", "text": "\"\"\"\n Utilities for writing Python tests with CIJOE\n\"\"\"\nimport sys\nimport os\nimport yaml\nimport cij.board\nimport cij.block\nimport cij.lnvm\nimport cij.util\nimport cij.ssh\nimport cij\n\n\nPASS = 0\nFAIL = 1\nSKIP = 2\nREQS = []\n\n\ndef tindex(spath=None):\n \"\"\"\n Lists tindex in 
CIJ_TESTCASES\n\n @Returns On success, a list of filenames is returned. On error, None is\n returned\n \"\"\"\n\n spath = spath if spath else os.environ.get(\"CIJ_TESTCASES\", None)\n if spath is None:\n return None\n\n tests = [] # Look for .sh files\n for root, _, files in os.walk(spath):\n if root != spath:\n continue\n\n tests += [f for f in files if f[-3:] in [\".sh\", \".py\"]]\n\n return tests\n\n\ndef envs():\n \"\"\"\n Return variables defined by modules required by test\n \"\"\"\n\n variables = {}\n\n for req in REQS:\n prefix = req.upper()\n variables[prefix] = cij.env_to_dict(\n prefix, getattr(cij, req).REQUIRED + getattr(cij, req).EXPORTED\n )\n\n return variables\n\n\ndef require(req):\n \"\"\"Add test requirement\"\"\"\n\n REQS.append(req)\n\n\ndef enter():\n \"\"\"Enter the test, check requirements and setup aux. environment\"\"\"\n\n if cij.ssh.env():\n tfail(\"cij.test: invalid SSH environment\")\n\n for req in REQS:\n if getattr(cij, req).env():\n tfail()\n\n cij.emph(\"cij.test: entering test\")\n\n\ndef texit(msg=None, rcode=1):\n \"\"\"Exit the test\"\"\"\n\n msg = \", msg: %r\" % msg if msg else \"\"\n\n if rcode:\n cij.err(\"cij.test: FAILED%s\" % msg)\n else:\n cij.good(\"cij.test: PASSED%s\" % msg)\n\n sys.exit(rcode)\n\n\ndef tpass(msg=None):\n \"\"\"Testing: Exit test indicating test passed\"\"\"\n\n texit(msg, 0)\n\n\ndef tfail(msg=None):\n \"\"\"Testing: Exit test indicating test failed\"\"\"\n\n texit(msg, 1)\n\n\ndef command(cmd, ssh=True, shell=True, echo=True):\n \"\"\"\n Execute the given 'cmd'\n\n @returns (rcode, stdout, stderr)\n \"\"\"\n\n if ssh:\n return cij.ssh.command(cmd, shell, echo)\n\n return cij.util.execute(cmd, shell, echo)\n\n\ndef command_to_struct(cmd):\n \"\"\"\n Same as `command` except it tries to convert stdout to struct\n\n @returns (rcode, struct, stderr, struct)\n \"\"\"\n\n struct = None\n\n rcode, stdout, stderr = command(cmd)\n\n try:\n lines = []\n\n for line in stdout.splitlines():\n if line.strip().startswith(\"#\"):\n continue\n\n lines.append(line)\n\n struct = yaml.safe_load(\"\\n\".join(lines))\n except (yaml.YAMLError) as exc:\n cij.err(\"could not parse stdout as yaml, exc: %r\" % exc)\n\n return rcode, stdout, stderr, struct\n", "sub_path": "modules/cij/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.environ.get", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 34, "usage_type": "call"}, {"api_name": "cij.env_to_dict", "line_number": 52, "usage_type": "call"}, {"api_name": "cij.ssh.env", "line_number": 68, "usage_type": "call"}, {"api_name": "cij.ssh", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cij.emph", "line_number": 75, "usage_type": "call"}, {"api_name": "cij.err", "line_number": 84, "usage_type": "call"}, {"api_name": "cij.good", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 88, "usage_type": "call"}, {"api_name": "cij.ssh.command", "line_number": 111, "usage_type": "call"}, {"api_name": "cij.ssh", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cij.util.execute", "line_number": 113, "usage_type": "call"}, {"api_name": "cij.util", "line_number": 113, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 136, "usage_type": "call"}, {"api_name": "yaml.YAMLError", 
"line_number": 137, "usage_type": "attribute"}, {"api_name": "cij.err", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "4343394", "text": "import awkward as ak\n\nfrom coffea import processor, hist\nfrom coffea.nanoevents import NanoEventsFactory, NanoAODSchema\nfrom coffea.analysis_tools import Weights, PackedSelection\n\nimport numpy as np\n\nfrom Tools.objects import *\nfrom Tools.basic_objects import *\nfrom Tools.cutflow import *\nfrom Tools.config_helpers import *\nfrom Tools.triggers import *\nfrom Tools.btag_scalefactors import *\nfrom Tools.ttH_lepton_scalefactors import *\nfrom Tools.helpers import mt, get_four_vec, pad_and_flatten\n\nimport sys\nimport warnings\n\n\nout_dict = {\n 'total': processor.defaultdict_accumulator(int),\n 'presel': processor.defaultdict_accumulator(int),\n 'sel': processor.defaultdict_accumulator(int),\n 'topW_v3': processor.defaultdict_accumulator(int),\n 'topW_v2': processor.defaultdict_accumulator(int),\n 'TTW': processor.defaultdict_accumulator(int),\n 'TTZ': processor.defaultdict_accumulator(int),\n 'TTH': processor.defaultdict_accumulator(int),\n 'rare': processor.defaultdict_accumulator(int),\n 'ttbar': processor.defaultdict_accumulator(int),\n 'ttbar1l_MG': processor.defaultdict_accumulator(int),\n}\n\nvariables = [\n 'mjj_max',\n 'delta_eta_jj',\n 'met',\n 'ht',\n 'st',\n 'n_jet',\n 'n_btag',\n 'n_fwd',\n 'n_central',\n 'n_tau',\n 'n_track',\n 'n_lep_tight',\n 'n_lep',\n\n 'fwd_jet_p',\n 'fwd_jet_pt',\n 'fwd_jet_eta',\n 'fwd_jet_phi',\n 'fwd_jet_energy',\n 'fwd_jet_px',\n 'fwd_jet_py',\n 'fwd_jet_pz',\n\n 'lead_jet_pt',\n 'lead_jet_eta',\n 'lead_jet_phi',\n\n 'sublead_jet_pt',\n 'sublead_jet_eta',\n 'sublead_jet_phi',\n\n 'lead_btag_pt',\n 'lead_btag_eta',\n 'lead_btag_phi',\n 'lead_btag_energy',\n 'lead_btag_px',\n 'lead_btag_py',\n 'lead_btag_pz',\n\n 'sublead_btag_pt',\n 'sublead_btag_eta',\n 'sublead_btag_phi',\n 'sublead_btag_energy',\n 'sublead_btag_px',\n 'sublead_btag_py',\n 'sublead_btag_pz',\n\n 'lead_lep_pt',\n 'lead_lep_eta',\n 'lead_lep_phi',\n 'lead_lep_charge',\n 'lead_lep_energy',\n 'lead_lep_px',\n 'lead_lep_py',\n 'lead_lep_pz',\n\n 'sublead_lep_pt',\n 'sublead_lep_eta',\n 'sublead_lep_phi',\n 'sublead_lep_charge',\n 'sublead_lep_energy',\n 'sublead_lep_px',\n 'sublead_lep_py',\n 'sublead_lep_pz',\n\n 'dilepton_mass',\n 'dilepton_pt',\n 'min_bl_dR',\n 'min_mt_lep_met',\n 'label',\n 'label_cat',\n 'weight',\n]\n\nfor i in range(6):\n for j in ['pt', 'eta', 'phi', 'energy', 'px', 'py', 'pz']:\n variables.append('j%s_%s'%(i, j))\n\nfor var in variables:\n out_dict.update({var: processor.column_accumulator(np.zeros(shape=(0,)))})\n\n\nclass ML_preprocessor(processor.ProcessorABC):\n '''\n e.g. 
deltaR of leptons, min deltaR of lepton and jet\n\n '''\n def __init__(self, year=2018):\n \n self.year = year\n \n self._accumulator = processor.dict_accumulator( out_dict )\n self.btagSF = btag_scalefactor(year)\n\n self.leptonSF = LeptonSF(year=year)\n\n\n @property\n def accumulator(self):\n return self._accumulator\n\n # we will receive a NanoEvents instead of a coffea DataFrame\n def process(self, events):\n \n output = self.accumulator.identity()\n \n output['total']['all'] += len(events)\n # use a very loose preselection to filter the events\n presel = ak.num(events.Jet)>2\n \n ev = events[presel]\n dataset = ev.metadata['dataset']\n \n # load the config - probably not needed anymore\n cfg = loadConfig()\n \n gen_lep = ev.GenL\n \n ## Muons\n muon = Collections(ev, \"Muon\", \"vetoTTH\").get()\n tightmuon = Collections(ev, \"Muon\", \"tightSSTTH\").get()\n dimuon = choose(muon, 2)\n SSmuon = ak.any((dimuon['0'].charge * dimuon['1'].charge)>0, axis=1)\n leading_muon_idx = ak.singletons(ak.argmax(muon.pt, axis=1))\n leading_muon = muon[leading_muon_idx]\n \n ## Electrons\n electron = Collections(ev, \"Electron\", \"vetoTTH\").get()\n tightelectron = Collections(ev, \"Electron\", \"tightSSTTH\").get()\n dielectron = choose(electron, 2)\n SSelectron = ak.any((dielectron['0'].charge * dielectron['1'].charge)>0, axis=1)\n leading_electron_idx = ak.singletons(ak.argmax(electron.pt, axis=1))\n leading_electron = electron[leading_electron_idx]\n \n ## Merge electrons and muons - this should work better now in ak1\n dilepton = cross(muon, electron)\n SSlepton = ak.any((dilepton['0'].charge * dilepton['1'].charge)>0, axis=1)\n\n lepton = ak.concatenate([muon, electron], axis=1)\n lepton = get_four_vec(lepton)\n leading_lepton_idx = ak.singletons(ak.argmax(lepton.pt, axis=1))\n leading_lepton = get_four_vec(lepton[leading_lepton_idx])\n trailing_lepton_idx = ak.singletons(ak.argmin(lepton.pt, axis=1))\n trailing_lepton = get_four_vec(lepton[trailing_lepton_idx])\n \n dilepton_mass = (leading_lepton+trailing_lepton).mass\n dilepton_pt = (leading_lepton+trailing_lepton).pt\n dilepton_dR = delta_r(leading_lepton, trailing_lepton)\n\n mt_lep_met = mt(lepton.pt, lepton.phi, ev.MET.pt, ev.MET.phi)\n min_mt_lep_met = ak.min(mt_lep_met, axis=1)\n\n ## Jets\n jet = getJets(ev, minPt=25, maxEta=4.7, pt_var='pt_nom')\n jet = jet[ak.argsort(jet.pt_nom, ascending=False)] # need to sort wrt smeared and recorrected jet pt\n jet = jet[~match(jet, muon, deltaRCut=0.4)] # remove jets that overlap with muons\n jet = jet[~match(jet, electron, deltaRCut=0.4)] # remove jets that overlap with electrons\n \n central = jet[(abs(jet.eta)<2.4)]\n btag = getBTagsDeepFlavB(jet, year=self.year) # should study working point for DeepJet\n light = getBTagsDeepFlavB(jet, year=self.year, invert=True)\n fwd = getFwdJet(light)\n fwd_noPU = getFwdJet(light, puId=False)\n fwd_cleaned = fwd[~match(fwd, getFwdJet(jet[:,0:5]), deltaRCut=0.1)] # the leading forward jets that are not in the 5 leading jets overall\n \n tau = getTaus(ev)\n track = getIsoTracks(ev)\n ## forward jets\n j_fwd = fwd[ak.singletons(ak.argmax(fwd.p, axis=1))] # highest momentum spectator\n\n high_score_btag = central[ak.argsort(central.btagDeepFlavB)][:,:2]\n \n bl = cross(lepton, high_score_btag)\n bl_dR = delta_r(bl['0'], bl['1'])\n min_bl_dR = ak.min(bl_dR, axis=1)\n\n jf = cross(j_fwd, jet)\n mjf = (jf['0']+jf['1']).mass\n j_fwd2 = jf[ak.singletons(ak.argmax(mjf, axis=1))]['1'] # this is the jet that forms the largest invariant mass with j_fwd\n delta_eta = 
ak.fill_none(ak.pad_none(abs(j_fwd2.eta - j_fwd.eta), 1, clip=True), 0)\n\n ## MET -> can switch to puppi MET\n met_pt = ev.MET.pt\n met_phi = ev.MET.phi\n\n ## other variables\n ht = ak.sum(jet.pt, axis=1)\n st = met_pt + ht + ak.sum(muon.pt, axis=1) + ak.sum(electron.pt, axis=1)\n \n ## event selectors\n filters = getFilters(ev, year=self.year, dataset=dataset)\n \n dilep = ((ak.num(tightelectron) + ak.num(tightmuon))==2)\n lep0pt = ((ak.num(electron[(electron.pt>25)]) + ak.num(muon[(muon.pt>25)]))>0)\n lep1pt = ((ak.num(electron[(electron.pt>20)]) + ak.num(muon[(muon.pt>20)]))>1)\n lepveto = ((ak.num(electron) + ak.num(muon))==2)\n \n \n selection = PackedSelection()\n selection.add('lepveto', lepveto)\n selection.add('dilep', dilep )\n selection.add('filter', (filters) )\n selection.add('p_T(lep0)>25', lep0pt )\n selection.add('p_T(lep1)>20', lep1pt )\n selection.add('SS', ( SSlepton | SSelectron | SSmuon) )\n selection.add('N_jet>3', (ak.num(jet)>=4) )\n selection.add('N_central>2', (ak.num(central)>=3) )\n selection.add('N_btag>0', (ak.num(btag)>=1) )\n selection.add('N_fwd>0', (ak.num(fwd)>=1 ))\n \n #ss_reqs = ['lepveto', 'dilep', 'filter', 'p_T(lep0)>25', 'p_T(lep1)>20', 'SS']\n ss_reqs = ['lepveto', 'dilep', 'filter', 'p_T(lep0)>25', 'p_T(lep1)>20', 'SS']\n #bl_reqs = ss_reqs + ['N_jet>3', 'N_central>2', 'N_btag>0', 'N_fwd>0']\n bl_reqs = ss_reqs + ['N_jet>3', 'N_central>2', 'N_btag>0']\n\n ss_reqs_d = { sel: True for sel in ss_reqs }\n ss_selection = selection.require(**ss_reqs_d)\n bl_reqs_d = { sel: True for sel in bl_reqs }\n BL = selection.require(**bl_reqs_d)\n\n weight = Weights( len(ev) )\n\n if not dataset=='MuonEG':\n # lumi weight\n weight.add(\"weight\", ev.weight)\n\n # PU weight - not in the babies...\n weight.add(\"PU\", ev.puWeight, weightUp=ev.puWeightUp, weightDown=ev.puWeightDown, shift=False)\n\n # b-tag SFs\n weight.add(\"btag\", self.btagSF.Method1a(btag, light))\n\n # lepton SFs\n weight.add(\"lepton\", self.leptonSF.get(electron, muon))\n\n\n #cutflow = Cutflow(output, ev, weight=weight)\n #cutflow_reqs_d = {}\n #for req in bl_reqs:\n # cutflow_reqs_d.update({req: True})\n # cutflow.addRow( req, selection.require(**cutflow_reqs_d) )\n\n labels = {'topW_v3': 0, 'TTW':1, 'TTZ': 2, 'TTH': 3, 'ttbar': 4, 'ttbar1l_MG': 4, 'DY': 6, 'topW_EFT_cp8':100 }\n if dataset in labels:\n label_mult = labels[dataset]\n else:\n label_mult = 5\n\n label = np.ones(len(ev[BL])) * label_mult\n\n n_nonprompt = (getNonPromptFromFlavour(tightelectron) + getNonPromptFromFlavour(tightmuon))[BL]\n n_chargeflip = (getChargeFlips(tightelectron, ev.GenPart) + getChargeFlips(tightmuon, ev.GenPart))[BL]\n n_genlep = ak.num(ev.GenL, axis=1)[BL]\n\n label_cat = (n_nonprompt>0)*100 + (n_chargeflip>0)*1000 + (n_genlep>2)*10 + np.ones(len(ev[BL])) # >1000 for charge flip, >100 for non prompt, >10 for more than 2 gen lep, 1 for prompt\n if dataset=='topW_v3':\n label_cat = np.ones(len(ev[BL])) * 0\n else:\n label_cat = 4*(label_cat>=1000) + 3*((label_cat>=100) & (label_cat<1000)) + 2*((label_cat>=10) & (label_cat<100)) + 1*(label_cat<10) # this makes charge flip 4, nonprompt 3...\n label_cat = np.array(label_cat)\n\n output[\"n_lep\"] += processor.column_accumulator(ak.to_numpy( (ak.num(electron) + ak.num(muon))[BL] ))\n output[\"n_lep_tight\"] += processor.column_accumulator(ak.to_numpy( (ak.num(tightelectron) + ak.num(tightmuon))[BL] ))\n\n o_leading_lepton = get_four_vec(leading_lepton[BL])\n output[\"lead_lep_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.pt, 
axis=1)))\n output[\"lead_lep_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.eta, axis=1)))\n output[\"lead_lep_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.phi, axis=1)))\n output[\"lead_lep_charge\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.charge, axis=1)))\n output[\"lead_lep_energy\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.energy, axis=1)))\n output[\"lead_lep_px\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.px, axis=1)))\n output[\"lead_lep_py\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.py, axis=1)))\n output[\"lead_lep_pz\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_leading_lepton.pz, axis=1)))\n\n o_trailing_lepton = get_four_vec(trailing_lepton[BL])\n output[\"sublead_lep_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.pt, axis=1)))\n output[\"sublead_lep_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.eta, axis=1)))\n output[\"sublead_lep_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.phi, axis=1)))\n output[\"sublead_lep_charge\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.charge, axis=1)))\n output[\"sublead_lep_energy\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.energy, axis=1)))\n output[\"sublead_lep_px\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.px, axis=1)))\n output[\"sublead_lep_py\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.py, axis=1)))\n output[\"sublead_lep_pz\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(o_trailing_lepton.pz, axis=1)))\n\n output[\"lead_jet_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(jet[:, 0:1][BL].pt, axis=1)))\n output[\"lead_jet_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(jet[:, 0:1][BL].eta, axis=1)))\n output[\"lead_jet_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(jet[:, 0:1][BL].phi, axis=1)))\n\n output[\"sublead_jet_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(jet[:, 1:2][BL].pt, axis=1)))\n output[\"sublead_jet_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(jet[:, 1:2][BL].eta, axis=1)))\n output[\"sublead_jet_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(jet[:, 1:2][BL].phi, axis=1)))\n\n for i in range(5):\n output[\"j%s_pt\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].pt)))\n output[\"j%s_eta\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].eta)))\n output[\"j%s_phi\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].phi)))\n output[\"j%s_energy\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].energy)))\n output[\"j%s_px\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].px)))\n output[\"j%s_py\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].py)))\n output[\"j%s_pz\"%i] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(jet[:,i:i+1][BL].pz)))\n\n output[\"j5_pt\"] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].pt)))\n output[\"j5_eta\"] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].eta)))\n output[\"j5_phi\"] += 
processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].phi)))\n output[\"j5_energy\"] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].energy)))\n output[\"j5_px\"] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].px)))\n output[\"j5_py\"] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].py)))\n output[\"j5_pz\"] += processor.column_accumulator(ak.to_numpy(pad_and_flatten(fwd_cleaned[:,0:1][BL].pz)))\n\n output[\"lead_btag_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].pt, axis=1)))\n output[\"lead_btag_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].eta, axis=1)))\n output[\"lead_btag_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].phi, axis=1)))\n output[\"lead_btag_energy\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].energy, axis=1)))\n output[\"lead_btag_px\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].px, axis=1)))\n output[\"lead_btag_py\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].py, axis=1)))\n output[\"lead_btag_pz\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 0:1][BL].pz, axis=1)))\n\n output[\"sublead_btag_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].pt, axis=1)))\n output[\"sublead_btag_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].eta, axis=1)))\n output[\"sublead_btag_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].phi, axis=1)))\n output[\"sublead_btag_energy\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].energy, axis=1)))\n output[\"sublead_btag_px\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].px, axis=1)))\n output[\"sublead_btag_py\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].py, axis=1)))\n output[\"sublead_btag_pz\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(high_score_btag[:, 1:2][BL].pz, axis=1)))\n\n output[\"fwd_jet_p\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].p, 1, clip=True), 0), axis=1)))\n output[\"fwd_jet_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].pt, 1, clip=True), 0), axis=1)))\n output[\"fwd_jet_eta\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].eta,1, clip=True), 0), axis=1)))\n output[\"fwd_jet_phi\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].phi,1, clip=True), 0), axis=1)))\n output[\"fwd_jet_energy\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].energy,1, clip=True), 0), axis=1)))\n output[\"fwd_jet_px\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].px,1, clip=True), 0), axis=1)))\n output[\"fwd_jet_py\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].py,1, clip=True), 0), axis=1)))\n output[\"fwd_jet_pz\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(ak.fill_none(ak.pad_none(j_fwd[BL].pz,1, clip=True), 0), axis=1)))\n\n output[\"mjj_max\"] += 
processor.column_accumulator(ak.to_numpy(ak.fill_none(ak.max(mjf[BL], axis=1),0)))\n output[\"delta_eta_jj\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(delta_eta[BL], axis=1)))\n\n output[\"met\"] += processor.column_accumulator(ak.to_numpy(met_pt[BL]))\n output[\"ht\"] += processor.column_accumulator(ak.to_numpy(ht[BL]))\n output[\"st\"] += processor.column_accumulator(ak.to_numpy(st[BL]))\n output[\"n_jet\"] += processor.column_accumulator(ak.to_numpy(ak.num(jet[BL])))\n output[\"n_btag\"] += processor.column_accumulator(ak.to_numpy(ak.num(btag[BL])))\n output[\"n_fwd\"] += processor.column_accumulator(ak.to_numpy(ak.num(fwd[BL])))\n output[\"n_central\"] += processor.column_accumulator(ak.to_numpy(ak.num(central[BL])))\n output[\"n_tau\"] += processor.column_accumulator(ak.to_numpy(ak.num(tau[BL])))\n output[\"n_track\"] += processor.column_accumulator(ak.to_numpy(ak.num(track[BL])))\n \n output[\"dilepton_pt\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(dilepton_pt[BL], axis=1)))\n output[\"dilepton_mass\"] += processor.column_accumulator(ak.to_numpy(ak.flatten(dilepton_mass[BL], axis=1)))\n output[\"min_bl_dR\"] += processor.column_accumulator(ak.to_numpy(min_bl_dR[BL]))\n output[\"min_mt_lep_met\"] += processor.column_accumulator(ak.to_numpy(min_mt_lep_met[BL]))\n\n output[\"label\"] += processor.column_accumulator(label)\n output[\"label_cat\"] += processor.column_accumulator(label_cat)\n output[\"weight\"] += processor.column_accumulator(weight.weight()[BL])\n \n output[\"presel\"][\"all\"] += len(ev[ss_selection])\n output[\"sel\"][\"all\"] += len(ev[BL])\n\n return output\n\n def postprocess(self, accumulator):\n return accumulator\n\n\n\n\nif __name__ == '__main__':\n\n import glob\n from klepto.archives import dir_archive\n from Tools.samples import * # fileset_2018 #, fileset_2018_small\n import pandas as pd\n\n overwrite = True\n year = 2018\n \n # load the config and the cache\n cfg = loadConfig()\n \n fileset = {\n ##'topW_v2': fileset_2018['topW_v2'],\n 'topW_v3': fileset_2018['topW_v3'], # 6x larger stats\n #'topW_EFT_cp8': fileset_2018['topW_EFT_cp8']\n ##'topW_v3': glob.glob('/hadoop/cms/store/user/dspitzba/nanoAOD/ttw_samples/topW_v0.2.3/ProjectMetis_TTWplusJetsToLNuEWK_5f_NLO_v2_RunIIAutumn18_NANO_v4/*_1.root'), # 6x larger stats\n 'TTW': fileset_2018['TTW'],\n 'TTZ': fileset_2018['TTZ'],\n 'TTH': fileset_2018['TTH'],\n 'ttbar': fileset_2018['ttbar'],\n 'rare': fileset_2018['TTTT'] + fileset_2018['diboson'], # also contains triboson\n 'DY': fileset_2018['DY'],\n ##'ttbar1l_MG': fileset_2018['ttbar1l_MG'],\n }\n \n exe_args = {\n 'workers': 16,\n 'function_args': {'flatten': False},\n \"schema\": NanoAODSchema,\n }\n exe = processor.futures_executor\n \n print (\"I'm running now\")\n \n if not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n \n output = processor.run_uproot_job(\n fileset,\n \"Events\",\n ML_preprocessor(year = year),\n exe,\n exe_args,\n chunksize=250000,\n )\n \n df_dict = {}\n for var in variables:\n df_dict.update({var: output[var].value})\n\n df_out = pd.DataFrame( df_dict )\n\n df_out.to_hdf('data/multiclass_input_v4.h5', key='df', format='table', mode='w')#, append=True)\n", "sub_path": "ML/multi_class_preprocessor.py", "file_name": "multi_class_preprocessor.py", "file_ext": "py", "file_size_in_byte": 21884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 23, "usage_type": "call"}, 
{"api_name": "coffea.processor", "line_number": 23, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 24, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 24, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 25, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 25, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 26, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 26, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 27, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 27, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 28, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 28, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 29, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 29, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 30, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 30, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 31, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 31, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 32, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 32, "usage_type": "name"}, {"api_name": "coffea.processor.defaultdict_accumulator", "line_number": 33, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 33, "usage_type": "name"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 116, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "coffea.processor.ProcessorABC", "line_number": 119, "usage_type": "attribute"}, {"api_name": "coffea.processor", "line_number": 119, "usage_type": "name"}, {"api_name": "coffea.processor.dict_accumulator", "line_number": 128, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 128, "usage_type": "name"}, {"api_name": "awkward.num", "line_number": 145, "usage_type": "call"}, {"api_name": "awkward.any", "line_number": 159, "usage_type": "call"}, {"api_name": "awkward.singletons", "line_number": 160, "usage_type": "call"}, {"api_name": "awkward.argmax", "line_number": 160, "usage_type": "call"}, {"api_name": "awkward.any", "line_number": 167, "usage_type": "call"}, {"api_name": "awkward.singletons", "line_number": 168, "usage_type": "call"}, {"api_name": "awkward.argmax", "line_number": 168, "usage_type": "call"}, {"api_name": "awkward.any", "line_number": 173, "usage_type": "call"}, {"api_name": "awkward.concatenate", "line_number": 175, "usage_type": "call"}, {"api_name": "Tools.helpers.get_four_vec", "line_number": 176, "usage_type": "call"}, {"api_name": "awkward.singletons", "line_number": 177, "usage_type": "call"}, {"api_name": "awkward.argmax", "line_number": 177, "usage_type": "call"}, {"api_name": "Tools.helpers.get_four_vec", "line_number": 178, "usage_type": "call"}, {"api_name": "awkward.singletons", "line_number": 179, "usage_type": "call"}, {"api_name": "awkward.argmin", "line_number": 179, "usage_type": "call"}, {"api_name": 
"Tools.helpers.get_four_vec", "line_number": 180, "usage_type": "call"}, {"api_name": "Tools.helpers.mt", "line_number": 186, "usage_type": "call"}, {"api_name": "awkward.min", "line_number": 187, "usage_type": "call"}, {"api_name": "awkward.argsort", "line_number": 191, "usage_type": "call"}, {"api_name": "awkward.singletons", "line_number": 205, "usage_type": "call"}, {"api_name": "awkward.argmax", "line_number": 205, "usage_type": "call"}, {"api_name": "awkward.argsort", "line_number": 207, "usage_type": "call"}, {"api_name": "awkward.min", "line_number": 211, "usage_type": "call"}, {"api_name": "awkward.singletons", "line_number": 215, "usage_type": "call"}, {"api_name": "awkward.argmax", "line_number": 215, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 216, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 216, "usage_type": "call"}, {"api_name": "awkward.sum", "line_number": 223, "usage_type": "call"}, {"api_name": "awkward.sum", "line_number": 224, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 229, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 230, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 231, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 232, "usage_type": "call"}, {"api_name": "coffea.analysis_tools.PackedSelection", "line_number": 235, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 242, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 243, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 244, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 245, "usage_type": "call"}, {"api_name": "coffea.analysis_tools.Weights", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 285, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 296, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 298, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 298, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 298, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 298, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 299, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 299, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 299, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 299, "usage_type": "call"}, {"api_name": "Tools.helpers.get_four_vec", "line_number": 301, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 302, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 302, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 302, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 302, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 303, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 303, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 303, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 303, "usage_type": "call"}, {"api_name": 
"coffea.processor.column_accumulator", "line_number": 304, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 304, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 304, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 304, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 305, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 305, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 305, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 305, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 306, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 306, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 306, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 306, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 307, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 307, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 307, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 307, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 308, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 308, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 308, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 308, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 309, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 309, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 309, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 309, "usage_type": "call"}, {"api_name": "Tools.helpers.get_four_vec", "line_number": 311, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 312, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 312, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 312, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 312, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 313, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 313, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 313, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 313, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 314, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 314, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 314, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 314, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 315, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 315, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 315, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 315, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 316, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 316, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 316, "usage_type": "call"}, {"api_name": 
"awkward.flatten", "line_number": 316, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 317, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 317, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 317, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 317, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 318, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 318, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 318, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 318, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 319, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 319, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 319, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 319, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 321, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 321, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 321, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 321, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 322, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 322, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 322, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 322, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 323, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 323, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 323, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 323, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 325, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 325, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 325, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 325, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 326, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 326, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 326, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 326, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 327, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 327, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 327, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 327, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 330, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 330, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 330, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 330, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 331, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 331, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 331, "usage_type": "call"}, 
{"api_name": "Tools.helpers.pad_and_flatten", "line_number": 331, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 332, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 332, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 332, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 332, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 333, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 333, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 333, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 333, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 334, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 334, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 334, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 334, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 335, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 335, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 335, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 335, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 336, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 336, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 336, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 336, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 338, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 338, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 338, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 338, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 339, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 339, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 339, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 339, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 340, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 340, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 340, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 340, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 341, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 341, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 341, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 341, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 342, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 342, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 342, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 342, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 343, "usage_type": "call"}, 
{"api_name": "coffea.processor", "line_number": 343, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 343, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 343, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 344, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 344, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 344, "usage_type": "call"}, {"api_name": "Tools.helpers.pad_and_flatten", "line_number": 344, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 346, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 346, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 346, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 346, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 347, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 347, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 347, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 347, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 348, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 348, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 348, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 348, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 349, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 349, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 349, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 349, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 350, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 350, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 350, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 350, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 351, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 351, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 351, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 351, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 352, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 352, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 352, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 352, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 354, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 354, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 354, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 354, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 355, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 355, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 355, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 355, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 356, 
"usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 356, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 356, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 356, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 357, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 357, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 357, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 357, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 358, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 358, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 358, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 358, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 359, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 359, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 359, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 359, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 360, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 360, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 360, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 360, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 362, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 362, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 362, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 362, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 362, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 362, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 363, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 363, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 363, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 363, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 363, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 363, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 364, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 364, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 364, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 364, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 364, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 364, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 365, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 365, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 365, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 365, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 365, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 365, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 366, "usage_type": "call"}, {"api_name": 
"coffea.processor", "line_number": 366, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 366, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 366, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 366, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 366, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 367, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 367, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 367, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 367, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 367, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 367, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 368, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 368, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 368, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 368, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 368, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 368, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 369, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 369, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 369, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 369, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 369, "usage_type": "call"}, {"api_name": "awkward.pad_none", "line_number": 369, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 371, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 371, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 371, "usage_type": "call"}, {"api_name": "awkward.fill_none", "line_number": 371, "usage_type": "call"}, {"api_name": "awkward.max", "line_number": 371, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 372, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 372, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 372, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 372, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 374, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 374, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 374, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 375, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 375, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 375, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 376, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 376, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 376, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 377, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 377, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 377, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 377, 
"usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 378, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 378, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 378, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 378, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 379, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 379, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 379, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 379, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 380, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 380, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 380, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 380, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 381, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 381, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 381, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 381, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 382, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 382, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 382, "usage_type": "call"}, {"api_name": "awkward.num", "line_number": 382, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 384, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 384, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 384, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 384, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 385, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 385, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 385, "usage_type": "call"}, {"api_name": "awkward.flatten", "line_number": 385, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 386, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 386, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 386, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 387, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 387, "usage_type": "name"}, {"api_name": "awkward.to_numpy", "line_number": 387, "usage_type": "call"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 389, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 389, "usage_type": "name"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 390, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 390, "usage_type": "name"}, {"api_name": "coffea.processor.column_accumulator", "line_number": 391, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 391, "usage_type": "name"}, {"api_name": "coffea.nanoevents.NanoAODSchema", "line_number": 434, "usage_type": "name"}, {"api_name": "coffea.processor.futures_executor", "line_number": 436, "usage_type": "attribute"}, {"api_name": "coffea.processor", "line_number": 436, "usage_type": "name"}, {"api_name": 
"sys.warnoptions", "line_number": 440, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 441, "usage_type": "call"}, {"api_name": "coffea.processor.run_uproot_job", "line_number": 443, "usage_type": "call"}, {"api_name": "coffea.processor", "line_number": 443, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 456, "usage_type": "call"}]} +{"seq_id": "223711893", "text": "from openerp import models, fields\nimport openerp.addons.decimal_precision as dp\nimport time\nfrom datetime import date\nfrom dateutil import relativedelta\nfrom openerp.tools.translate import _\n \nclass hr_holidays_status(models.Model):\n _inherit = \"hr.holidays.status\"\n\n paid_leave = fields.Boolean('Paid Leave')\n \n _sql_constraints = [\n ('paid_leave', 'unique(name, paid_leave)', 'Holidays paid leave must be unique !'),\n ]\n\nclass hr_employee(models.Model):\n _inherit=\"hr.employee\"\n\n remaining_paid_leaves = fields.Float('Remaining Paid Leaves', digits_compute=dp.get_precision('Account'), required=True)\n \nclass hr_payslip_run(models.Model):\n _inherit = 'hr.payslip.run'\n \n def close_payslip_run(self, cr, uid, ids, context=None):\n res = {}\n holiday_obj = self.pool.get('hr.holidays')\n holiday_id = self.pool.get('hr.holidays.status').search(cr, uid, [('paid_leave','=',True)])\n emp_ids = self.pool.get('hr.payslip').search(cr, uid, [('payslip_run_id','=',ids)])\n for payslip in self.pool.get('hr.payslip').browse(cr, uid, emp_ids, context=context):\n inputs = {\n 'name': _('Right to leave ' + str((date(*time.strptime(str(payslip.date_from),'%Y-%m-%d')[:3])).strftime('%Y-%m'))),\n 'holiday_type': 'employee',\n 'holiday_status_id': holiday_id[0],\n 'employee_id': payslip.employee_id.id,\n 'number_of_days_temp': payslip.employee_id.remaining_paid_leaves,\n 'state':'validate',\n 'type':'add',\n }\n holiday_obj.create(cr, uid, inputs, context=context)\n return super(hr_payslip_run, self).close_payslip_run(cr, uid, ids, context=context)", "sub_path": "prooaddons/hr_paid_leave/hr_paid_leave.py", "file_name": "hr_paid_leave.py", "file_ext": "py", "file_size_in_byte": 1815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "openerp.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 8, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 11, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 11, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 17, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 17, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 20, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 20, "usage_type": "name"}, {"api_name": "openerp.addons.decimal_precision.get_precision", "line_number": 20, "usage_type": "call"}, {"api_name": "openerp.addons.decimal_precision", "line_number": 20, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 22, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 32, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "22434946", "text": "import math\nfrom typing import List\n\nfrom euclid import Vector3, 
Quaternion\n\nfrom ga.chromosome_elem import ChromosomeElem\nfrom track_generator.state import State\nfrom track_generator.track_point import TrackPoint\nfrom track_generator.track_script import TrackScript\n\n\ndef generate_track(chromosome_elements: List[ChromosomeElem]) -> List[TrackPoint]:\n track_points: List[TrackPoint] = []\n\n start_position = Vector3(x=49.7, y=0.5, z=50.0)\n span = Vector3(x=0.0, y=0.0, z=0.0)\n span_dist = 2\n\n track_script = TrackScript(chromosome_elements=chromosome_elements)\n if track_script.parse_chromosome():\n\n dy = 0.0\n\n s = start_position\n s.y = 0.5\n span.x = 0.0\n span.y = 0.0\n span.z = span_dist\n turn_val = 10.0\n\n for track_script_element in track_script.track:\n\n if track_script_element.state == State.AngleDY:\n turn_val = track_script_element.value\n elif track_script_element.state == State.CurveY:\n dy = track_script_element.value * turn_val\n else:\n dy = 0.0\n\n for i in range(track_script_element.num_to_set):\n turn = dy\n rot = Quaternion.new_rotate_euler(math.radians(turn), 0, 0)\n span = rot * span.normalized()\n span *= span_dist\n s = s + span\n\n track_points.append(TrackPoint(x=s.x, y=s.z))\n\n return track_points\n", "sub_path": "track_generator/generator.py", "file_name": "generator.py", "file_ext": "py", "file_size_in_byte": 1470, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "ga.chromosome_elem.ChromosomeElem", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "track_generator.track_point.TrackPoint", "line_number": 13, "usage_type": "name"}, {"api_name": "euclid.Vector3", "line_number": 15, "usage_type": "call"}, {"api_name": "euclid.Vector3", "line_number": 16, "usage_type": "call"}, {"api_name": "track_generator.track_script.TrackScript", "line_number": 19, "usage_type": "call"}, {"api_name": "track_generator.state.State.AngleDY", "line_number": 33, "usage_type": "attribute"}, {"api_name": "track_generator.state.State", "line_number": 33, "usage_type": "name"}, {"api_name": "track_generator.state.State.CurveY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "track_generator.state.State", "line_number": 35, "usage_type": "name"}, {"api_name": "euclid.Quaternion.new_rotate_euler", "line_number": 42, "usage_type": "call"}, {"api_name": "euclid.Quaternion", "line_number": 42, "usage_type": "name"}, {"api_name": "math.radians", "line_number": 42, "usage_type": "call"}, {"api_name": "track_generator.track_point.TrackPoint", "line_number": 47, "usage_type": "call"}, {"api_name": "track_generator.track_point.TrackPoint", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "523288372", "text": "# coding:utf-8\nimport os\nimport csv\nimport sys\nimport glob\nimport time\nimport json\nimport runpy\nimport random\nimport socket\nimport gettext\nimport hashlib\nimport logging\nimport zipfile\nimport importlib\nimport threading\nimport configparser\n\nimport yaml\n\n\ndef new_hash():\n m = hashlib.md5()\n m.update(str(random.random()).encode(\"utf-8\"))\n return m.hexdigest().upper()\n\n\nclass DebugEngine:\n def __init__(self):\n formatter = logging.Formatter('')\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(formatter)\n file_handler = logging.FileHandler('Back.log', 'w', 'utf-8')\n file_handler.setFormatter(formatter)\n self.logger = logging.getLogger('logger')\n 
self.logger.setLevel(logging.DEBUG)\n self.logger.addHandler(stream_handler)\n self.logger.addHandler(file_handler)\n\n def debug(self, text):\n temp = '[DEBG]({:.7f}){}'\n text = temp.format(time.time(), text)\n self.logger.debug(text)\n\n def info(self, text):\n temp = '[INFO]({:.7f}){}'\n text = temp.format(time.time(), text)\n self.logger.info(text)\n\n def warn(self, text):\n temp = '[WARN]({:.7f}){}'\n text = temp.format(time.time(), text)\n self.logger.warning(text)\n\n def error(self, text):\n temp = '[ERRO]({:.7f}){}'\n text = temp.format(time.time(), text)\n self.logger.error(text)\n\n def critical(self, text):\n temp = '[!!!!]({:.7f}){}'\n text = temp.format(time.time(), text)\n self.logger.critical(text)\n\n\nclass EventEngine(DebugEngine):\n _listener_list = []\n\n def add_listener(self, type, listener, hash='', removable=True):\n new_listener = {\n 'type': type,\n 'listener': listener,\n 'hash': hash,\n 'removable': removable,\n }\n self._listener_list.append(new_listener)\n\n def remove_listener(self, type, listener=None, hash=''):\n for i, each in enumerate(self._listener_list):\n if each['type'] == type and each['listener'].__name__ == listener.__name__ and each['hash'] == hash:\n self._listener_list.pop(i)\n break\n\n def remove_all_listeners(self):\n new_listener_list = []\n for each in self._listener_list:\n if not each['removable']:\n new_listener_list.append(each)\n self._listener_list = new_listener_list\n\n def has_listener(self, type):\n found = False\n for each in self._listener_list:\n if each['type'] == type:\n found = True\n return found\n\n def dispatch_event(self, type, target='', value={}):\n event = {\n 'type': type,\n 'target': target,\n 'value': value,\n }\n for each in self._listener_list:\n if event['type'] == each['type']:\n t = threading.Thread(\n target=each['listener'],\n args=(event, ),\n kwargs={}\n )\n t.start()\n\n\nclass DataEngine(EventEngine):\n data = {}\n pool = []\n\n def fix_path(self):\n if getattr(sys, 'frozen', False):\n # frozen\n d = os.path.dirname(sys.executable)\n gamepath = os.path.dirname(d)\n else:\n # unfrozen\n d = os.path.dirname(os.path.realpath(__file__))\n gamepath = os.path.dirname(os.path.dirname(d))\n sys.path.append(gamepath)\n\n def self_check(self):\n self.data = {\n \"config\": {\n \"plugin\": {},\n \"dlc\": {},\n \"mod\": {},\n },\n \"class\": {},\n \"api\": {},\n \"entity\": {},\n \"db\": {}, # 可保存的数据\n \"act\": {},\n \"kojo\": {}\n }\n check_folder_list = [\n 'config',\n 'dlc',\n 'logic',\n 'mod',\n 'data',\n 'save',\n 'script'\n ]\n check_file_list = [\n 'config/config.ini'\n ]\n for each in check_folder_list:\n if not os.path.isdir(each):\n self.warn('Folder {} is not Exist. Creating...'.format(each))\n os.mkdir(each)\n for each in check_file_list:\n if not os.path.isfile(each):\n self.warn('File {} is not Exist. 
Creating...'.format(each))\n open(each, 'w')\n\n def load_config(self, config_path):\n config = self.load_data(config_path)\n for each in config['config.config'].keys():\n self.data['config'][each] = config['config.config'][each]\n\n def scan(self, path_to_folder):\n fileList = []\n for root, dirs, files in os.walk(path_to_folder):\n for each in files:\n fileList.append(root + '\\\\' + each)\n return fileList\n\n def save_to(self, save_num, save_name=''):\n self.save_file(self.data['db'],\n 'save/{}.{}.zip'.format(save_num, save_name))\n\n def load_from(self, save_num):\n save_file_path_list = self.scan('save')\n for each in save_file_path_list:\n if each.split('\\\\')[-1].split('.')[0] == str(save_num):\n self.data['db'] = self.load_file(each)\n\n def add(self, item):\n item['hash'] = new_hash()\n self.pool.append(item)\n return item['hash']\n\n def get(self, pattern):\n # 参考GraphQL的部分实现原理\n def match(item, pattern):\n found = True\n for each_key in pattern.keys():\n if not each_key in item.keys():\n found = False\n break\n if found:\n for each_key in pattern.keys():\n if isinstance(pattern[each_key], dict):\n if not match(item[each_key], pattern[each_key]):\n found = False\n break\n elif not pattern[each_key] == item[each_key]:\n found = False\n break\n if found:\n return True\n return False\n\n candidate_item = []\n for each in self.pool:\n if match(each, pattern):\n candidate_item.append(each)\n return candidate_item\n\n def path2dot(self, path):\n \"\"\"将路径转换为点路径\"\"\"\n path = path.replace('/', '\\\\')\n dot = '.'.join('.'.join(path.split('.')[0:-1]).split('\\\\'))\n ext = path.split('.')[-1]\n return dot, ext\n\n def dot2path(self, dot, ext):\n \"\"\"将点路径转换为路径\"\"\"\n path = '.'.join(['\\\\'.join(dot.split('.')), ext])\n return path\n\n def load_data(self, files, send_func=None):\n data = {}\n for each in files:\n key = self.path2dot(each)[0]\n # 载入文件\n self.info('│ ├─ Loading [{}]...'.format(each))\n if not send_func == None:\n bag = {\n 'type': 'load_text',\n 'value': 'Data: [ {} ]...'.format(key),\n 'from': 'b',\n 'to': 'r'\n }\n send_func(bag)\n data[key] = self.load_file(each)\n return data\n\n def save_data_to_file(self, dot_path, ext='yaml'):\n \"\"\"将一个data文件夹中加载的数据重新保存回去\"\"\"\n data = self.data[dot_path]\n path_to_file = self.dot2path(dot_path, ext)\n self.save_file(data, path_to_file)\n\n def load_file(self, path_to_file):\n \"\"\"从文件加载数据,并返回\"\"\"\n path_to_file = path_to_file.replace('/', '\\\\')\n ext = path_to_file.split('\\\\')[-1].split('.')[-1]\n data = None\n time_start = time.time()\n if ext in ['cfg', 'config', 'ini', 'inf']:\n config = configparser.ConfigParser()\n config.read(path_to_file)\n d = dict(config._sections)\n for k in d:\n d[k] = dict(d[k])\n data = d\n elif ext == 'csv':\n with open(path_to_file, 'r', newline='', encoding='utf-8') as f:\n reader = csv.reader(f)\n new_list = []\n for row in reader:\n new_list.append(row)\n data = new_list\n elif ext == 'json':\n with open(path_to_file, 'r', encoding='utf-8') as f:\n data = json.loads(''.join(f.readlines()))\n elif ext == 'yaml':\n with open(path_to_file, 'r', encoding='utf-8') as f:\n data = yaml.load(''.join(f.readlines()))\n elif ext == 'zip':\n with zipfile.ZipFile(path_to_file) as z:\n data = {}\n for file_name in z.namelist():\n with z.open(file_name) as f:\n data['.'.join(file_name.split('.')[0:-1])\n ] = json.loads(f.read())\n elif ext == 'txt':\n data = []\n with open(path_to_file, 'r') as f:\n for line in f.readlines():\n data.append(line[:-1])\n time_stop = time.time()\n # 
print('加载{}文件用时:{}ms'.format(path_to_file,\n # int((time_stop-time_start)*1000)))\n return data\n\n def save_file(self, data, path_to_file):\n \"\"\"保存数据到某文件\"\"\"\n path_to_file = path_to_file.replace('/', '\\\\')\n ext = path_to_file.split('\\\\')[-1].split('.')[-1]\n time_start = time.time()\n if ext in ['cfg', 'config', 'ini', 'inf']:\n config = configparser.ConfigParser()\n config.read_dict(data)\n with open(path_to_file, 'w')as f:\n config.write(f)\n elif ext == 'csv':\n with open(path_to_file, 'w', newline='', encoding='utf-8') as f:\n reader = csv.writer(f)\n reader.writerows(data)\n elif ext == 'json':\n with open(path_to_file, 'w', encoding='utf-8') as f:\n f.write(json.dumps(data, ensure_ascii=False))\n elif ext == 'yaml':\n with open(path_to_file, 'w', encoding='utf-8') as f:\n f.write(yaml.dump(data, allow_unicode=True,\n default_flow_style=False))\n elif ext == 'zip':\n with zipfile.ZipFile(path_to_file, 'w', zipfile.ZIP_LZMA) as z:\n for key in data:\n z.writestr('{}.json'.format(key), json.dumps(\n data[key], ensure_ascii=False))\n elif ext == 'txt':\n with open(path_to_file, 'w') as f:\n for line in data:\n f.write('{}\\n'.format(line))\n time_stop = time.time()\n # print('保存{}文件用时:{}ms'.format(path_to_file,\n # int((time_stop-time_start)*1000)))\n\n\nclass LoadEngine(DataEngine):\n def scan_plugin(self):\n # 扫描插件文件\n path = os.path.dirname(os.path.abspath(__file__))\n plugin_path_list = self.scan('{}/plugin'.format(path))\n # 提取插件名称\n plugin_name_list = []\n for each in plugin_path_list:\n plugin_name = '.'.join(each.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n self.info('│ ├─ Scanning [{}]...'.format(plugin_name))\n plugin_name_list.append(plugin_name)\n # 比对配置信息\n for each in plugin_name_list:\n if not each.lower() in self.data['config']['plugin'].keys():\n self.data['config']['plugin'][each.lower()] = 'no'\n # 同步\n config = configparser.ConfigParser()\n config.read_dict(self.data['config'])\n with open('config/config.ini', 'w') as configfile:\n config.write(configfile)\n return len(plugin_path_list)\n\n def load_plugin(self):\n num_of_loaded_plugins = 0\n path = os.path.dirname(os.path.abspath(__file__))\n for each in self.data['config']['plugin'].keys():\n if self.data['config']['plugin'][each] == 'yes':\n plugin_path_list = self.scan('{}/plugin'.format(path))\n for every in plugin_path_list:\n module_name = '.'.join(every.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n if module_name.lower() == each:\n self.info('│ ├─ Loading [{}]...'.format(module_name))\n # importlib.import_module('')\n with open(every, 'r', encoding='utf8') as target:\n sys.argv = [self]\n exec(''.join(target.readlines()))\n num_of_loaded_plugins += 1\n return num_of_loaded_plugins\n\n def scan_script(self):\n # 扫描插件文件\n script_path_list = self.scan('script')\n # 提取插件名称\n script_name_list = []\n for each in script_path_list:\n script_name = '.'.join(each.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n self.info('│ ├─ Scanning [{}]...'.format(script_name))\n script_name_list.append(script_name)\n return len(script_path_list)\n\n def load_script(self, send_func=None):\n num_of_loaded_script = 0\n script_path_list = self.scan('script')\n for every in script_path_list:\n module_name = '.'.join(every.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n self.info('│ ├─ Loading [{}]...'.format(module_name))\n if not send_func == None:\n bag = {\n 'type': 'load_text',\n 'value': 'Script: [ {} ]...'.format(module_name),\n 'from': 'b',\n 'to': 'r'\n }\n 
send_func(bag)\n with open(every, 'r', encoding='utf8') as target:\n sys.argv = [self]\n exec(''.join(target.readlines()))\n num_of_loaded_script += 1\n return num_of_loaded_script\n\n def scan_dlc(self):\n # 扫描插件文件\n dlc_path_list = self.scan('dlc')\n # 提取插件名称\n dlc_name_list = []\n for each in dlc_path_list:\n dlc_name = '.'.join(each.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n self.info('│ ├─ Scanning [{}]...'.format(dlc_name))\n dlc_name_list.append(dlc_name)\n # 比对配置信息\n for each in dlc_name_list:\n if not each.lower() in self.data['config']['dlc'].keys():\n self.data['config']['dlc'][each.lower()] = 'no'\n # 同步\n config = configparser.ConfigParser()\n config.read_dict(self.data['config'])\n with open('config/config.ini', 'w') as configfile:\n config.write(configfile)\n return len(dlc_path_list)\n\n def load_dlc(self):\n num_of_loaded_dlcs = 0\n for each in self.data['config']['dlc'].keys():\n if self.data['config']['dlc'][each] == 'yes':\n dlc_path_list = self.scan('dlc')\n for every in dlc_path_list:\n module_name = '.'.join(every.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n if module_name.lower() == each:\n self.info('│ ├─ Loading [{}]...'.format(module_name))\n with open(every, 'r', encoding='utf8') as target:\n sys.argv = [self]\n exec(''.join(target.readlines()))\n num_of_loaded_dlcs += 1\n return num_of_loaded_dlcs\n\n def scan_mod(self):\n # 扫描插件文件\n mod_path_list = self.scan('mod')\n # 提取插件名称\n mod_name_list = []\n for each in mod_path_list:\n mod_name = '.'.join(each.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n self.info('│ ├─ Scanning [{}]...'.format(mod_name))\n mod_name_list.append(mod_name)\n # 比对配置信息\n for each in mod_name_list:\n if not each.lower() in self.data['config']['mod'].keys():\n self.data['config']['mod'][each.lower()] = 'no'\n # 同步\n config = configparser.ConfigParser()\n config.read_dict(self.data['config'])\n with open('config/config.ini', 'w') as configfile:\n config.write(configfile)\n return len(mod_path_list)\n\n def load_mod(self):\n num_of_loaded_mods = 0\n for each in self.data['config']['mod'].keys():\n if self.data['config']['mod'][each] == 'yes':\n mod_path_list = self.scan('mod')\n for every in mod_path_list:\n module_name = '.'.join(every.replace(\n '/', '\\\\').split('\\\\')[-1].split('.')[0:-1])\n if module_name.lower() == each:\n self.info('│ ├─ Loading [{}]...'.format(module_name))\n with open(every, 'r', encoding='utf8') as target:\n sys.argv = [self]\n exec(''.join(target.readlines()))\n num_of_loaded_mods += 1\n return num_of_loaded_mods\n\n\nclass SocketEngine(LoadEngine):\n HOST = 'localhost'\n PORT = 11994\n _conn = None\n _cmd_list = []\n _gui_list = []\n isConnected = False\n\n def _parse_bag(self, bag):\n target = ''\n value = {}\n if 'hash' in bag:\n target = bag['hash']\n if 'value' in bag:\n value = bag['value']\n self.dispatch_event(bag['type'], target, value)\n\n def connect(self):\n def core():\n while True:\n data = self.recv()\n for each in data:\n self._parse_bag(each)\n\n def func_connect():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:\n self._conn = c\n try:\n self._conn.connect((self.HOST, self.PORT))\n self.isConnected = True\n self.info('│ └─ Connected!')\n core()\n except OSError as err:\n if err.errno == 10061:\n self.warn('前端未启动!')\n os._exit(1)\n else:\n self.error(err)\n\n t = threading.Thread(name='func_connect', target=func_connect)\n t.start()\n while True:\n if self.isConnected:\n break\n time.sleep(0.1)\n\n def send_config(self):\n bag = 
{\n 'type': 'init',\n 'value': {\n 'resolution': (800, 600)},\n 'from': 'b',\n 'to': 'm'\n }\n self.send(bag)\n\n def send_loaded(self):\n bag = {\n 'type': 'loaded',\n 'from': 'b',\n 'to': 'r'\n }\n self.send(bag)\n\n def send(self, bag):\n # self.debug(\"发送:{}\".format(bag))\n self._conn.send(json.dumps(bag, ensure_ascii=False).encode())\n\n def recv(self):\n data = self._conn.recv(4096000)\n # self.debug(\"接收:{}\".format(data))\n if not data:\n return\n data = data.decode().split('}{')\n for i in range(len(data)):\n if not i == 0:\n data[i] = '}' + data[i]\n if not i == len(data) - 1:\n data[i] = data[i] + '}'\n for i, each in enumerate(data):\n data[i] = json.loads(each)\n return data\n", "sub_path": "erajs/new_engine.py", "file_name": "new_engine.py", "file_ext": "py", "file_size_in_byte": 19839, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "hashlib.md5", "line_number": 23, "usage_type": "call"}, {"api_name": "random.random", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 36, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 127, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 171, "usage_type": "call"}, {"api_name": "time.time", "line_number": 258, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 260, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 268, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 275, "usage_type": "call"}, 
{"api_name": "yaml.load", "line_number": 278, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 280, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 285, "usage_type": "call"}, {"api_name": "time.time", "line_number": 291, "usage_type": "call"}, {"api_name": "time.time", "line_number": 300, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 302, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 308, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 312, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 315, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 318, "usage_type": "call"}, {"api_name": "zipfile.ZIP_LZMA", "line_number": 318, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 320, "usage_type": "call"}, {"api_name": "time.time", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 334, "usage_type": "call"}, {"api_name": "os.path", "line_number": 334, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 334, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path", "line_number": 356, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 356, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 367, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 400, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 420, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 437, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 457, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 474, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 505, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 505, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 505, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 515, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 519, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 524, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 546, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 560, "usage_type": "call"}]} +{"seq_id": "339146027", "text": "# Flax ID\n# Reference Python implementation\n\n# Clarity was chosen over the performance consideration,\n# but it is fast enough to be used in most practical scenarios.\n\nimport calendar\nimport random\nimport time\nfrom datetime import datetime\n\nfrom six.moves import range\n\n# The parameters below are really the part of the algorithm and are\n# not expected to change\n\n# Flax ID Regex\nFLAX_ID_REGEX = '[-0-9A-Z_a-z]+'\n\n# Total number of bits\nTOTAL_BITS = 96\n# We start from the year ezHome has started\nEPOCH_START = datetime(2015, 1, 1)\n# How many bits we reserve for the timestamp\nTIMESTAMP_BITS = 40\n# Remaining random bits\nRANDOM_BITS = TOTAL_BITS - TIMESTAMP_BITS\n\n\n# Modified Base 64 alphabet that preserves lexicographical ordering\nBASE64_ALPHABET = (\n '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '_abcdefghijklmnopqrstuvwxyz'\n)\n\n\ndef get_flax_id_num(timestamp=None):\n \"\"\"\n Generate a Flax ID number, using the provided timestamp\n or the current moment\n 
\"\"\"\n # Get milliseconds fvalue for the epoch start\n epoch_ms = int(calendar.timegm(EPOCH_START.timetuple()) * 1000)\n # Get milliseconds from the start of the epoch\n ms = int((timestamp or time.time()) * 1000) - epoch_ms\n # Get random bits\n random_bits = random.getrandbits(RANDOM_BITS)\n # Combine random bits with the time bits\n id_num = (ms << RANDOM_BITS) + random_bits\n return id_num\n\n\ndef base64_lex_encode(num):\n \"\"\"\n Take a number and encode it as a string using the custom Base64\n alphabet.\n \"\"\"\n # Convert the number to binary and pad the zeroes\n bnum = format(num, 'b').zfill(TOTAL_BITS)\n s = ''\n for x in range(0, TOTAL_BITS, 6):\n s += BASE64_ALPHABET[int(bnum[x:x + 6], 2)]\n return s\n\n\ndef get_flax_id(timestamp=None):\n \"\"\"\n Generate a string Flax ID, using the provided timestamp or the current\n moment.\n \"\"\"\n return base64_lex_encode(get_flax_id_num(timestamp))\n\n\nif __name__ == '__main__':\n import timeit\n print(\n timeit.timeit(\n 'for x in range(1000): get_flax_id()',\n number=100,\n setup='from __main__ import get_flax_id'\n )\n )\n", "sub_path": "flax_id/flax_id.py", "file_name": "flax_id.py", "file_ext": "py", "file_size_in_byte": 2145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.datetime", "line_number": 23, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "random.getrandbits", "line_number": 47, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 61, "usage_type": "call"}, {"api_name": "timeit.timeit", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "198368696", "text": "import logging\nimport os\n\n\ndef init_logging(debug=False, file=None):\n level = logging.DEBUG if debug else logging.INFO\n handlers = [logging.StreamHandler()]\n if file:\n dirname = os.path.dirname(file)\n if dirname:\n os.makedirs(dirname, exist_ok=True)\n handlers.append(logging.FileHandler(file))\n logging.basicConfig(level=level, format='[%(levelname)s\\t%(asctime)s] %(message)s',\n handlers=handlers)\n", "sub_path": "common/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.DEBUG", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "328211656", "text": "\"\"\"\nModule for Cloudpickle save patterns\n\"\"\"\n\n__author__ = \"Elisha Yadgaran\"\n\n\nfrom os import makedirs\nfrom os.path import dirname, isfile, join\nfrom typing import Any, Dict, Optional\n\nimport cloudpickle as pickle\n\nfrom simpleml.registries import FILEPATH_REGISTRY\nfrom simpleml.save_patterns.base import BaseSerializer\nfrom simpleml.utils.configuration import PICKLE_DIRECTORY\n\n\nclass CloudpicklePersistenceMethods(object):\n \"\"\"\n Base class for internal 
cloudpickle serialization/deserialization options\n \"\"\"\n\n @staticmethod\n def dump_object(obj: Any, filepath: str, overwrite: bool = True) -> None:\n \"\"\"\n Pickles an object to a string or to the filesystem. Assumes that a NULL\n filepath expects a serialized string returned\n\n Prepends path to SimpleML Pickle directory before saving. ONLY pass in\n a relative filepath from that location\n\n :param overwrite: Boolean indicating whether to first check if pickled\n object is already serialized. Defaults to not checking, but can be\n leverage by implementations that want the same artifact in multiple\n places\n \"\"\"\n if not overwrite:\n # Check if file was already serialized\n if isfile(filepath):\n return\n\n with open(filepath, \"wb\") as pickled_file:\n pickle.dump(obj, pickled_file) # , protocol=pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def dumps_object(obj: Any) -> str:\n \"\"\"\n Pickles an object to a string or to the filesystem. Assumes that a NULL\n filepath expects a serialized string returned\n\n Prepends path to SimpleML Pickle directory before saving. ONLY pass in\n a relative filepath from that location\n\n :param overwrite: Boolean indicating whether to first check if pickled\n object is already serialized. Defaults to not checking, but can be\n leverage by implementations that want the same artifact in multiple\n places\n \"\"\"\n return pickle.dumps(obj) # , protocol=pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load_object(filepath: str) -> Any:\n \"\"\"\n Loads an object from a serialized string or filesystem. When stream is\n True, it tries to load the file directly from the string.\n\n Prepends path to SimpleML Pickle directory before loading. ONLY pass in\n a relative filepath from that location\n \"\"\"\n with open(filepath, \"rb\") as pickled_file:\n return pickle.load(pickled_file)\n\n @staticmethod\n def loads_object(data: str) -> Any:\n \"\"\"\n Loads an object from a serialized string or filesystem. When stream is\n True, it tries to load the file directly from the string.\n\n Prepends path to SimpleML Pickle directory before loading. 
ONLY pass in\n a relative filepath from that location\n \"\"\"\n return pickle.loads(data)\n\n\nclass CloudpickleFileSerializer(BaseSerializer):\n @staticmethod\n def serialize(\n obj: Any,\n filepath: str,\n format_directory: str = PICKLE_DIRECTORY,\n format_extension: str = \".pkl\",\n destination_directory: str = \"system_temp\",\n **kwargs,\n ) -> Dict[str, str]:\n\n # Append the filepath to the pickle storage directory\n filepath = join(format_directory, filepath + format_extension)\n full_path = join(FILEPATH_REGISTRY.get(destination_directory), filepath)\n # make sure the directory exists\n makedirs(dirname(full_path), exist_ok=True)\n\n CloudpicklePersistenceMethods.dump_object(obj, full_path)\n\n return {\"filepath\": filepath, \"source_directory\": destination_directory}\n\n @staticmethod\n def deserialize(\n filepath: str, source_directory: str = \"system_temp\", **kwargs\n ) -> Dict[str, Any]:\n full_path = join(FILEPATH_REGISTRY.get(source_directory), filepath)\n\n return {\"obj\": CloudpicklePersistenceMethods.load_object(full_path)}\n\n\nclass CloudpickleInMemorySerializer(BaseSerializer):\n @staticmethod\n def serialize(obj: Any, **kwargs) -> Dict[str, str]:\n data = CloudpicklePersistenceMethods.dumps_object(obj)\n return {\"obj\": data}\n\n @staticmethod\n def deserialize(obj: str, **kwargs) -> Dict[str, Any]:\n return {\"obj\": CloudpicklePersistenceMethods.loads_object(obj)}\n", "sub_path": "simpleml/save_patterns/serializers/cloudpickle.py", "file_name": "cloudpickle.py", "file_ext": "py", "file_size_in_byte": 4377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "cloudpickle.dump", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 47, "usage_type": "name"}, {"api_name": "cloudpickle.dumps", "line_number": 60, "usage_type": "call"}, {"api_name": "cloudpickle.load", "line_number": 72, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 63, "usage_type": "name"}, {"api_name": "cloudpickle.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 75, "usage_type": "name"}, {"api_name": "simpleml.save_patterns.base.BaseSerializer", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 89, "usage_type": "name"}, {"api_name": "simpleml.utils.configuration.PICKLE_DIRECTORY", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "simpleml.registries.FILEPATH_REGISTRY.get", "line_number": 99, "usage_type": "call"}, {"api_name": "simpleml.registries.FILEPATH_REGISTRY", "line_number": 99, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 101, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "simpleml.registries.FILEPATH_REGISTRY.get", "line_number": 111, "usage_type": "call"}, {"api_name": "simpleml.registries.FILEPATH_REGISTRY", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 110, "usage_type": 
"name"}, {"api_name": "simpleml.save_patterns.base.BaseSerializer", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "376279720", "text": "\"\"\"Functions for document tokenization.\"\"\"\nimport string\n\nimport regex as re\n\n# This is designed to match:\n# - unicode separators (spaces, paragraph separators, etc.)\n# - unicode symbols (math symbols, currency symbols, etc.)\n# - other symbols (control characters, formatting indicators, etc.)\n# These typically will get in the way of algorithms operating at\n# the word level, so we want to substitute them out.\nSEPARATOR_SYMBOL_FILTER = re.compile(\n r'[\\p{Separator}\\p{Symbol}\\p{Other}]+',\n flags=re.MULTILINE\n)\n\n# This is designed to match a sequence of four symbols that are either:\n# - unicode punctuation\n# - unicode separators (spaces, paragraph separators, etc.)\n# We want to keep some punctuation so we can parse phrases, sentences, acronyms\n# and names with dots in them, but input text often has needless or excessive\n# punctuation as leftover entrails from whatever system we pulled the text\n# out of.\nEXCESSIVE_PUNCTUATION = re.compile(\n r'[\\p{Punctuation}\\p{Separator}]{4,}',\n flags=re.MULTILINE\n)\n\nSPECIAL_CHARACTERS = re.compile(\n r'[\\p{Punctuation}\\p{Separator}\\p{Symbol}\\p{Other}]+',\n flags=re.MULTILINE\n)\n\n# These are OntoNotes 5 / Penn TreeBank tags for Spacy tokens that\n# we probably don't want to include.\nFILTERED_TOKENS = (\n 'XX', # unknown tokens\n 'ADD', # email addresses\n 'CD', # cardinal number\n 'SP', # space(s)\n '$', # currency\n 'SYM' # symbols\n)\n\nPUNCTUATION = set(string.punctuation)\n\n\ndef clean_unicode_punctuation(input_str):\n \"\"\"\n Clean up Unicode punctuation in an input string.\n\n Replace unicode separators and symbols with ASCII spaces.\n Replace (punctuation, separator) sequences of >=4 with\n an ASCII period and a space.\n \"\"\"\n partial = SEPARATOR_SYMBOL_FILTER.sub(' ', input_str)\n return EXCESSIVE_PUNCTUATION.sub('. 
', partial)\n\n\ndef str_is_all_punctuation(input_str):\n \"\"\"\n Return True if input_str is made up of ASCII punctuation.\n\n This is necessary because Spacy doesn't always figure out\n whether a token is punctuation, and this is easier than figuring\n out how to fix Spacy's tokenizer.\n \"\"\"\n return len(set(input_str).difference(PUNCTUATION)) == 0\n\n\ndef is_bad_token(token):\n \"\"\"Filter tokens that aren't good for machine learning.\n\n Returns True if the token is bad.\n\n The `token` argument needs to be a Spacy Token object.\n \"\"\"\n return (\n token.tag_ in FILTERED_TOKENS or\n token.is_punct or\n token.is_stop or\n token.like_num or\n token.like_url or\n # get rid of errant email addresses\n '@' in token.text or\n # this is going to clobber some useful words\n # like \"car\", but it also gets rid of a lot of crap.\n len(token) < 4 or\n str_is_all_punctuation(token.text)\n )\n\n\ndef tokenize_with_spacy(doc):\n \"\"\"Turn a single document into a generator of tokens.\n\n The `doc` argument needs to be a Spacy Document.\n \"\"\"\n for ent in doc.ents:\n if len(ent) > 1:\n ent.merge(ent.root.tag_, ent.text, ent.root.ent_type_)\n for token in doc:\n if is_bad_token(token):\n continue\n text = ' '.join(token.lemma_.strip().split()).replace(' ', '_')\n if text:\n yield text\n", "sub_path": "nlp_playground/lib/tokenize.py", "file_name": "tokenize.py", "file_ext": "py", "file_size_in_byte": 3254, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "regex.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "regex.MULTILINE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "regex.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "regex.MULTILINE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "regex.compile", "line_number": 29, "usage_type": "call"}, {"api_name": "regex.MULTILINE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "342178663", "text": "\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\nimport sys\nimport os\n\nimport warnings\n\nfrom tensorflow import keras\nimport numpy as np\nfrom tensorflow.data import Dataset\nfrom engine.image import *\nimport tensorflow.image\n\nimport numpy as np\nimport json\nimport cv2\nimport time\nimport json\n\n\ndef load_data(paths, train = True):\n '''\n objective: load image files\n param: paths to each image files\n return: image files of input image and ground-truth image\n '''\n for img_path in paths:\n gt_path = img_path.decode(\"utf-8\").replace('.jpg','.h5').replace('images','ground-truth')\n img = tf.io.read_file(img_path)\n img = tf.image.decode_jpeg(img, channels = 3)\n img = tf.cast(img, tf.float32)\n img = img/255.0 # normalizing the images to [0,1]\n\n gt_file = h5py.File(gt_path, 'r')\n target = np.asarray(gt_file['density'])\n\n target = cv2.resize(target,(int(target.shape[1]/8),int(target.shape[0]/8)),interpolation = cv2.INTER_CUBIC)*64\n\n\n yield (img, target)\n\ndef load_best_vals():\n\t'''\n\tobjective: load best mae values from previous values\n\t'''\n\tval = json.load(open('best_vals.txt', 'r'))\n\treturn val['best_mae_a'], val['best_mae_b']\n\ndef reset_best_vals(best_mae_a, best_mae_b):\n\t'''\n\tobjective: store best mae values \n\t'''\n\tmae_dict = {\n\t\t'best_mae_a': 
best_mae_a,\n\t\t'best_mae_b': best_mae_b\n\t}\n\t# store a text file\n\twith open('best_vals.txt', 'w') as json_file:\n\t\tjson.dump(mae_dict, json_file)\n\ndef load_datasets():\n\t'''\n\tobjective: load datasets from lists of paths and apply load function\n\treturn: train_dataset, test_dataset - tf.data.Dataset\n\t'''\n\ttrain_a_list, test_a_list, train_b_list, test_b_list = load_data_list()\n\n\t# part_A\n\t# load dataset from generator defined as load_data\n\ttrain_a_dataset = tf.data.Dataset.from_generator(\n\t\tload_data, args = [train_a_list],output_types = (tf.float32, tf.float32), output_shapes = ((None,None,3), (None,None)))\n\ttrain_a_dataset = train_a_dataset.shuffle(100000)\n\n\ttest_a_dataset = tf.data.Dataset.from_generator(\n\t\tload_data, args = [test_a_list], output_types = (tf.float32, tf.float32), output_shapes = ((None,None,3), (None,None)))\n\t\n\t# part_B\n\ttrain_b_dataset = tf.data.Dataset.from_generator(\n\t\tload_data, args = [train_b_list],output_types = (tf.float32, tf.float32), output_shapes = ((None,None,3), (None,None)))\n\ttrain_b_dataset = train_b_dataset.shuffle(100000)\n\n\ttest_b_dataset = tf.data.Dataset.from_generator(\n\t\tload_data, args = [test_b_list], output_types = (tf.float32, tf.float32), output_shapes = ((None,None,3), (None,None)))\n\t\n\n\treturn train_a_dataset, test_a_dataset, train_b_dataset, test_b_dataset\n\ndef loss_fn(model, input_image, gt_image):\n\t'''\n\tobjective: calculate loss from input image and ground-truth\n\treturn: loss \n\t'''\n\toutput = model(np.expand_dims(input_image,0), training = True)\n\toutput = tf.squeeze(output, [0,3])\n # mean squared error\n\tloss = tf.reduce_mean(tf.square(output - gt_image))\n\treturn loss\n\ndef grad(model, input_image, gt_image):\n\t'''\n\tobjective: apply gradient descent method to update model's weights\n\t'''\n\twith tf.GradientTape() as tape:\n\t\tloss = loss_fn(model, input_image, gt_image)\n\treturn tape.gradient(loss, model.trainable_weights)\n\ndef fit(model, part, epochs, learning_rate = 0.0001):\n\t'''\n\ttrain model with part variable (\"A\" or \"B\") \n\t'''\n\tif part == \"A\":\n\t\ttrain_dataset, test_dataset, b_train, b_test = load_datasets()\n\t\t# get lowest mae from previous trained models to compare\n\t\t# if it's lower than those values, store the whole model into h5 file\n\t\tbest_mae, _ = load_best_vals()\n\t\tprogress_range = 44850\n\n\telif part == \"B\":\n\t\ta_train, a_test, train_dataset, test_dataset = load_datasets()\n\t\t_, best_mae = load_best_vals()\n\t\tprogress_range = 79800\n\n\telse: return(\"Please put A or B\")\n\n\toptimizer = tf.keras.optimizers.Adam(learning_rate = learning_rate)\n\t\n\t# train model\n\tprint('Part {} Learning started. 
It takes sometime.'.format(part))\n\n\tfor epoch in range(epochs):\n\t\t# init values\n\t\tavg_loss = 0.\n\t\ttrain_step = 0\n\t\ttest_step = 0\n\t\ttest_mae = 0\n\n\t\tloss_list = []\n\t\tprogress = ProgressMonitor(length = progress_range)\n\n\t\t# train process\n\t\tfor step, (images, gt_images) in enumerate(train_dataset):\n\n\t\t\tgrads = grad(model, images, gt_images)\n\t\t\toptimizer.apply_gradients(zip(grads, model.trainable_variables))\n\t\t\tloss = loss_fn(model, images, gt_images)\n\t\t\tavg_loss += loss\n\t\t\ttrain_step += 1\n\n\t\t\tloss_list.append(loss)\n\t\t\tprogress.update(step, sum(loss_list)/len(loss_list))\n\n\t\tavg_loss = avg_loss / train_step\n\n\t\t# test process\n\t\tfor step, (images, gt_images) in enumerate(test_dataset): \n\n\t\t\toutput = model(np.expand_dims(images,0))\n\t\t\ttest_step += 1\n\n\t\t\ttest_mae += abs(np.sum(output)-np.sum(gt_images))\n\n\t\ttest_mae = test_mae / test_step\n\n\n\t\tprint('Epoch:', '{}'.format(epoch + 1), \n\t\t 'Test MAE = ', '{:.5f}'.format(test_mae))\n\n\t# when test_mae is smaller than the stored lowest mae,\n\t# store whole model into h5 file\n\tif (test_mae < best_mae): \n\t\tmodel.save('part_{}_best_model_{}.h5'.format(part, epochs))\n\t\tif part == \"A\":\n\t\t\treset_best_vals(test_mae, _)\n\t\telse:\n\t\t\treset_best_vals(_, test_mae)\n\n\tprint('Learning Finished!')\n\n\treturn model\n\t\n", "sub_path": "engine/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tensorflow.compat.v1.enable_eager_execution", "line_number": 6, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.io.read_file", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.image.decode_jpeg", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 42, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 87, "usage_type": 
"attribute"}, {"api_name": "tensorflow.float32", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "123439500", "text": "from django.urls import path\nfrom . import views\n\napp_name ='pages'\n\nurlpatterns= [\n path('', views.index, name='index'),\n path('upload/', views.upload, name='upload'),\n path('detail//', views.detail, name='detail'),\n path('result//', views.result, name='result'),\n path('telegram/', views.telegram, name='telegram'),\n path('telegram_bot/', views.telegram_bot, name='telegram_bot'),\n # path('result/', views.result, name='result'),\n\n\n]", "sub_path": "pages/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "413319752", "text": "import pretrainedmodels\nfrom torch import nn\nimport torch\n\n\ndef get_model(model_name: str, num_classes=1000, pretrained: bool = False):\n \"\"\"\n\n Args:\n model_name: from the cadene list\n num_classes: number of classes for target models.\n pretrained: if model is pre-trained on imagenet.\n\n Returns:\n\n \"\"\"\n # create model\n if pretrained:\n print(f\"=> using pre-trained model '{model_name}'\")\n model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained=\"imagenet\")\n else:\n print(f\"=> creating model '{model_name}'\")\n model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained=None)\n\n if num_classes != 1000:\n dim_feats = model.last_linear.in_features\n model.last_linear = nn.Linear(dim_feats, num_classes, bias=True)\n\n if hasattr(model, \"avgpool\"):\n model.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))\n elif hasattr(model, \"avg_pool\"):\n model.avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))\n else:\n raise NotImplementedError(f\"No avgpool or avg_pool layer in the model {model_name}\")\n\n return Net(model, model_name)\n\n\nclass Net(nn.Module):\n def __init__(self, model, model_name):\n super(Net, self).__init__()\n\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n self.features = nn.Sequential(*list(model.children())[:-1]).to(device)\n self.last_linear = list(model.children())[-1]\n self.pool = nn.AdaptiveAvgPool2d(1)\n self.relu = nn.ReLU(inplace=True)\n self.model_name = model_name\n\n def forward(self, x):\n x = 
self.features(x)\n\n if \"densenet\" in self.model_name:\n x = self.relu(x)\n x = self.pool(x)\n\n x = x.view(x.size()[0], -1)\n\n x = self.last_linear(x)\n return x\n", "sub_path": "iglovikov_helper_functions/dl/pytorch/classification_models.py", "file_name": "classification_models.py", "file_ext": "py", "file_size_in_byte": 1882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pretrainedmodels.__dict__", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pretrainedmodels.__dict__", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "160822649", "text": "import numpy as np\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.lines as lines\nimport matplotlib.pyplot as plt\n\n\n\nclass Element:\n def __init__(self, top, left, width, height, color):\n self.top = top\n self.left = left\n self.width = width\n self.height = height\n self.color = color\n\n def get_center_x(self):\n return self.left + self.width / 2\n\n def get_center_y(self):\n return self.top - self.height / 2\n\n def get_bottom(self):\n return self.top - self.height\n\n def move_y(self, delta):\n self.top += delta\n\n\nclass Node(Element):\n def __init__(self, top, left, radius, color=\"gray\"):\n Element.__init__(self, top, left, radius*2, radius*2, color=color)\n self.radius = radius\n\n def get_radius(self):\n return self.radius\n\n def get_edge_out_position(self):\n return [self.left + self.width, self.top - self.height*0.5]\n\n def get_edge_in_position(self):\n return [self.left, self.top - self.height*0.5]\n\n def render(self, m_ax):\n circle = Circle((self.get_center_x(), self.get_center_y()), self.get_radius(), color=self.color)\n m_ax.add_patch(circle)\n return circle\n\n\ndef get_edge_positions(from_node, to_node):\n o = from_node.get_edge_out_position()\n d = to_node.get_edge_in_position()\n return ([o[0], d[0]], [o[1], d[1]])\n\ndef connect_nodes(node1, node2, ax):\n (a,b) = get_edge_positions(node1, node2)\n l1 = lines.Line2D(a,b, color=\"lightGray\", linewidth=4)\n ax.add_line(l1)\n\n\n\nclass Layer():\n def __init__(self, left, num_nodes, node_radius, title=None, spacing_nodes = 1, max_num_nodes_visible=None, color=\"gray\", 
font_size=36):\n self.nodes = []\n self.top = 0\n self.title = title\n self.font_size = font_size\n top = 0\n\n self.three_dots = None\n three_dots_index = -1\n restart_index = -1\n if (max_num_nodes_visible is not None) and (max_num_nodes_visible < num_nodes):\n three_dots_index = (max_num_nodes_visible // 2)\n restart_index = num_nodes - three_dots_index\n for i in range(num_nodes):\n if i > three_dots_index and i < restart_index:\n continue\n if i == three_dots_index:\n self.three_dots = ThreeDots(top, x = left + node_radius, dot_radius = max(min(4,node_radius*0.5),1), spacing = 2)\n top = self.three_dots.get_bottom() - spacing_nodes\n else:\n node = Node(top, left, radius = node_radius, color = color)\n self.nodes.append(node)\n top = node.get_bottom() - spacing_nodes\n self.bottom = top\n\n def set_top(self, top):\n delta = top - self.top\n for n in self.nodes:\n n.move_y(delta)\n self.top += delta\n self.bottom += delta\n\n\n def get_left(self):\n return self.nodes[0].left\n\n def get_width(self):\n return self.nodes[0].width\n\n def get_center_x(self):\n return self.get_left() + self.get_width() /2\n\n def get_right(self):\n return self.get_left() + self.get_width()\n\n def get_height(self):\n return abs(self.bottom - self.top)\n\n def get_bottom(self):\n return self.bottom\n\n def render(self, mAx):\n for n in self.nodes:\n n.render(mAx)\n if (self.three_dots is not None):\n self.three_dots.render(mAx)\n if (self.title is not None):\n mAx.text(x=self.get_center_x(), y=10, s=self.title, fontsize=self.font_size, horizontalalignment='center')\n\n\n def fully_connect(self, layer2, mAx):\n for a in self.nodes:\n for b in layer2.nodes:\n connect_nodes(a,b, mAx)\n\n\nclass ThreeDots(Layer):\n def __init__(self, top, x, dot_radius = 4, spacing = 2, color = \"lightGray\"):\n left = x - dot_radius\n Layer.__init__(self, left=left, num_nodes=3, node_radius=dot_radius, spacing_nodes=spacing, color=color)\n self.set_top(top)\n\n\nclass NNV():\n def __init__(self, layers_list, spacing_layer = 60, spacing_nodes=1, align = \"middle\", max_num_nodes_visible = 4, node_radius = 20, font_size = 18):\n self.layers = []\n left = 0\n self.left = 0\n self.spacing_layer = spacing_layer\n max_height = 0\n for l in layers_list:\n layer = Layer(left=left, num_nodes=l[\"units\"], node_radius=node_radius, title=l[\"title\"], color=l.get(\"color\", \"gray\"), max_num_nodes_visible=max_num_nodes_visible, font_size=font_size, spacing_nodes=spacing_nodes)\n self.layers.append(layer)\n left = layer.get_right() + self.spacing_layer\n max_height = max(max_height, layer.get_height())\n self.right = left\n\n # setting the alignment\n if (align == \"top\"):\n pass\n if (align == \"middle\"):\n for l in self.layers:\n empty_space = max_height - l.get_height()\n l.set_top(-empty_space/2)\n\n def render(self, save_to_file=None, do_not_show=False):\n mFig, mAx = plt.subplots()\n\n #creating layers\n for l in self.layers:\n l.render(mAx)\n\n\n #creating edges between nodes\n for i in range(len(self.layers) - 1):\n self.layers[i].fully_connect(self.layers[i+1], mAx)\n\n\n\n mAx.set_aspect(\"equal\") # same spacing on both axis\n mAx.autoscale(enable=True, axis='both', tight=None) # fit axis values\n\n # plt.rcParams[\"figure.figsize\"] = (200,10)\n plt.axis('off')\n if (save_to_file is not None):\n plt.savefig(save_to_file, bbox_inches='tight')\n\n if do_not_show == False:\n plt.show()\n\n return mFig, mAx", "sub_path": "build/lib/nnv/nnv.py", "file_name": "nnv.py", "file_ext": "py", "file_size_in_byte": 5770, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "matplotlib.patches.Circle", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}]} +{"seq_id": "374232133", "text": "import os\nimport argparse\nimport numpy as np\n\n\n\nparser = argparse.ArgumentParser(description='Rainbow')\nparser.add_argument('--env_num', type=int, default=1, help='enviroment')\nargs = parser.parse_args()\n\n\ncounter = 0\nfor every in [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:\n for update in [1, 2, 4, 6, 8, 10]:\n for size in [32, 64, 128, 256]:\n print (counter)\n os.system(f'python3 ./training.py \\\n --model_num {counter} \\\n --train_every {every} \\\n --learn_updates {update} \\\n --batch-size {size}')\n counter +=1\n", "sub_path": "20Agents/hyperparametersearch.py", "file_name": "hyperparametersearch.py", "file_ext": "py", "file_size_in_byte": 631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "164079875", "text": "import pygame,sys,os,BaseHTTPServer,json\ntry:\n\tfrom queue import Queue\nexcept:\n\tfrom Queue import Queue\n\nfrom threading import Thread\n\n#mp3 Player\nclass Player:\n\tdef __init__(self,path,songs):\n\t\tpygame.init()\n\t\tself.path = path\n\t\tfor song in os.listdir(path):\n\t\t\tsongs.append(song)\n\t\tself.curr = 0\n\t\tself.songs = songs\n\n\tdef play(self):\n\t\tsong = open(path+\"/\"+self.songs[self.curr])\n\t\tpygame.mixer.init()\n\t\tpygame.mixer.music.load(song)\n\t\tpygame.mixer.music.play(0,0)\n\n\tdef pause(self):\n\t\tif pygame.mixer.music.get_busy:\n\t\t\tpygame.mixer.music.pause()\n\n\tdef run(self,q):\n\t\tclock = pygame.time.Clock()\n\t\tspeed = 10\n\t\twhile True:\n\t\t\twhile not q.empty():\n\t\t\t\tif q.get() == \"looool\":\n\t\t\t\t\tself.play()\n\t\t\tclock.tick(speed)\n\n#Server\neventQueue =Queue()\nHOST_NAME = '0.0.0.0'\nPORT = 3000\n\nclass Handler(BaseHTTPServer.BaseHTTPRequestHandler):\n\tdef do_HEAD(self):\n\t\tself.send_response(200)\n\t\tself.send_header(\"Content-type\",\"text/plain\")\n\t\tself.end_headers()\n\n\tdef do_GET(self):\n\t\tif self.path == \"/\":\n\t\t\tresp = json.dumps(songs)\n\t\t\tself.send_response(200)\n\t\telse:\n\t\t\tself.send_response(404)\n\t\t\tresp = \"not found\"\n\t\tself.send_header(\"Content-Type\",\"text/plain\")\n\t\tself.end_headers()\n\t\tself.wfile.write(resp)\n\n\tdef 
do_POST(self):\n\t\treq=self.rfile.read(int(self.headers['Content-Length']))\n\t\tself.send_response(200)\n\t\tself.send_header(\"Content-Type\",\"text/plain\")\n\t\tself.end_headers()\n\t\teventQueue.put(req)\n\ndef runPlayer(q,path,songs):\n\tplayer = Player(path,songs)\n\tplayer.run(q)\n\nsongs = []\npath = sys.argv[1]\nplayer = Thread(target=runPlayer,args=(eventQueue,path,songs))\nplayer.daemon = True\nplayer.start()\n\nserver_class = BaseHTTPServer.HTTPServer\nhttpd = server_class((HOST_NAME,PORT),Handler)\nprint(\"server started\")\ntry:\n\thttpd.serve_forever()\nexcept KeyboardInterrupt:\n\tpass\n\nhttpd.server_close()\nprint(\"server stoped\")", "sub_path": "server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pygame.init", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.pause", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "Queue.Queue", "line_number": 39, "usage_type": "call"}, {"api_name": "BaseHTTPServer.BaseHTTPRequestHandler", "line_number": 43, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 72, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 73, "usage_type": "call"}, {"api_name": "BaseHTTPServer.HTTPServer", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "525009486", "text": "import os\nimport numpy as np\nimport pandas as pd\nimport random\nimport csv\nimport string\nimport sys\nimport json\nfrom scipy import stats\nfrom student import Textgen\nimport os\nimport bisect\n\n\nclass Student:\n genders = ['male', 'female']\n religions = ['christian', 'jewish', 'athiest', 'muslim', 'none']\n races = ['black', 'white', 'asian', 'hispanic', 'mixed', 'native american', 'other']\n personalities = [\"normie\", \"stoner\", \"brogrammer\", \"tryhard\", \"nerd\", \"alternative\"]\n counties = ['alameda', 'alpine', 'amador', 'butte', 'calaveras', 'colusa',\n 'contra costa', 'del norte', 'el dorado', 'fresno', 'glenn',\n 'humboldt', 'imperial', 'inyo', 'kern', 'kings', 'lake',\n 'lassen', 'los angeles', 'madera', 'marin', 'mariposa',\n 'mendocino', 'merced', 'modoc', 'mono', 'monterey', 'napa',\n 'nevada', 'orange', 'placer', 'plumas', 'riverside',\n 'sacramento', 'san benito', 'san bernardino', 'san diego',\n 'san francisco', 'san joaquin', 'san luis obispo', 'san mateo',\n 'santa barbara', 'santa clara', 'santa cruz', 'shasta', 'sierra',\n 'siskiyou', 'solano', 'sonoma', 'stanislaus', 'sutter', 'tehama',\n 'trinity', 'tulare', 'tuolumne', 
'ventura', 'yolo', 'yuba']\n\n def __init__(self, county, race, gender, lastnames, boy_names, girl_names, schools, activities, areacodes, clubs,\n jobs, soft_skills):\n\n self.school_year = self.get_school_year()\n self.personality = self.get_personality()\n self.jobs = jobs\n self.work = self.get_work()\n self.last_names = lastnames\n self.boy_names = boy_names\n self.girl_names = girl_names\n self.schools = schools\n self.activities_data = activities\n self.areacodes = areacodes\n self.clubs = clubs\n self.gender = gender\n self.race = race\n self.county = county\n self.name = self.get_name()\n self.highschool, self.hometown, self.school_religion = self.get_highschool_and_hometown()\n self.phone = self.get_phone()\n self.email = self.get_email()\n self.religion = self.get_religion()\n self.activities = self.get_activities()\n self.clubs = self.get_clubs()\n self.soft_skills = self.get_skills(soft_skills)\n self.tech_skills = self.get_tech_skills()\n self.key = abs(hash(str(self)) % 100000000)\n print(self.key)\n\n def draw_from_distribution(self, values, counts, num=1):\n s = sum(counts)\n p = [c / s for c in counts]\n return list(np.random.choice(values, num, p=p))\n\n def get_school_year(self):\n return str(random.randint(1, 5))\n\n def get_work(self):\n cols = self.jobs.columns\n jobs = self.jobs[cols[0]].tolist()\n counts = self.jobs[cols[1]].tolist()\n chance = random.random()\n if self.school_year == 4 and chance < 0.85:\n if random.random() < 0.7:\n return self.draw_from_distribution(jobs, counts, 2)\n return self.draw_from_distribution(jobs, counts)\n elif self.school_year == 3 and chance < 70:\n return self.draw_from_distribution(jobs, counts)\n elif self.school_year == 2 and chance < 40:\n return self.draw_from_distribution(jobs, counts)\n elif chance < 10:\n return self.draw_from_distribution(jobs, counts)\n else:\n return []\n\n def get_personality(self):\n return self.personalities[random.randint(0, len(self.personalities) - 1)]\n\n def get_name(self):\n return self.get_first_name() + ' ' + self.get_last_name()\n\n def get_last_name(self):\n col = self.last_names.columns\n names = [n.lower().capitalize() for n in self.last_names[col[0]].tolist()]\n counts = [int(s.replace(\",\", \"\")) for s in self.last_names[col[1]].tolist()]\n return self.draw_from_distribution(names, counts)[0]\n\n def get_first_name(self):\n if self.gender is 'Male':\n col = self.boy_names.columns\n names = self.boy_names[col[0]].tolist()\n counts = self.boy_names[col[1]].tolist()\n race = self.boy_names[col[2]].tolist()\n else:\n col = self.girl_names.columns\n names = self.girl_names[col[0]].tolist()\n counts = self.girl_names[col[1]].tolist()\n race = self.girl_names[col[2]].tolist()\n return self.draw_from_distribution(names, counts)[0]\n\n def get_highschool_and_hometown(self):\n try:\n sample = self.schools[self.schools['County'] == self.county].sample()\n return tuple(sample[['School', 'City', 'Religion']].to_numpy()[0])\n except:\n print(self.county)\n\n def get_bio(self):\n if self.archetype is 'normie':\n return self.normie_bio()\n\n def get_email(self):\n first, last = self.name.split()\n return (first[0] + last[:random.randint(4, 8)] + \"@calpoly.edu\").lower()\n\n def get_activities(self):\n col = self.activities_data.columns\n active = ['normie', 'brogrammer', 'alternative']\n if self.personality in active:\n num_activities = random.randint(2, 5)\n else:\n num_activities = random.randint(2, 3)\n preference = 'masculine' if self.gender is 'male' else 'feminine'\n data = 
self.activities_data[self.activities_data[col[1]].str.match(self.personality)]\n activities = self.activities_data[col[0]].tolist()\n preferences = self.activities_data[col[2]].tolist()\n counts = []\n for p in preferences:\n if preference == p:\n counts.append(3)\n elif preference == 'none':\n counts.append(2)\n else:\n counts.append(1)\n activities = self.draw_from_distribution(activities, counts, num_activities)\n return list(set(activities))\n\n def get_phone(self):\n digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n data_small = self.areacodes[self.areacodes['city'] == self.hometown]\n if data_small.empty:\n phone = \"(\" + str(self.areacodes.sample()['code'].to_numpy()[0])\n else:\n phone = \"(\" + str(data_small.sample()['code'].to_numpy()[0])\n index = random.randint(1, 8)\n phone = phone + ')-' + digits[index]\n index2 = random.randint(0, 8)\n index3 = random.randint(0, 8)\n while (index == index2):\n index2 = random.randint(0, 8)\n phone = phone + digits[index2] + digits[index3] + '-'\n for i in range(3):\n phone = phone + digits[random.randint(0, 8)]\n return phone\n\n def get_clubs(self):\n # only one racial or religious club, make sure religion is the same\n df = self.clubs.copy()\n col = df.columns\n active = ['nerd', 'tryhard']\n if self.personality in active:\n num_activities = random.randint(1, 4)\n else:\n num_activities = random.randint(0, 3)\n preference = 'masculine' if self.gender is 'male' else 'feminine'\n probs = np.ones(len(df))\n df['probabilities'] = probs\n df.loc[df.academic == True, 'probabilities'] *= 3.5\n df.loc[df.personality == self.personality, 'probabilities'] *= 2\n df.loc[df.personality == 'none', 'probabilities'] *= 1.5\n df.loc[df.gender == preference, 'probabilities'] *= 1.5\n df.loc[df.race == self.race, 'probabilities'] *= 1.5\n df.loc[(df.race != self.race) & (df.race != 'none'), 'probabilities'] = 0\n df.loc[df.religion == self.religion, 'probabilities'] *= 1.5\n df.loc[(df.religion != self.religion) & (df.religion != 'none'), 'probabilities'] = 0\n\n # revised_data = data[data[col[1]].str.match(self.personality)].append(data[data[col[1]].str.match('normie')])\n activities = df[col[0]].tolist()\n p = df['probabilities'].tolist()\n self.draw_from_distribution(activities, p, num_activities)\n clubs1 = list(set(activities))\n return clubs1\n\n def get_religion(self):\n if self.school_religion != 'None':\n if random.random() < 0.7:\n return self.school_religion\n else:\n return 'none'\n elif self.personality == 'nerd':\n if random.random() < 0.5:\n return 'athiest'\n else:\n return 'none'\n else:\n s = random.random()\n if s < 0.7:\n return 'none'\n elif s < 0.88:\n return 'christian'\n elif s < 0.93:\n return 'jewish'\n elif s < 0.99:\n return 'athiest'\n else:\n return 'muslim'\n\n def to_dict(self):\n return {'name': self.name,\n 'year': self.school_year,\n 'gender': self.gender,\n 'county': self.county,\n 'race': self.race,\n 'personality': self.personality,\n 'highschool': self.highschool,\n 'hometown': self.hometown,\n 'phone': self.phone,\n 'email': self.email,\n 'religion': self.religion,\n 'activites': self.activities,\n 'clubs': self.clubs,\n 'work': self.work}\n\n def __str__(self):\n return (('Year: ' + self.school_year + '\\n' +\n 'Gender: ' + self.gender + '\\n' +\n 'Race: ' + self.race + '\\n' +\n 'County: ' + self.county + '\\n' +\n 'Name: ' + self.name + '\\n' +\n 'Personality: ' + self.personality + '\\n' +\n 'High School: ' + self.highschool + '\\n' +\n 'School Religion: ' + self.school_religion + '\\n' +\n 
'Hometown: ' + self.hometown + '\\n' +\n 'Phone Number: ' + self.phone + '\\n' +\n 'Email: ' + self.email + '\\n' +\n 'Religion: ' + self.religion + '\\n' +\n 'Activities: ' + str(self.activities) + '\\n'\n 'Clubs: ' + str(self.clubs) + '\\n' +\n 'Jobs: ' + str(self.jobs) + '\\n'))\n\n def get_skills(self, soft_skills):\n col = soft_skills.columns\n num_activities = random.randint(2, 6)\n preference = self.personality\n soft = soft_skills[col[0]].tolist()\n preferences = soft_skills[col[1]].tolist()\n counts = []\n for p in preferences:\n if preference == p:\n counts.append(7)\n else:\n counts.append(1)\n s = sum(counts)\n counts = [c / s for c in counts]\n samples = stats.rv_discrete(values=(np.arange(len(counts)), counts)).rvs(size=num_activities)\n return list(set([soft[s] for s in samples]))\n\n def get_tech_skills(self):\n data = WeightedTuple({'Back end': 25, 'Front end': 30, 'Graphics/Games': 5, 'Low level': 5, \"Security\": 10,\n \"Machine Learning\": 25})\n return random.choice(data)\n\n\nclass WeightedTuple(object):\n\n def __init__(self, items):\n self.indexes = []\n self.items = []\n next_index = 0\n for key in sorted(items.keys()):\n val = items[key]\n self.indexes.append(next_index)\n self.items.append(key)\n next_index += val\n\n self.len = next_index\n\n def __getitem__(self, n):\n if n < 0:\n n = self.len + n\n if n < 0 or n >= self.len:\n raise IndexError\n\n idx = bisect.bisect_right(self.indexes, n)\n return self.items[idx - 1]\n\n def __len__(self):\n return self.len\n\n\ndef populate_table():\n index = []\n pop = []\n sum = 0\n for c in Student.counties:\n for r in Student.races:\n for g in Student.genders:\n index.append((c, r, g))\n pop.append(random.randint(10, 100))\n return index, pop\n\n\ndef read_students(data):\n l = json.loads(data)\n return [read_student(d) for d in l]\n\n\ndef read_student(d):\n s = Student(d['county'], d['race'], d['gender'])\n s.name = d['name']\n s.archetype = d['archetype']\n return s\n\n\ndef build_students(n=100):\n index, pop = populate_table()\n total = sum(pop)\n pop = [p / total for p in pop]\n students = stats.rv_discrete(values=(np.arange(len(pop)), pop)).rvs(size=n)\n location = os.path.dirname(os.path.realpath(__file__))\n lastnames = pd.read_csv(os.path.join(location, 'data', 'lastnames.csv'))\n boy_names, girl_names = np.split(pd.read_csv(os.path.join(location, 'data', 'names.csv'), index_col=False),\n np.arange(3, 6, 3), axis=1)\n schools = pd.read_csv(os.path.join(location, 'data', 'schools.csv'))\n activities = pd.read_csv(os.path.join(location, 'data', 'activities.csv'))\n areacodes = pd.read_csv(os.path.join(location, 'data', 'areacodes.csv'))\n clubs = pd.read_csv(os.path.join(location, 'data', 'clubs.csv'))\n soft = pd.read_csv(os.path.join(location, 'data', 'soft_skills.csv'))\n jobs = pd.read_csv(os.path.join(location, 'data', 'jobs.csv'))\n return [Student(*index[s], lastnames, boy_names, girl_names, schools, activities, areacodes, clubs, jobs, soft) for\n s in students]\n\n\ndef get_students(n=100):\n S = build_students(n)\n D = [s.to_dict() for s in S]\n return D\n\n\nif __name__ == '__main__':\n for s in build_students(int(sys.argv[1])):\n print(s)\n sys.stdout.flush()\n", "sub_path": "backend/student/resumegeneration/StudentGenerator-luis-3/student.py", "file_name": "student.py", "file_ext": "py", "file_size_in_byte": 13622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.random.choice", "line_number": 64, "usage_type": "call"}, 
{"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "random.random", "line_number": 73, "usage_type": "call"}, {"api_name": "random.random", "line_number": 75, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 125, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 131, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 133, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 156, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 158, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 159, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 161, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 164, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 173, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 177, "usage_type": "call"}, {"api_name": "random.random", "line_number": 197, "usage_type": "call"}, {"api_name": "random.random", "line_number": 202, "usage_type": "call"}, {"api_name": "random.random", "line_number": 207, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 254, "usage_type": "call"}, {"api_name": "scipy.stats.rv_discrete", "line_number": 266, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 266, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 266, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 272, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 295, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 310, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 315, "usage_type": "call"}, {"api_name": "scipy.stats.rv_discrete", "line_number": 330, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 330, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path", "line_number": 331, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 331, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 332, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 332, "usage_type": "call"}, {"api_name": "os.path", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 333, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 334, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 337, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path", "line_number": 337, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 352, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 354, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 354, "usage_type": "attribute"}]} +{"seq_id": "172346258", "text": "#This program will read an array of values from a spreadsheet,\n#then prompt a user to input a number from 1-5 and print the\n#correct value.\n\n#import openpyxl module function load_workbook\nfrom openpyxl import load_workbook\n\n#Define variable name for the workbook\nwb = load_workbook('samplebook.xlsx')\n\n#Define variable name for worksheet\nsheet = wb['Sheet1']\n\n#Read in values of cells C1 through C5 as list (array).\nCells_List=[]\nfor i in range(1,6):\n Cells_List.append(int(sheet.cell(i,3).value))\nprint(Cells_List)\n\n#Prompt user for the array item.\n#Cell_Wanted=input(\"Please enter the cell you want the value for: \")\n\n#Print the desired cell.\n#print(int(Cells_List[Cell_Wanted-1]))\n\n#Square the value of each cell and write it to a new list.\nCell_Squares=[]\nfor i in range (0,len(Cells_List)):\n Cell_Squares.append(Cells_List[i]*Cells_List[i])\nprint(Cell_Squares)\n\n#Cube the value of each cell and write it to a new list.\nCell_Cubes=[]\nfor i in range (0,len(Cells_List)):\n Cell_Cubes.append(Cells_List[i]*Cells_List[i]*Cells_List[i])\nprint(Cell_Cubes)\n\n#Take the even values only from Cells_List and write them to a new list.\nCell_Evens=[]\nfor i in range (0, len(Cells_List)):\n if (Cells_List[i])%2 == 0:\n Cell_Evens.append(Cells_List[i])\nprint(Cell_Evens)\n\n#Take the even values from Cell_Evens, square them, and write them to a new list.\nCell_Even_Squares=[]\nfor i in range (0,len(Cell_Evens)):\n Cell_Even_Squares.append(Cell_Evens[i]*Cell_Evens[i])\nprint(Cell_Even_Squares)\n\n#Print the values of list Cells_List.\n#for j in range(1,6):\n# print(Cells_List[j-1])", "sub_path": "WhichCell.py", "file_name": "WhichCell.py", "file_ext": "py", "file_size_in_byte": 1589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "313195817", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('todo', '0004_auto_20160206_1552'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='todo',\n name='severity',\n field=models.IntegerField(default=5, choices=[(1, 'Urgente'), (2, 'Importante'), (3, 'Moderado'), (4, 'Bajo'), (5, 'Trivial')]),\n ),\n ]\n", "sub_path": "apps/todo/migrations/0005_todo_severity.py", "file_name": "0005_todo_severity.py", "file_ext": "py", 
"file_size_in_byte": 487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "356444088", "text": "import requests\nimport time\nfrom funcs import add_word, get_word, leninc, indexinc\nfrom random import randint\nfrom key import token, groupaddr, bot_id\nfrom markovchain import read_text, mchain, message\n\nmyurl = 'https://api.groupme.com/v3/bots/post'\n\nrequest_params = { 'token':token}\n\nwhile True:\n response = requests.get(groupaddr, params = request_params)\n if (response.status_code == 200):\n response_messages = response.json()['response']['messages']\n \n\n for m in response_messages:\n text =''\n string = m['text']\n strlist = string.split() \n\n #Call/Response Harry\n if strlist[0] == '/help':\n text = 'COMMANDS\\n--------------------------------\\nadd word: \"Harry ____\"\\nget phrase: \"Show me Harry\"'\n \n if strlist[0] == 'Harry':\n add_word(strlist[1])\n text = 'added ' + strlist[1] + ' to the wordbank'\n \n if strlist[0] == 'Show' and strlist[1] == 'me' and strlist[2] == 'Harry':\n text = get_word()\n \n #Markov Chain Harry\n if strlist[0] != 'Harry' and strlist[0] != '/help' and 'h/' not in string:\n avglen = int(round(leninc(len(strlist)) / indexinc()))\n if m['name'] == 'Eliott Brown':\n txt = open(\"elog.txt\", \"a+\")\n txt.write(string + ' ')\n txt.close()\n contents = read_text(\"elog.txt\")\n chain1 = {}\n emarkovchain = mchain(contents, chain1)\n print(emarkovchain)\n if m['name'] == 'Ozan Ergungor':\n txt = open(\"olog.txt\", \"a+\")\n txt.write(string + ' ')\n txt.close()\n contents = read_text(\"olog.txt\")\n chain2 = {}\n omarkovchain = mchain(contents, chain2)\n print(omarkovchain)\n if m['name'] == 'Izuho Suzuki':\n txt = open(\"ilog.txt\", \"a+\")\n txt.write(string + ' ')\n txt.close()\n contents = read_text(\"ilog.txt\")\n chain3 = {}\n imarkovchain = mchain(contents, chain3)\n print(imarkovchain)\n if m['name'] == 'Martin Konstantinov':\n txt = open(\"mlog.txt\", \"a+\")\n txt.write(string + ' ')\n txt.close()\n contents = read_text(\"mlog.txt\")\n chain4 = {}\n mmarkovchain = mchain(contents, chain4)\n print(mmarkovchain)\n if m['name'] == 'Alvaro Matos':\n txt = open(\"alog.txt\", \"a+\")\n txt.write(string + ' ')\n txt.close()\n contents = read_text(\"alog.txt\")\n chain5 = {}\n amarkovchain = mchain(contents, chain5)\n print(amarkovchain)\n if m['name'] == 'Damian Viramontes': \n txt = open(\"dlog.txt\", \"a+\")\n txt.write(string + ' ')\n txt.close()\n contents = read_text(\"dlog.txt\")\n chain6 = {}\n dmarkovchain = mchain(contents, chain6)\n print(dmarkovchain)\n \n if strlist[0] == 'h/Eliott':\n text = message(emarkovchain, avglen)\n if strlist[0] == 'h/Ozan':\n text = message(omarkovchain, avglen)\n if strlist[0] == 'h/Izuho':\n text = message(imarkovchain, avglen)\n if strlist[0] == 'h/Martin':\n text = message(mmarkovchain, avglen)\n if strlist[0] == 'h/Alvaro':\n text = message(amarkovchain, avglen)\n if strlist[0] == 'h/Damian':\n text = message(dmarkovchain, avglen)\n\n body 
= {'bot_id': bot_id, 'text': text}\n #requests.post(url = myurl, data = body)\n request_params['since_id'] = m['id']\n break\n\n time.sleep(5)", "sub_path": "groupme_bot.py", "file_name": "groupme_bot.py", "file_ext": "py", "file_size_in_byte": 4264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "key.token", "line_number": 10, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "key.groupaddr", "line_number": 13, "usage_type": "argument"}, {"api_name": "funcs.add_word", "line_number": 28, "usage_type": "call"}, {"api_name": "funcs.get_word", "line_number": 32, "usage_type": "call"}, {"api_name": "funcs.leninc", "line_number": 36, "usage_type": "call"}, {"api_name": "funcs.indexinc", "line_number": 36, "usage_type": "call"}, {"api_name": "markovchain.read_text", "line_number": 41, "usage_type": "call"}, {"api_name": "markovchain.mchain", "line_number": 43, "usage_type": "call"}, {"api_name": "markovchain.read_text", "line_number": 49, "usage_type": "call"}, {"api_name": "markovchain.mchain", "line_number": 51, "usage_type": "call"}, {"api_name": "markovchain.read_text", "line_number": 57, "usage_type": "call"}, {"api_name": "markovchain.mchain", "line_number": 59, "usage_type": "call"}, {"api_name": "markovchain.read_text", "line_number": 65, "usage_type": "call"}, {"api_name": "markovchain.mchain", "line_number": 67, "usage_type": "call"}, {"api_name": "markovchain.read_text", "line_number": 73, "usage_type": "call"}, {"api_name": "markovchain.mchain", "line_number": 75, "usage_type": "call"}, {"api_name": "markovchain.read_text", "line_number": 81, "usage_type": "call"}, {"api_name": "markovchain.mchain", "line_number": 83, "usage_type": "call"}, {"api_name": "markovchain.message", "line_number": 87, "usage_type": "call"}, {"api_name": "markovchain.message", "line_number": 89, "usage_type": "call"}, {"api_name": "markovchain.message", "line_number": 91, "usage_type": "call"}, {"api_name": "markovchain.message", "line_number": 93, "usage_type": "call"}, {"api_name": "markovchain.message", "line_number": 95, "usage_type": "call"}, {"api_name": "markovchain.message", "line_number": 97, "usage_type": "call"}, {"api_name": "key.bot_id", "line_number": 99, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "595294341", "text": "import multiprocessing as mp\nimport time\n\ndef hello(proc_id, channel):\n print(\"start: \", proc_id)\n channel.put(str(proc_id))\n time.sleep(1)\n print(\"end: \", proc_id)\n \nif __name__ == '__main__':\n num_procs = 4\n channel = mp.Queue()\n procs = [mp.Process(target=hello, args=(proc_id, channel)) for proc_id in range(num_procs)]\n\n for p in procs:\n p.start()\n\n for p in procs:\n p.join()\n\n results = [channel.get() for p in procs]\n print(results)\n", "sub_path": "parallel/parallel_ex1.py", "file_name": "parallel_ex1.py", "file_ext": "py", "file_size_in_byte": 490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "time.sleep", "line_number": 7, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 12, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "325643033", "text": "import pygame\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self,direction=0,posX=0,posY=0,hurt=100):\n 
#子弹发射的方向,子弹的位置,子弹的速度,子弹的生命,子弹的伤害量\n self.direction = direction\n self.posX = posX\n self.posY = posY\n self.hurt = hurt\n self.runTime = 2\n self.bullet_up = pygame.image.load('./image/bullet_up.png')\n self.bullet_down = pygame.image.load('./image/bullet_down.png')\n self.bullet_left = pygame.image.load('./image/bullet_left.png')\n self.bullet_right = pygame.image.load('./image/bullet_right.png')\n self.arrImage = [self.bullet_up,self.bullet_down,self.bullet_left,self.bullet_right]\n\n self.bulletShow = self.arrImage[direction]\n\n if self.direction == 0:\n self.detailX = posX*24 + 24\n self.detailY = posY*24\n elif self.direction == 1:\n self.detailX = posX*24 + 24\n self.detailY = posY*24 + 48\n elif self.direction == 2:\n self.detailX = posX*24\n self.detailY = posY*24 + 24\n elif self.direction == 3:\n self.detailX = posX*24 + 48\n self.detailY = posY*24 + 24\n\n pass\n\n def move(self):\n if self.runTime == 0:\n self.runTime = 2\n if self.direction == 0:\n self.posY -= 1\n elif self.direction == 1:\n self.posY += 1\n elif self.direction == 2:\n self.posX -= 1\n else:\n self.posX += 1\n\n if self.direction == 0:\n self.detailX = self.posX * 24 + 24 - 6\n self.detailY = self.posY * 24\n elif self.direction == 1:\n self.detailX = self.posX * 24 + 24 - 6\n self.detailY = self.posY * 24 + 48\n elif self.direction == 2:\n self.detailX = self.posX * 24\n self.detailY = self.posY * 24 + 24 - 6\n elif self.direction == 3:\n self.detailX = self.posX * 24 + 48\n self.detailY = self.posY * 24 + 24 - 6\n else:\n self.runTime -= 1\n pass\n\n\n", "sub_path": "Python基础/13PyGame/10坦克大战_敌方坦克与子弹制作/代码/TankGame/bullet.py", "file_name": "bullet.py", "file_ext": "py", "file_size_in_byte": 2214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pygame.sprite", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "252771429", "text": "\"\"\"\n(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport falcon\n\nfrom freezer_api.api.common import resource\nfrom freezer_api.common import exceptions as freezer_api_exc\nfrom freezer_api import policy\n\n\nclass BackupsCollectionResource(resource.BaseResource):\n \"\"\"\n Handler for endpoint: /v1/backups\n \"\"\"\n def __init__(self, storage_driver):\n self.db = storage_driver\n\n @policy.enforce('backups:get_all')\n def on_get(self, 
req, resp):\n # GET /v1/backups(?limit,offset) Lists backups\n user_id = req.get_header('X-User-ID')\n offset = req.get_param_as_int('offset') or 0\n limit = req.get_param_as_int('limit') or 10\n search = self.json_body(req)\n obj_list = self.db.search_backup(user_id=user_id, offset=offset,\n limit=limit, search=search)\n resp.body = {'backups': obj_list}\n\n @policy.enforce('backups:create')\n def on_post(self, req, resp):\n # POST /v1/backups Creates backup entry\n doc = self.json_body(req)\n if not doc:\n raise freezer_api_exc.BadDataFormat(\n message='Missing request body')\n user_name = req.get_header('X-User-Name')\n user_id = req.get_header('X-User-ID')\n backup_id = self.db.add_backup(\n user_id=user_id, user_name=user_name, doc=doc)\n resp.status = falcon.HTTP_201\n resp.body = {'backup_id': backup_id}\n\n\nclass BackupsResource(resource.BaseResource):\n \"\"\"\n Handler for endpoint: /v1/backups/{backup_id}\n \"\"\"\n def __init__(self, storage_driver):\n self.db = storage_driver\n\n @policy.enforce('backups:get')\n def on_get(self, req, resp, backup_id):\n # GET /v1/backups/{backup_id} Get backup details\n user_id = req.get_header('X-User-ID')\n obj = self.db.get_backup(user_id=user_id, backup_id=backup_id)\n if obj:\n resp.body = obj\n else:\n resp.status = falcon.HTTP_404\n\n @policy.enforce('backups:delete')\n def on_delete(self, req, resp, backup_id):\n # DELETE /v1/backups/{backup_id} Deletes the specified backup\n user_id = req.get_header('X-User-ID')\n obj = self.db.get_backup(user_id=user_id,\n backup_id=backup_id)\n if not obj:\n raise freezer_api_exc.DocumentNotFound(\n message='No Backup found with ID:{0}'.\n format(backup_id))\n else:\n self.db.delete_backup(\n user_id=user_id, backup_id=backup_id)\n resp.body = {'backup_id': backup_id}\n resp.status = falcon.HTTP_204\n", "sub_path": "freezer_api/api/v1/backups.py", "file_name": "backups.py", "file_ext": "py", "file_size_in_byte": 3205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "freezer_api.api.common.resource.BaseResource", "line_number": 25, "usage_type": "attribute"}, {"api_name": "freezer_api.api.common.resource", "line_number": 25, "usage_type": "name"}, {"api_name": "freezer_api.policy.enforce", "line_number": 32, "usage_type": "call"}, {"api_name": "freezer_api.policy", "line_number": 32, "usage_type": "name"}, {"api_name": "freezer_api.common.exceptions.BadDataFormat", "line_number": 48, "usage_type": "call"}, {"api_name": "freezer_api.common.exceptions", "line_number": 48, "usage_type": "name"}, {"api_name": "falcon.HTTP_201", "line_number": 54, "usage_type": "attribute"}, {"api_name": "freezer_api.policy.enforce", "line_number": 43, "usage_type": "call"}, {"api_name": "freezer_api.policy", "line_number": 43, "usage_type": "name"}, {"api_name": "freezer_api.api.common.resource.BaseResource", "line_number": 58, "usage_type": "attribute"}, {"api_name": "freezer_api.api.common.resource", "line_number": 58, "usage_type": "name"}, {"api_name": "falcon.HTTP_404", "line_number": 73, "usage_type": "attribute"}, {"api_name": "freezer_api.policy.enforce", "line_number": 65, "usage_type": "call"}, {"api_name": "freezer_api.policy", "line_number": 65, "usage_type": "name"}, {"api_name": "freezer_api.common.exceptions.DocumentNotFound", "line_number": 82, "usage_type": "call"}, {"api_name": "freezer_api.common.exceptions", "line_number": 82, "usage_type": "name"}, {"api_name": "falcon.HTTP_204", "line_number": 89, "usage_type": "attribute"}, 
{"api_name": "freezer_api.policy.enforce", "line_number": 75, "usage_type": "call"}, {"api_name": "freezer_api.policy", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "516613811", "text": "\nimport sys\nimport csv\nimport re\nfrom pyspark import SparkContext, SparkConf\n\nconf = SparkConf().setAppName(\"myapp\")\nsc = SparkContext(conf=conf)\n\ntext_file = sc.textFile(\"input\")\n\nmydict = {}\nreader = csv.reader(open('new_lemmatizer.csv'), delimiter = ',')\ni = 0\nfor row in reader:\n\tkey = row[0]\n\tif key in mydict:\n\t\tpass\n\tmydict[key] = filter(None, row[1:])\n\ndef mapper_stub(word1, word2):\n\tword1 = \" \".join(re.findall(\"[a-zA-Z]+\", word1))\n\tword1 = word1.replace(\"j\", \"i\")\n\tword1 = word1.replace(\"v\", \"u\")\n\tword2 = \" \".join(re.findall(\"[a-zA-Z]+\", word2))\n\tword2 = word2.replace(\"j\", \"i\")\n\tword2 = word2.replace(\"v\", \"u\")\n\tif word1 in mydict:\n\t\tif mydict[word1] != \"\":\n\t\t\tword1 = mydict[word1]\n\telse:\n\t\tword1 = [word1]\n\tif word2 in mydict:\n\t\tif mydict[word2] != \"\":\n\t\t\tword2 = mydict[word2]\n\telse:\n\t\tword2 = [word2]\n\treturn word1, word2\n\t\ndef mapper(line):\n\tline = line.strip()\n\ttry:\n\t\tline = str(unicode(line))\n\texcept:\n\t\tline = line\n\tmylist = []\n\tif(len(line.split(\">\")) <= 1):\n\t\treturn [((('random', 'random'), ['random']))]\n\tlocation, words = line.split(\">\")\n\tlocation = location + \">\"\n\twords = words.split()\n\tfor i in range(0, len(words)):\n\t\tfor j in range(i+1, len(words)):\n\t\t\tpair1, pair2 = mapper_stub(words[i], words[j])\n\t\t\tfor val1 in pair1:\n\t\t\t\tfor val2 in pair2:\n\t\t\t\t\tmylist.append(((val1, val2), [location]))\n\tif mylist:\n\t\treturn mylist\n\treturn [(('random', 'random'), ['random'])]\n\t\n\t\t\ncounts = text_file.flatMap(mapper).sortByKey().reduceByKey(lambda a, b: a+b)\ncounts.saveAsTextFile(\"output2\")\n", "sub_path": "Parallel text processing using Spark/2_gram.py", "file_name": "2_gram.py", "file_ext": "py", "file_size_in_byte": 1512, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pyspark.SparkConf", "line_number": 7, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 8, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 13, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 22, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "211228274", "text": "import SUASSystem\nimport multiprocessing\nfrom SUASSystem.logging import log\nfrom time import sleep\n\ndef run_sda_process(logger_queue, waypoints, sda_status, sda_avoid_coords, vehicle_state_data, mission_information_data):\n SUASSystem.logging.logger_worker_configurer(logger_queue)\n logger_name = multiprocessing.current_process().name\n\n log(logger_name, \"Instantiating SDA converter\")\n while True:\n try:\n sda_converter = SUASSystem.SDAConverter(vehicle_state_data[0].get_location(), mission_information_data[0][\"fly_zones\"])\n break\n except:\n sleep(0.1)\n log(logger_name, \"SDA converter instantiated\")\n\n while True:\n if \"enabled\" in str(sda_status.value).lower():\n current_location = vehicle_state_data[0].get_location()\n\n current_waypoint_number = vehicle_state_data[0].get_current_waypoint_number()\n if current_waypoint_number != 0:\n current_uav_waypoint = waypoints[current_waypoint_number - 1]\n sda_converter.set_waypoint(SUASSystem.Location(current_uav_waypoint.x, current_uav_waypoint.y, 
current_uav_waypoint.z * 3.28084))\n else:\n sda_converter.set_waypoint(SUASSystem.Location(waypoints[1].x, waypoints[1].y, waypoints[1].z))\n\n sda_converter.reset_obstacles()\n for stationary_obstacle in mission_information_data[\"stationary_obstacles\"]:\n sda_converter.add_obstacle(get_obstacle_location(stationary_obstacle, MSL_ALT), stationary_obstacle)\n for moving_obstacle in mission_information_data[\"moving_obstacles\"]:\n sda_converter.add_obstacle(get_obstacle_location(moving_obstacle, MSL_ALT), moving_obstacle)\n\n sda_converter.set_uav_position(current_location)\n sda_converter.avoid_obstacles()\n\n if not sda_converter.has_uav_completed_guided_path():\n try:\n sda_avoid_coords[0] = sda_converter.get_uav_avoid_coordinates()\n except:\n sda_avoid_coords.append(sda_converter.get_uav_avoid_coordinates())\n\n sleep(0.5)\n", "sub_path": "SUASSystem/SUASSystem/sda.py", "file_name": "sda.py", "file_ext": "py", "file_size_in_byte": 2143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "SUASSystem.logging.logger_worker_configurer", "line_number": 7, "usage_type": "call"}, {"api_name": "SUASSystem.logging", "line_number": 7, "usage_type": "attribute"}, {"api_name": "multiprocessing.current_process", "line_number": 8, "usage_type": "call"}, {"api_name": "SUASSystem.logging.log", "line_number": 10, "usage_type": "call"}, {"api_name": "SUASSystem.SDAConverter", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "SUASSystem.logging.log", "line_number": 17, "usage_type": "call"}, {"api_name": "SUASSystem.Location", "line_number": 26, "usage_type": "call"}, {"api_name": "SUASSystem.Location", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "128904980", "text": "from urllib import request\nfrom fake_useragent import UserAgent\nimport ssl\nimport requests\n\n\nclass HtmlDownloader(object):\n\tdef download(self, url):\n\t\tif url is not None:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tssl._create_default_https_context = ssl._create_unverified_context\n\t\t\t\t\n\t\t\t\tua = UserAgent()\n\t\t\t\t\n\t\t\t\theaders = {\n\t\t\t\t\t'User-Agent': ua.random\n\t\t\t\t}\n\t\t\t\treq = request.Request(url=url, headers=headers)\n\t\t\t\t\n\t\t\t\tresponse = request.urlopen(req, timeout=10)\n\t\t\t\t\n\t\t\t\tif response.getcode() == 200:\n\t\t\t\t\tr = requests.get(url)\n\t\t\t\t\tr.encoding = 'utf-8'\n\t\t\t\t\treturn r.text\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\treturn None\n\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\t\n\t\t\t\tprint(str(e))\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn None\n", "sub_path": "spiderFiction/html_downloader.py", "file_name": "html_downloader.py", "file_ext": "py", "file_size_in_byte": 667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ssl._create_default_https_context", "line_number": 12, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 12, "usage_type": "attribute"}, {"api_name": "fake_useragent.UserAgent", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 19, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 21, "usage_type": "name"}, 
{"api_name": "requests.get", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "425754072", "text": "from PIL import Image\nfrom numpy import linalg\nimport json\ndataPath = r'F:/SWrk/gpTasks/data/originData/'\nresultPath = r'F:/SWrk/gpTasks/data/resources/'\nDefValNumpy = ['0', '1', '2', '3', '4', '5', '6', '7', '8', 'p', 'end']\nface = ['s1', 's2', 'vict']\nfaceDir = {}\nDefDir = {}\n\n\ndef get_hash(img):\n img_L = img.convert(\"L\")\n pixels = list(img_L.getdata())\n temp = []\n for i in range(0, 10):\n a = i * 10\n b = a + 10\n temp.append(pixels[a:b])\n for p in range(0, len(temp)):\n for q in range(0, len(temp[p])):\n if temp[p][q] is 192:\n temp[p][q] = 0\n else:\n temp[p][q] = 1\n # for p in range(0, len(pixels)):\n # if pixels[p] is 192 or pixels[p] is 128:\n # pixels[p] = 0\n # else:\n # pixels[p] = 1\n return temp\n\n\ndef get_sparseMatrix(img):\n img_L = img.convert(\"L\")\n pixels = list(img_L.getdata())\n temp = []\n for i in range(0, 9):\n temp.append(pixels[i*9:i*9+9])\n for p in range(0, len(temp)):\n for q in range(0, len(temp[p])):\n if temp[p][q] is 225:\n temp[p][q] = 0\n else:\n temp[p][q] = 1\n return temp\n\n\ndef get_maxEigvals(img):\n # matrix = get_sparseMatrix(get_hash(img))\n return max(linalg.eigvals(get_hash(img)))\n # emmm 好奇葩的特征值...\n # return linalg.eigvals(matrix)\n\n\ndef get_code(maxEigvals):\n return DefDir[str(maxEigvals)]\n\n\ndef get_faceEigvals(img):\n matrix = get_sparseMatrix(img)\n # for i in matrix:\n # print(i)\n # print('---------------')\n return max(linalg.eigvals(matrix))\n\n\n# # 创建模板特征值存储的json文件.1 时用到\n# for ietm in range(0, len(DefValNumpy)):\n# img_1 = Image.open(dataPath+DefValNumpy[ietm]+\".png\")\n# DefDir[str(get_maxEigvals(img_1))] = ietm\n# jsObj = json.dumps(DefDir)\n# # print(jsObj)\n# with open((dataPath+'DefMaxEigvals.josn').encode('utf-8'), \"w\") as ftemp:\n# ftemp.write(jsObj)\n\n# for ietm in range(0, len(DefValNumpy)):\n# img_1 = Image.open(dataPath+DefValNumpy[ietm]+\".png\")\n# # img_1 = Image.open(dataPath+\"8.png\")\n# for i in get_hash(img_1):\n# print(i)\n# print('------------'+DefValNumpy[ietm]+'-------------')\n\n# 从文件获取模板图片的特征值存入字典\nwith open((dataPath+'DefMaxEigvals.josn').encode('utf-8'), \"r\") as ftemp:\n DefDir = json.load(ftemp)\n# print(DefDir)\n\n# 从文件获取模板图片的特征值存入字典\nwith open((dataPath+'faceEigvals.josn').encode('utf-8'), \"r\") as ftemp:\n faceDir = json.load(ftemp)\nfor i in faceDir:\n faceDir[i] = complex(faceDir[i])\n# print(faceDir)\n\n# # 板特征值存储的json文件.2 时用到\n# for ietm in range(0, len(face)):\n# img_1 = Image.open(dataPath+face[ietm]+\".png\")\n# # print(face[ietm])\n# faceDir[face[ietm]] = str(get_faceEigvals(img_1))\n# jsObj = json.dumps(faceDir)\n# # print(jsObj)\n# with open((dataPath+'faceEigvals.josn').encode('utf-8'), \"w\") as ftemp:\n# ftemp.write(jsObj)\n", "sub_path": "Task5/cheB.py", "file_name": "cheB.py", "file_ext": "py", "file_size_in_byte": 3048, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.linalg.eigvals", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.linalg.eigvals", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 65, "usage_type": "name"}, {"api_name": "json.load", "line_number": 86, "usage_type": "call"}, {"api_name": "json.load", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "647643038", "text": "import keras\r\nfrom keras import backend as K\r\nfrom 
keras.datasets import mnist\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\n\r\n# mnist\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\n\r\n\r\n# loss\r\ndef triplet_loss(y_true, y_pred):\r\n del y_true\r\n \r\n loss = K.variable(0, dtype='float32')\r\n g = K.constant(1.0, shape=[1], dtype='float32')\r\n \r\n batch_size = len(y_pred)\r\n\r\n for i in range(0, batch_size, 3):\r\n q_embedding = y_pred[i+0]\r\n p_embedding = y_pred[i+1]\r\n n_embedding = y_pred[i+2]\r\n D_q_p = K.sqrt(K.sum((q_embedding - p_embedding)**2))\r\n D_q_n = K.sqrt(K.sum((q_embedding - n_embedding)**2))\r\n loss = (loss + g + D_q_p - D_q_n )\r\n\r\n loss = loss/(batch_size/3)\r\n zero = K.constant(0.0, shape=[1], dtype='float32')\r\n return K.maximum(loss,zero)\r\n\r\n\r\n\r\n", "sub_path": "thyroid segmentation/loss&mnist.py", "file_name": "loss&mnist.py", "file_ext": "py", "file_size_in_byte": 950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 8, "usage_type": "name"}, {"api_name": "keras.backend.variable", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.backend.constant", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 20, "usage_type": "name"}, {"api_name": "keras.backend.sqrt", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 28, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.backend.sqrt", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 29, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.backend.constant", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 33, "usage_type": "name"}, {"api_name": "keras.backend.maximum", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "600179633", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 18:18:24 2017\n\n@author: hyj\n\nexample: python draw_trajcory imu\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef draw_trajecttory(argv):\n np.set_printoptions(suppress = True) #是否压缩由科学计数法表示的浮点数\n filepath=os.path.abspath('..')+\"/bin\" #表示当前所处的文件夹上一级文件夹的绝对路径\n\n sensor = argv[0]\n\n position = []\n quaterntions = []\n timestamp = []\n tx_index = 5\n position = np.loadtxt(filepath + '/' + sensor + '_pose.txt', usecols=(tx_index, tx_index + 1, tx_index + 2))\n\n position1 = []\n quaterntions1 = []\n timestamp1 = []\n data = np.loadtxt(filepath + '/' + sensor + '_int_pose.txt')\n # timestamp1 = data[:,0]\n # quaterntions1 = data[:,[tx_index + 6, tx_index + 3, tx_index + 4, tx_index + 5]] # qw,qx,qy,qz\n position1 = data[:, [tx_index, tx_index + 1, tx_index + 2]]\n\n position2 = []\n quaterntions2 = []\n timestamp2 = []\n data = np.loadtxt(filepath + '/' + sensor + '_int_pose_noise.txt')\n # timestamp2 = data[:,0]\n # quaterntions2 = data[:,[tx_index + 6, tx_index + 3, 
tx_index + 4, tx_index + 5]] # qw,qx,qy,qz\n position2 = data[:, [tx_index, tx_index + 1, tx_index + 2]]\n\n position3 = []\n quaterntions3 = []\n timestamp3 = []\n data = np.loadtxt(filepath + '/' + sensor + '_int_pose_midpoint.txt')\n # timestamp3 = data[:,0]\n # quaterntions3 = data[:,[tx_index + 6, tx_index + 3, tx_index + 4, tx_index + 5]] # qw,qx,qy,qz\n position3 = data[:, [tx_index, tx_index + 1, tx_index + 2]]\n\n position4 = []\n quaterntions4 = []\n timestamp4 = []\n data = np.loadtxt(filepath + '/' + sensor + '_int_pose_noise_midpoint.txt')\n # timestamp4 = data[:,0]\n # quaterntions4 = data[:,[tx_index + 6, tx_index + 3, tx_index + 4, tx_index + 5]] # qw,qx,qy,qz\n position4 = data[:, [tx_index, tx_index + 1, tx_index + 2]]\n\n ### plot 3d\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.plot(position[:, 0], position[:, 1], position[:, 2], label='gt')\n # ax.plot(position1[:, 0], position1[:, 1], position1[:, 2], label=sensor + '_int')\n # ax.plot(position2[:, 0], position2[:, 1], position2[:, 2], label=sensor + '_int_noise')\n ax.plot(position3[:, 0], position3[:, 1], position3[:, 2], label=sensor + '_midint')\n ax.plot(position4[:, 0], position4[:, 1], position4[:, 2], label=sensor + '_midint_noise')\n ax.plot([position[0, 0]], [position[0, 1]], [position[0, 2]], 'r.', label='start')\n\n ax.legend()\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\nif __name__ == '__main__':\n draw_trajecttory(sys.argv[1:])", "sub_path": "python_tool/draw_trajcory.py", "file_name": "draw_trajcory.py", "file_ext": "py", "file_size_in_byte": 2788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.set_printoptions", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 81, "usage_type": "attribute"}]} +{"seq_id": "397420614", "text": "#!/usr/bin/python\r\n# -*- coding:UTF-8 -*-\r\n\r\nimport codecs\r\nimport cProfile\r\nimport pstats\r\nimport json\r\nimport time\r\nimport sys\r\nfrom optparse import OptionParser\r\nfrom utils import price\r\n\r\nfrom comment import comment\r\n\r\n\r\ndef init_parser():\r\n\tparser = OptionParser()\r\n\tparser.add_option('-i', '--input', action='store', dest='input',\r\n\t\thelp='set the input filename', metavar='FILE')\r\n\tparser.add_option('--encoding', action='store', dest='encoding',\r\n\t\tdefault='utf-8', help='set the encoding of input filename', metavar='FILE')\r\n\tparser.add_option('--output', action='store', dest='output',\r\n\t\tdefault='./output/result.csv', help='set the output filename', metavar='FILE')\r\n\tparser.add_option('--output-mode', action='store', 
dest='output_mode',\r\n\t\tdefault='result', help='set the output mode')\r\n\tparser.add_option('--unmatch', action='store', dest='unmatch',\r\n\t\tdefault='./output/unmatch.txt', help='set the unmatch filename', metavar='FILE')\r\n\tparser.add_option('--count', action='store', dest='max_count',\r\n\t\tdefault=sys.maxsize, help='set the max count to process')\r\n\tparser.add_option('--log', action='store', dest='logfile',\r\n\t\tdefault='./output/failed.json', help='set the log file')\r\n\tparser.add_option('--statistics', action='store', dest='statistics',\r\n\t\tdefault='./output/statistic', help='set the statistics file')\r\n\treturn parser.parse_args()\r\n\r\n\r\ndef main():\r\n\t(options, args) = init_parser()\r\n\tdeduplication = {}\r\n\tsummary = {}\r\n\tstatistic = {}\r\n\tfor field in comment.FIELDS:\r\n\t\tstatistic[field] = 0\r\n\tcount = 0\r\n\r\n\tprices = price.load_price(price.PRICE_FILE)\r\n\r\n\tinput_file = codecs.open(options.input, 'r', encoding=options.encoding, errors='strict')\r\n\toutput_file = codecs.open(options.output, 'w', encoding='utf-8')\r\n\tunmatch_file = codecs.open(options.unmatch, 'w', encoding='utf-8')\r\n\tlog_file = codecs.open(options.logfile, 'w', encoding='utf-8')\r\n\tstatistics_file = codecs.open(options.statistics, 'w', encoding='utf-8')\r\n\r\n\tif options.output_mode == 'result':\r\n\t\toutput_file.write((','.join(comment.OUT_HEADERS) + '\\n'))\r\n\r\n\tc = comment.Comment()\r\n\tfor line in input_file.readlines():\r\n\t\ttry:\r\n\t\t\t_json = json.loads(line)\r\n\t\t\t# get id\r\n\t\t\tif 'id' in _json:\r\n\t\t\t\tid = _json['id']\r\n\t\t\telif 'commentId' in _json:\r\n\t\t\t\tid = _json['commentId']\r\n\t\t\telse:\r\n\t\t\t\traise IndexError\r\n\t\t\t# deduplication\r\n\t\t\tif id in deduplication:\r\n\t\t\t\tcontinue\r\n\t\t\telse:\r\n\t\t\t\tdeduplication[id] = True\r\n\t\t\t# skip len less than 15\r\n\t\t\tif len(_json['content']) < 15:\r\n\t\t\t\tcontinue\r\n\t\t\t# init comment object\r\n\t\t\tc.clean_and_fill(_json)\r\n\t\t\t# match regexes\r\n\t\t\tfields = c.match(_json['content'], comment.matchRegex)\r\n\t\texcept:\r\n\t\t\tlog_file.write(line)\r\n\t\t\tcontinue\r\n\t\t_match_count = len(fields)\r\n\r\n\t\t# log\r\n\t\tif divmod(count, 1000)[1] == 0:\r\n\t\t\tprint('process {0} comments, {1}'.format(count, time.process_time()))\r\n\r\n\t\tif _match_count > 5:\r\n\t\t\t# set price\r\n\t\t\ttry:\r\n\t\t\t\tc.data['price'] = price.get_price(_json['referenceId'], prices)\r\n\t\t\texcept:\r\n\t\t\t\tcontinue\r\n\t\t\t# output\r\n\t\t\tif options.output_mode == 'relay':\r\n\t\t\t\toutput_file.write(json.dumps({id: c.data, 'match': _match_count}, ensure_ascii=False) + '\\n')\r\n\t\t\telif options.output_mode == 'result':\r\n\t\t\t\toutput_file.write(str(c) + '\\n')\r\n\t\telif _match_count == 0:\r\n\t\t\t# unmatch output\r\n\t\t\tunmatch_file.write(_json['content'] + '\\n')\r\n\t\t# summary and statistic\r\n\t\ttry:\r\n\t\t\tsummary[_match_count] += 1\r\n\t\texcept:\r\n\t\t\tsummary[_match_count] = 1\r\n\t\tfor field in fields:\r\n\t\t\tstatistic[field] += 1\r\n\t\tcount += 1\r\n\t\tif count == int(options.max_count):\r\n\t\t\tbreak\r\n\tsum = 0\r\n\tfor key in statistic:\r\n\t\tsum += statistic[key]\r\n\tstrs = 'statistic: {0}, \\r\\nsummary: {1}, \\r\\ncomment count {2}, field match count {3}, process {4}s'.format(\r\n\t\tjson.dumps(statistic, indent=2, sort_keys=True), json.dumps(summary, indent=2, sort_keys=True),\r\n\t\tcount, sum, time.process_time())\r\n\tstatistics_file.write(strs)\r\n\tprint(strs)\r\n\r\n\r\nif __name__ == 
'__main__':\r\n\tcProfile.run('main()', 'timeit')\r\n\tp = pstats.Stats('timeit')\r\n\tp.sort_stats('time').print_stats(8)\r\n", "sub_path": "analysis_regex.py", "file_name": "analysis_regex.py", "file_ext": "py", "file_size_in_byte": 3989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "optparse.OptionParser", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 29, "usage_type": "attribute"}, {"api_name": "comment.comment.FIELDS", "line_number": 42, "usage_type": "attribute"}, {"api_name": "comment.comment", "line_number": 42, "usage_type": "name"}, {"api_name": "utils.price.load_price", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.price", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.price.PRICE_FILE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 48, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 49, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 50, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 51, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 52, "usage_type": "call"}, {"api_name": "comment.comment.OUT_HEADERS", "line_number": 55, "usage_type": "attribute"}, {"api_name": "comment.comment", "line_number": 55, "usage_type": "name"}, {"api_name": "comment.comment.Comment", "line_number": 57, "usage_type": "call"}, {"api_name": "comment.comment", "line_number": 57, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 60, "usage_type": "call"}, {"api_name": "comment.comment.matchRegex", "line_number": 79, "usage_type": "attribute"}, {"api_name": "comment.comment", "line_number": 79, "usage_type": "name"}, {"api_name": "time.process_time", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.price.get_price", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.price", "line_number": 92, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 97, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 117, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 118, "usage_type": "call"}, {"api_name": "cProfile.run", "line_number": 124, "usage_type": "call"}, {"api_name": "pstats.Stats", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "459482140", "text": "import xlwt\n\nbook = xlwt.Workbook() # Initialize a new excel workbook\nws = book.add_sheet('First Sheet') # Add an excel sheet to it\n\n\nf = open('HOPDataSet1.txt', 'r+') # Open the debug file\ndata = f.readlines() # Read each line of the file\nws.write(0, 0, \"Hydraulic Oil Pressure\") # Write the column name on the excel sheet\nrow_count, prev, repeat_count = 1, 0, 0 # Initializing the variables\n\n\nfor i in range(len(data)): # Iterate through the lines of the file\n\n # Each line has some string, the data starts at 25th index, so only those are taken and converted to float\n row = float(data[i][25:])\n if prev != row: # To check if the values are repeating\n ws.write(row_count, 0, row) # If it not repeating, the data is written into the excel sheet\n prev = row # Previous value is updated\n row_count += 1 # The index of the row is updated\n\n else:\n repeat_count += 1 # If the data is repeating, do not write the data but increment the count\n\n\nprint(repeat_count) # To display the number of repeated values\nbook.save('SampleValues - Newer' + '.xls') # Save the excel sheet\nf.close() # The debug 
message file is closed\n", "sub_path": "Debug file to excel sheet.py", "file_name": "Debug file to excel sheet.py", "file_ext": "py", "file_size_in_byte": 1245, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "xlwt.Workbook", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "30374071", "text": "from calendar import timegm\nimport datetime\nfrom utils.custom_authentication import JWTHelper\nfrom apps.logger.models import RequestLog\nfrom django.conf import settings\nfrom django.http import HttpResponseBadRequest\n\n\nclass ResponseMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n # 处理请求\n\n metainfo = request.META\n\n # 生产环境下过滤请求 UA 和 REFERER\n refuse = False\n if settings.DEBUG == False and metainfo.get('PATH_INFO', '').find('/BY') == -1 and metainfo.get('HTTP_REFERER', '').find('https://servicewechat.com/wxcdbc765e408bd49c') != 0:\n refuse = True\n\n # 记录每一条请求的信息\n try:\n if metainfo.get('PATH_INFO', '').find('/BY') == -1 and metainfo.get('PATH_INFO', '').find('/favicon.ico') == -1 and metainfo.get('PATH_INFO', '').find('/static') == -1:\n record = RequestLog(\n HTTP_AUTHORIZATION=metainfo.get(\n 'HTTP_AUTHORIZATION', None),\n HTTP_HOST=metainfo.get('HTTP_HOST', None),\n HTTP_REFERER=metainfo.get('HTTP_REFERER', None),\n HTTP_UA=metainfo.get(\n 'HTTP_UA', None) if not refuse else '已拒绝请求',\n HTTP_USER_AGENT=metainfo.get('HTTP_USER_AGENT', None),\n PATH_INFO=metainfo.get('PATH_INFO', None),\n QUERY_STRING=metainfo.get('QUERY_STRING', None),\n REMOTE_ADDR=metainfo.get('REMOTE_ADDR', None),\n REMOTE_HOST=metainfo.get('REMOTE_HOST', None),\n REQUEST_METHOD=metainfo.get('REQUEST_METHOD', None),\n SERVER_NAME=metainfo.get('SERVER_NAME', None),\n SERVER_PORT=metainfo.get('SERVER_PORT', None)\n )\n record.save()\n except Exception:\n pass\n\n # 生产环境下过滤请求 UA 和 REFERER\n if refuse:\n return HttpResponseBadRequest()\n\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n # 处理相应\n\n token = request.META.get('JWT', None)\n if token is not None:\n # 检查 JWT 的过期时间\n remote = request.META.get('REMOTE_ADDR')\n ua_string = request.META.get('HTTP_UA', '')\n payload = JWTHelper().decode(token, ua_string)\n if payload['error'] is None:\n # 如果在一天之内即将过期则刷新 JWT\n check = timegm((datetime.datetime.utcnow() +\n datetime.timedelta(days=1)).utctimetuple())\n if check > payload['exp']:\n uid = payload['uid']\n new_token = JWTHelper().encode(uid, ua_string)\n response['Authorization'] = 'JWT ' + \\\n new_token.decode(\"utf-8\")\n\n return response\n", "sub_path": "projects/Beiyang1895BEVersion1/utils/middleware/response.py", "file_name": "response.py", "file_ext": "py", "file_size_in_byte": 3171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.conf.settings.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "apps.logger.models.RequestLog", "line_number": 28, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.custom_authentication.JWTHelper", "line_number": 63, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 66, "usage_type": 
"call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.custom_authentication.JWTHelper", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "39326399", "text": "\"\"\"Compute gaussian features.\"\"\"\n\nimport warnings\nfrom functools import partial\nfrom itertools import repeat\n\nfrom multiprocessing import Pool, cpu_count\nfrom bycycle.group.utils import progress_bar\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import curve_fit\nfrom scipy import stats as st\nfrom bycycle.cyclepoints import find_extrema, find_zerox\nfrom neurodsp.sim.cycles import sim_skewed_gaussian_cycle\n\n\n\n###################################################################################################\n###################################################################################################\n\n\ndef compute_gaussian_features(df_samples, sig, fs, maxfev=2000, tol=1.49e-6, n_jobs=-1, chunksize=1,\n progress=None, z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5):\n \"\"\"Compute gaussian features.\n\n Parameters\n ----------\n df_samples : pandas.DataFrame\n Contains cyclepoint locations for each spike.\n sig : 1d array\n Voltage time series.\n fs : float\n Sampling rate, in Hz.\n maxfev : int, optional, default: 2000\n The maximum number of calls in curve_fit.\n tol : float, optional, default: 10e-6\n Relative error desired.\n n_jobs : int, optional, default: -1\n The number of jobs to compute features in parallel.\n chunksize : int, optional, default: 1\n Number of chunks to split spikes into. Each chunk is submitted as a separate job.\n With a large number of spikes, using a larger chunksize will drastically speed up\n runtime. An optimal chunksize is typically np.ceil(n_spikes/n_jobs).\n progress : {None, 'tqdm', 'tqdm.notebook'}\n Specify whether to display a progress bar. Uses 'tqdm', if installed.\n z_thresh_k : float, optional, default: 0.5\n Potassium (k) current z-score threshold.\n z_thresh_cond : float, optional, default: 0.5\n Conductive current z-score threshold.\n rsq_thresh : float, optional, default: 0.5\n Na current r-squared threshold. 
Used to stop conductive/K fits in cycles\n with bad Na current fits.\n\n Returns\n -------\n params : dict\n Fit parameter values.\n \"\"\"\n\n n_jobs = cpu_count() if n_jobs == -1 else n_jobs\n\n indices = [*range(len(df_samples))]\n\n # Compute features in parallel\n with Pool(processes=n_jobs) as pool:\n\n mapping = pool.imap(partial(_compute_gaussian_features_cycle, df_samples=df_samples,\n sig=sig, fs=fs, maxfev=maxfev, tol=tol,\n z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5),\n indices, chunksize=chunksize)\n\n params = list(progress_bar(mapping, progress, len(df_samples)))\n\n return np.array(params)\n\n\ndef _compute_gaussian_features_cycle(index, df_samples=None, sig=None, fs=None,\n f_ranges=(300, 2000), maxfev=2000, tol=1.49e-6,\n z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5):\n \"\"\"Compute gaussian features for one cycle.\"\"\"\n\n start = df_samples.iloc[index]['sample_start'].astype(int)\n end = df_samples.iloc[index]['sample_end'].astype(int)\n sample_trough = df_samples.iloc[index]['sample_trough'].astype(int)\n\n # Adjust samples to start at zero\n sample_trough -= start\n\n # Get signal and time\n sig_cyc = sig[start:end+1]\n cyc_len = len(sig_cyc)\n times_cyc = np.arange(0, cyc_len/fs, 1/fs)\n\n # Fit single skewed gaussian to Na current\n na_params, na_gaus = _single_gaus_fit(index, sample_trough, sig_cyc, cyc_len, times_cyc, fs,\n extrema_type=\"trough\", maxfev=maxfev, tol=tol)\n if not np.isnan(na_gaus).any():\n\n # Get Na center and std\n na_center = int(na_params[0]*cyc_len)\n na_std = int(na_params[1]*cyc_len)\n\n # Determine Na current region\n upper_std = na_center + (2* na_std)\n lower_std = na_center - (2* na_std)\n\n # Calculate Na current r-squared\n na_rsq = calculate_r_squared(sig_cyc[lower_std:upper_std], na_gaus[lower_std:upper_std])\n\n # Check if Na r-squared is above threshold\n if na_rsq < rsq_thresh:\n na_rsq = np.nan\n na_params = np.append(na_params, na_rsq)\n\n k_params = np.array([np.nan] * len(na_params))\n cond_params = np.array([np.nan] * len(na_params))\n warnings.warn(\"Failed fits for index = \" + str(index))\n\n else:\n\n na_params = np.append(na_params, na_rsq)\n\n # Substract Na current gaussian fit\n rem_sig = sig_cyc - na_gaus\n\n # Split remaining signal into left of Na current (K current)\n # and right (conductive current)\n rem_sigs, times, z_scores = calculate_side_regions(na_center, rem_sig, times_cyc, fs,\n z_thresh_k, z_thresh_cond)\n\n side_current_region = zip(rem_sigs, [z_thresh_k, z_thresh_cond], z_scores, times)\n\n side_current_params = []\n side_current_gaus = []\n\n for rem_sig, z_thresh, z_score, times in side_current_region:\n\n if any(z >= z_thresh for z in z_score):\n # Get peak of remaining signal\n peak = get_current_peak(rem_sig, fs, f_ranges, z_thresh, z_score)\n\n if peak == None:\n params = np.array([np.nan] * len(na_params))\n gaus = np.array([np.nan] * len(times))\n\n else:\n # Fit single skewed gaussian to K current\n params, gaus = _single_gaus_fit(index, peak, rem_sig, len(rem_sig),\n times, fs, extrema_type=\"peak\",\n maxfev=maxfev, tol=tol)\n\n # Calculate r-squared\n rsq = calculate_r_squared(rem_sig, gaus)\n params = np.append(params, rsq)\n\n else:\n params = np.array([np.nan] * len(na_params))\n gaus = np.array([np.nan] * len(times))\n\n side_current_params.append(params)\n side_current_gaus.append(gaus)\n\n # Unpack results\n k_params, cond_params = side_current_params\n k_gaus, cond_gaus = side_current_gaus\n\n else:\n na_rsq = np.nan\n na_params = np.append(na_params, na_rsq)\n\n 
k_params = np.array([np.nan] * len(na_params))\n cond_params = np.array([np.nan] * len(na_params))\n warnings.warn(\"Failed fits for index = \" + str(index))\n\n all_params = [*cond_params, *na_params, *k_params]\n\n return all_params\n\n\ndef estimate_params(extrema, sig_cyc, fs, extrema_type=\"trough\", n_decimals=2):\n \"\"\"Initial gaussian parameter estimates.\n\n Parameters\n ----------\n extrema : int\n extrema position (peak or trough) of sig_cyc\n sig_cyc : 1d array\n Voltage time series.\n fs : float\n Sampling rate, in Hz.\n extrema_type : string, optional, default: \"trough\"\n Type of extrema, trough or peak.\n n_decimals : int, optional, default: 2\n Number of decimals to round parameters to.\n\n Returns\n -------\n params : 1d array\n Estimated centers, stds, alphas, heights.\n \"\"\"\n\n cyc_len = len(sig_cyc)\n\n centers = []\n stds = []\n heights = []\n\n # Define parameters\n if extrema_type == \"trough\":\n height0 = sig_cyc[extrema] - np.mean(sig_cyc)\n else:\n height0 = sig_cyc[extrema]\n\n center0 = extrema / cyc_len\n\n std0 = _estimate_std(sig_cyc, extrema_type=extrema_type, plot=False)\n\n centers.append(center0.round(n_decimals))\n stds.append(std0.round(n_decimals))\n heights.append(height0.round(n_decimals))\n\n # Assume no skew\n alphas = [0] * len(centers)\n params = [*centers, *stds, *alphas, *heights]\n\n return np.array(params)\n\n\ndef _estimate_bounds(sig_cyc, centers, stds, heights):\n \"\"\"Estimate parameter's lower and upper bounds.\"\"\"\n\n # Define bounds\n lower_heights = [height * .5 if height > 0 else height * 1.5 for height in heights]\n upper_heights = [height * 1.5 if height > 0 else height * .5 for height in heights]\n\n lower_stds = [std * .5 for std in stds]\n upper_stds = [std * 1.5 for std in stds]\n\n lower_alphas = [-3 for std in stds]\n upper_alphas = [3 for std in stds]\n\n lower_centers = [center * .5 for center in centers]\n upper_centers = [center * 1.5 for center in centers]\n\n upper_max = np.max(sig_cyc) - np.min((sig_cyc[0], sig_cyc[-1]))\n\n bounds = [\n [*lower_centers, *lower_stds, *lower_alphas, *lower_heights, 0, -1, 0],\n [*upper_centers, *upper_stds, *upper_alphas, *upper_heights, upper_max, 1, 1]\n ]\n\n return bounds\n\n\ndef _fit_gaussians(xs, ys, guess, tol, maxfev, index, bounds=None):\n \"\"\"Fit gaussians with scipy's curve_fit.\"\"\"\n\n try:\n # Fit gaussians\n warnings.filterwarnings(\"ignore\")\n params, _ = curve_fit(_sim_gaussian_cycle, xs, ys, p0=guess)\n\n except:\n # Raise warning for failed fits\n warn_str = \"Failed fit for index {idx}.\".format(idx=index)\n warnings.warn(warn_str, RuntimeWarning)\n params = np.array([np.nan] * len(guess))\n\n return params\n\n\n###################################################################################################\n###################################################################################################\n\n\ndef _sim_gaussian_cycle(times, *params):\n \"\"\"Proxy function for compatibility between sim_skewed_gaussian and curve_fit.\n\n Parameters\n ----------\n times : 1d array\n Time definition of the cycle.\n params : floats\n Variable number of centers, stds, alphas, and heights arguments, respectively. The number\n of these variable parameters determines the number of gaussians simulated. 
An additional\n three trailing arguments to define a sigmoid baseline as maximum, growth, midpoint.\n\n Returns\n -------\n sig_cycle : 1d array\n Simulated action potential.\n \"\"\"\n sing_gaus = sim_skewed_gaussian_cycle(1, len(times), *params)\n\n return sing_gaus\n\n\ndef _single_gaus_fit(index, extrema, sig_cyc, cyc_len, times_cyc,\n fs, extrema_type=\"trough\", maxfev=2000, tol=None):\n \"\"\"Calculate guassian fits for single current \"\"\"\n\n # Initial parameter estimation\n _params = estimate_params(extrema, sig_cyc, fs, extrema_type=extrema_type, n_decimals=2)\n\n # Initial bound estimation for Na current\n _bounds = _estimate_bounds(sig_cyc, *_params.reshape(4, -1)[[0, 1, 3]])\n\n # Fit single skewed gaussian\n _params_fit = _fit_gaussians(times_cyc, sig_cyc, _params, tol, maxfev, index, bounds=_bounds)\n\n if np.isnan(_params_fit).any():\n _gaus = np.array([np.nan] * len(times_cyc))\n\n else:\n _gaus = sim_skewed_gaussian_cycle(1, cyc_len, *_params_fit)\n\n return _params_fit, _gaus\n\n\ndef calculate_side_regions(na_center, rem_sig, times_cyc, fs, z_thresh_k, z_thresh_cond):\n \"\"\"Calculate K current and conductive current regions\n of the signal based on the center of the Na current.\n \"\"\"\n\n rem_sig_k = rem_sig[na_center:,]\n rem_sig_cond = rem_sig[:na_center,]\n\n times_k = times_cyc[na_center:,]\n times_cond = times_cyc[:na_center,]\n\n # Calculate z scores\n z_score_k = st.zscore(rem_sig_k)\n z_score_cond = st.zscore(rem_sig_cond)\n\n rem_sigs = [rem_sig_k, rem_sig_cond]\n times = [times_k, times_cond]\n z_scores = [z_score_k,z_score_cond]\n\n return [rem_sigs, times, z_scores]\n\n\n###################################################################################################\n###################################################################################################\n\ndef _estimate_std(spike, extrema_type='trough', plot=False):\n \"\"\"Estimate std of spike\"\"\"\n\n spike = -spike if extrema_type == 'peak' else spike\n\n height, height_idx = np.min(spike), np.argmin(spike)\n half_height = height / 2\n\n right = spike[height_idx:]\n left = np.flip(spike[:height_idx+1])\n\n if plot:\n plt.plot(-spike if extrema_type=='peak' else spike)\n plt.axvline(height_idx, color='r')\n\n right_idx = _get_closest(right, spike, half_height)\n left_idx = _get_closest(left, spike, half_height)\n\n if right_idx == None:\n right_idx = left_idx\n\n if left_idx == None:\n left_idx = right_idx\n\n fwhm = (right_idx + left_idx + 1)\n\n std = fwhm / (2 * len(spike) * np.sqrt(2 * np.log(2)))\n\n return std\n\n\ndef _get_closest(flank, spike, half_height):\n\n for idx, volt in enumerate(flank):\n\n if volt > half_height:\n\n # Get closest sample left or right of half max location\n closest = np.argmin([volt - half_height,\n half_height - flank[idx-1]])\n\n idx = [idx, idx-1][closest]\n\n return idx\n\n\ndef get_current_peak(sig, fs, f_ranges, z_thresh, z_score):\n\n peaks, troughs = find_extrema(sig, fs, f_ranges, first_extrema=None, pass_type='bandpass')\n\n if len(peaks) == 0:\n return None\n elif len(peaks) > 1:\n #select highest peak\n max_volt = max( (v, i) for i, v in enumerate(sig[peaks]) )[1]\n peak = peaks[max_volt]\n\n else:\n peak = peaks[0]\n\n # check if peak is over z score threshold\n if z_score[peak] > z_thresh:\n return peak\n else:\n return None\n\n\ndef calculate_r_squared(sig_cyc, sig_cyc_est):\n\n residuals = sig_cyc - sig_cyc_est\n ss_res = np.sum(residuals**2)\n ss_tot = np.sum((sig_cyc - np.mean(sig_cyc))**2)\n\n r_squared = 1 - (ss_res / ss_tot)\n\n 
return r_squared\n\n", "sub_path": "bycycle/spikes/features/gaussians.py", "file_name": "gaussians.py", "file_ext": "py", "file_size_in_byte": 13792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "multiprocessing.cpu_count", "line_number": 62, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 67, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 69, "usage_type": "call"}, {"api_name": "bycycle.group.utils.progress_bar", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 118, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 145, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 174, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 247, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 262, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 263, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 269, "usage_type": "attribute"}, {"api_name": "neurodsp.sim.cycles.sim_skewed_gaussian_cycle", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 314, "usage_type": "attribute"}, {"api_name": 
"neurodsp.sim.cycles.sim_skewed_gaussian_cycle", "line_number": 317, "usage_type": "call"}, {"api_name": "scipy.stats.zscore", "line_number": 334, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 334, "usage_type": "name"}, {"api_name": "scipy.stats.zscore", "line_number": 335, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 335, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 360, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 385, "usage_type": "call"}, {"api_name": "bycycle.cyclepoints.find_extrema", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 418, "usage_type": "call"}]} +{"seq_id": "439241484", "text": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport pandas as pd\nfrom conf.const import HOUSE_DETAIL_INFO\nfrom lib.utility.date import get_date_string\n\n\ndef read_data(file_name, encoding='utf_8_sig'):\n data = None\n try:\n if file_name.endswith('.csv'):\n data = pd.read_csv(file_name, engine='python', encoding=encoding)\n elif file_name.endswith('.xlsx'):\n data = pd.read_excel(file_name)\n else:\n print('unknown file format:', file_name)\n except Exception as e:\n print('error happended:', file_name)\n\n return data\n\n\ndef get_dir_file_names(dir_name):\n ret_list = []\n for root, dirs, files in os.walk(dir_name):\n for file_path in files:\n ret_list.append(os.path.join(root, file_path))\n\n return ret_list\n\n\ndef merge_file(dir_names, district='futianqu', columns=None):\n total_files = get_dir_file_names(dir_names)\n data = pd.DataFrame()\n\n for file_name in total_files:\n if district in file_name:\n df_data = read_data(file_name)\n df_data.columns = columns\n data = data.append(df_data, ignore_index=True)\n\n return data\n\nif __name__ == '__main__':\n merge_type = r'ershou' # r'ershou'\n merge_district = r'nanshanqu' # r'nanshanqu'\n\n today = get_date_string()\n input_dir = r'/Users/a123/PycharmProjects/lianjia-beike-spider/data/ke/'+ merge_type + r'/sz/'+today\n print('input_dir:', input_dir)\n out_file = os.path.join('/Users/a123/PycharmProjects/lianjia-beike-spider/data/ke/sz',\n merge_district+r'_'+ merge_type +'.csv')\n print('out_file:', out_file)\n\n\n xiaoqu_columns = [r'日期', r'区', r'片区', r'小区', r'参考均价', r'在售套数', r'房屋年代', r'90天成交', r'在租房源', r'户型数',\n r'建筑类型', r'物业费用', r'物业公司', r'开发商', r'楼栋总数', r'房屋总数']\n\n ershou_columns = [r'日期', r'区', r'片区', r'小区', r'总价', r'关注人数', r'发布时间'] + HOUSE_DETAIL_INFO + \\\n ['建筑年代']\n\n columns = []\n if merge_type == r'xiaoqu':\n columns = xiaoqu_columns\n elif merge_type == r'ershou':\n columns = ershou_columns\n else:\n print('error merge_type', merge_type)\n\n df_data = merge_file(input_dir, merge_district, columns)\n if merge_type == r'ershou':\n df_data['总价'] = df_data['总价'].map(lambda 
x: x.rstrip('万'))\n df_data['建筑面积'] = df_data['建筑面积'].map(lambda x: x.rstrip('㎡'))\n df_data['参考均价'] =df_data.apply(lambda x: '%.2f' % (10000* float(x['总价']) / float(x['建筑面积']) ), axis=1)\n df_data.insert(5, '参考均价', df_data.pop('参考均价'))\n\n\n df_data.to_csv(out_file, index=False, encoding='utf-8-sig')\n", "sub_path": "tool/file_util.py", "file_name": "file_util.py", "file_ext": "py", "file_size_in_byte": 2835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 16, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "lib.utility.date.get_date_string", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "conf.const.HOUSE_DETAIL_INFO", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "87530504", "text": "from setuptools import setup\n\nwith open(\"README.md\", \"r\") as f:\n long_descr = f.read()\n\nsetup(\n name=\"4chandl\",\n entry_points={\"console_scripts\": [\"4chandl = 4chandl:main\"]},\n version=\"0.0.1\",\n description=\"Download all images in 4chan thread.\",\n long_description=long_descr,\n author=\"Connor Duffin\",\n author_email=\"connor.p.duffin@gmail.com\"\n)\n", "sub_path": "4chandl/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 369, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "504411112", "text": "from django.urls import path\nfrom . 
import views\n\n\napp_name = 'courses'\n\nurlpatterns = [\n path('courses', views.SubjectListView.as_view(), name='subject-list'),\n path('courses/', views.SubjectCoursesListView.as_view(), name='subject-courses-list'),\n path('courses/create/new', views.CourseCreateView.as_view(), name='course-create'),\n path('courses//', views.CourseDetailView.as_view(), name='course-detail'),\n path('courses///delete', views.CourseDeleteView.as_view(), name='course-delete'),\n path('courses///edit', views.CourseUpdateView.as_view(), name='course-update'),\n path('ajax/courses/get-outcome-media', views.get_outcome_media, name='ajax-get-outcome-media'),\n]\n\n", "sub_path": "sirius/courses/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "351465418", "text": "from __future__ import print_function\nimport json\nfrom os.path import abspath\nfrom ibm_watson import VisualRecognitionV3, ApiException\n\ntest_url = 'https://www.ibm.com/ibm/ginni/images' \\\n '/ginni_bio_780x981_v4_03162016.jpg'\n\n# If service instance provides IAM API key authentication\nservice = VisualRecognitionV3(\n '2018-03-19',\n ## url is optional, and defaults to the URL below. 
Use the correct URL for your region.\n url='https://gateway.watsonplatform.net/visual-recognition/api',\n iam_apikey='YOUR APIKEY')\n\n# with open(abspath('resources/cars.zip'), 'rb') as cars, \\\n# open(abspath('resources/trucks.zip'), 'rb') as trucks:\n# classifier = service.create_classifier('Cars vs Trucks',\n# positive_examples={'cars': cars},\n# negative_examples=trucks).get_result()\n# print(json.dumps(classifier, indent=2))\n\ncar_path = abspath(\"resources/cars.zip\")\ntry:\n with open(car_path, 'rb') as images_file:\n car_results = service.classify(\n images_file=images_file,\n threshold='0.1',\n classifier_ids=['default']).get_result()\n print(json.dumps(car_results, indent=2))\nexcept ApiException as ex:\n print(ex)\n\n# classifier = service.get_classifier('YOUR CLASSIFIER ID').get_result()\n# print(json.dumps(classifier, indent=2))\n\n# with open(abspath('resources/car.jpg'), 'rb') as image_file:\n# classifier = service.update_classifier('CarsvsTrucks_1479118188',\n# positive_examples={'cars_positive_examples': image_file}).get_result()\n# print(json.dumps(classifier, indent=2))\n\n# faces_result = service.detect_faces(url=test_url).get_result()\n# print(json.dumps(faces_result, indent=2))\n\n# response = service.delete_classifier(classifier_id='YOUR CLASSIFIER ID').get_result()\n# print(json.dumps(response, indent=2))\n\nclassifiers = service.list_classifiers().get_result()\nprint(json.dumps(classifiers, indent=2))\n\nface_path = abspath('resources/face.jpg')\nwith open(face_path, 'rb') as image_file:\n face_result = service.detect_faces(images_file=image_file).get_result()\n print(json.dumps(face_result, indent=2))\n\n#Core ml model example\n# model_name = '{0}.mlmodel'.format(classifier_id)\n# core_ml_model = service.get_core_ml_model(classifier_id).get_result()\n# with open('/tmp/{0}'.format(model_name), 'wb') as fp:\n# fp.write(core_ml_model.content)\n", "sub_path": "examples/visual_recognition_v3.py", "file_name": "visual_recognition_v3.py", "file_ext": "py", "file_size_in_byte": 2464, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ibm_watson.VisualRecognitionV3", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "ibm_watson.ApiException", "line_number": 31, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "21440719", "text": "from ._version import get_versions\n\n__version__ = get_versions()['version']\ndel get_versions\n\nfrom logging.handlers import SysLogHandler\nfrom logging.handlers import SYSLOG_UDP_PORT\nimport json\nimport socket\nimport traceback\nimport logging\n\nSYSLOG_LEVELS = {\n logging.CRITICAL: 2,\n logging.ERROR: 3,\n logging.WARNING: 4,\n logging.INFO: 6,\n logging.DEBUG: 7,\n}\n\n\n#see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py\ndef get_full_message(exc_info, message):\n return '\\n'.join(traceback.format_exception(*exc_info)) if exc_info else message\n\n\n#see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py\ndef make_message_dict(record, debugging_fields, extra_fields, fqdn, localname, facility=None):\n if fqdn:\n host = socket.getfqdn()\n elif localname:\n host = localname\n else:\n host = 
socket.gethostname()\n message_dict = {\n 'version': \"1.0\",\n 'host': host,\n 'short_message': record.getMessage(),\n 'message': get_full_message(record.exc_info, record.getMessage()),\n 'timestamp': record.created,\n 'level': SYSLOG_LEVELS.get(record.levelno, record.levelno),\n 'facility': facility or record.name,\n 'source_facility': facility or record.name,\n }\n\n if facility is not None:\n message_dict.update({\n '_logger': record.name\n })\n\n if debugging_fields:\n message_dict.update({\n 'file': record.pathname,\n 'line': record.lineno,\n '_function': record.funcName,\n '_pid': record.process,\n '_thread_name': record.threadName,\n })\n # record.processName was added in Python 2.6.2\n pn = getattr(record, 'processName', None)\n if pn is not None:\n message_dict['_process_name'] = pn\n if extra_fields:\n message_dict = get_fields(message_dict, record)\n return message_dict\n\n\n#See http://github.com/hoffmann/graypy/blob/master/graypy/handler.py\ndef get_fields(message_dict, record):\n # skip_list is used to filter additional fields in a log message.\n # It contains all attributes listed in\n # http://docs.python.org/library/logging.html#logrecord-attributes\n # plus exc_text, which is only found in the logging module source,\n # and id, which is prohibited by the GELF format.\n skip_fields = (\n 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',\n 'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',\n 'msecs', 'message', 'msg', 'name', 'pathname', 'process',\n 'processName', 'relativeCreated', 'thread', 'threadName')\n\n for key, value in record.__dict__.items():\n if key not in skip_fields and not key.startswith('_'):\n if isinstance(value, basestring):\n message_dict['_%s' % key] = value\n else:\n message_dict['_%s' % key] = repr(value)\n return message_dict\n\n\nclass CeeSysLogHandler(SysLogHandler):\n \"\"\"\n A syslog handler that formats extra fields as a CEE compatible structured log message. A CEE compatible message is\n a syslog log entry that contains a cookie string \"@cee:\" in its message part. 
Everything behind the colon is\n expected to be a JSON dictionary (containing no lists as children).\n\n See the following links for the specification of the CEE syntax:\n http://www.rsyslog.com/doc/mmpstrucdata.html\n http://cee.mitre.org\n http://cee.mitre.org/language/1.0-beta1/clt.html#appendix-1-cee-over-syslog-transport-mapping\n\n The handler is compatible to graypy and emits the same structured log messages as the graypy gelf handler does.\n\n Usage::\n\n import logging\n from cee_syslog_handler import CeeSysLogHandler\n\n logger = logging.getLogger('simple_example')\n logger.setLevel(logging.DEBUG)\n\n ch = CeeSysLogHandler(address=(\"10.2.160.20\", 514))\n ch.setLevel(logging.DEBUG)\n logger.addHandler(ch)\n\n logger.debug('debug message')\n logger.info('info message', extra=dict(foo=\"bar\"))\n\n Expected Ouput on the syslog side::\n\n Sep 9 09:31:11 10.128.4.107 : @cee: {\"message\": \"XXXXXXXXXXXX debug message\", \"level\": 7}\n Sep 9 09:31:11 10.128.4.107 : @cee: {\"_foo\": \"bar\", \"message\": \"XXXXXXXXXXX info message\", \"level\": 6}\n\n \"\"\"\n\n def __init__(self, address=('localhost', SYSLOG_UDP_PORT), socktype=socket.SOCK_DGRAM,\n debugging_fields=True, extra_fields=True, facility=None):\n \"\"\"\n\n :param address: Address of the syslog server (hostname, port)\n :param socktype: If specified (socket.SOCK_DGRAM or socket.SOCK_STREAM) uses UDP or TCP respectively\n :param debugging_fields: Whether to include file, line number, function, process and thread id in the log\n :param extra_fields: Whether to include extra fields (submitted via the keyword argument extra to a logger)\n in the log dictionary\n :param facility: If not specified uses the logger's name as facility\n \"\"\"\n super(CeeSysLogHandler, self).__init__(address, facility=SysLogHandler.LOG_USER, socktype=socktype)\n self._debugging_fields = debugging_fields\n self._extra_fields = extra_fields\n self._facility = facility\n\n def format(self, record):\n message = make_message_dict(record,\n self._debugging_fields,\n self._extra_fields,\n False,\n self._facility)\n return \": @cee: %s\" % json.dumps(message)\n", "sub_path": "cee_syslog_handler/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 5625, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "_version.get_versions", "line_number": 3, "usage_type": "call"}, {"api_name": "_version.get_versions", "line_number": 4, "usage_type": "name"}, {"api_name": "logging.CRITICAL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 18, "usage_type": "attribute"}, {"api_name": "traceback.format_exception", "line_number": 24, "usage_type": "call"}, {"api_name": "socket.getfqdn", "line_number": 30, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.handlers.SysLogHandler", "line_number": 90, "usage_type": "name"}, {"api_name": "logging.handlers.SYSLOG_UDP_PORT", "line_number": 125, "usage_type": "name"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 125, "usage_type": "attribute"}, {"api_name": "logging.handlers.SysLogHandler.LOG_USER", "line_number": 136, "usage_type": "attribute"}, {"api_name": 
"logging.handlers.SysLogHandler", "line_number": 136, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "120113266", "text": "import psycopg2\n\nfrom settings.settings import DATABASES\n\n\nclass DbConnection:\n \"\"\"\n Class to handle all db operations.\n \"\"\"\n conn = None\n\n def __init__(self):\n try:\n # connect to the PostgresSQL server\n self.conn = psycopg2.connect(DATABASES['DATABASE_URL'], sslmode='require')\n except Exception as error:\n print('Error while connection to database', error)\n\n def insert_data(self, user_id, query):\n \"\"\"\n Method in insert data in user search table\n :param user_id: User id integer\n :param query: query string\n :return: None\n \"\"\"\n try:\n cursor = self.conn.cursor()\n postgres_check_query = \"\"\" SELECT QUERY from user_search where USER_ID = %s and QUERY like %s\"\"\"\n record_to_insert = (user_id, query)\n cursor.execute(postgres_check_query, record_to_insert)\n records = cursor.fetchall()\n if not records:\n postgres_insert_query = \"\"\" INSERT INTO user_search (USER_ID, QUERY) VALUES (%s,%s)\"\"\"\n record_to_insert = (user_id, query)\n cursor.execute(postgres_insert_query, record_to_insert)\n self.conn.commit()\n count = cursor.rowcount\n print(count, 'Record inserted successfully into user search table')\n except Exception as error:\n print('Failed to insert record into user search table', error)\n finally:\n # closing database connection.\n if (self.conn):\n cursor.close()\n self.conn.close()\n print(\"PostgreSQL connection is closed\")\n\n def get_data(self, user_id, search):\n \"\"\"\n Method to get the recent search data from user search table\n :param user_id: User id integer\n :param search: user search string\n :return:\n \"\"\"\n try:\n cursor = self.conn.cursor()\n postgres_search_query = \"\"\" SELECT QUERY from user_search where USER_ID = %s and QUERY LIKE %s\"\"\"\n record_to_insert = (user_id, '%'+search+'%')\n cursor.execute(postgres_search_query, record_to_insert)\n records = cursor.fetchall()\n count = cursor.rowcount\n print(count, 'Record inserted successfully into user search table')\n return [record[0] for record in records], False\n except Exception as error:\n print('Failed to search record into user search table', error)\n finally:\n # closing database connection.\n if self.conn:\n cursor.close()\n self.conn.close()\n print(\"PostgreSQL connection is closed\")\n return [], True\n", "sub_path": "discord_bot/database_query.py", "file_name": "database_query.py", "file_ext": "py", "file_size_in_byte": 2767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "psycopg2.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "settings.settings.DATABASES", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "588306891", "text": "from AnalyseHTML import*\nfrom Tools import*\nimport datetime\nimport requests\nimport os\n\ndef ImageSave(imgUrl, imgDir, imgName, Referer, num_i, num_j):\n # 准备header\n Host = imgUrl\n Host = Host.replace('http://', '')\n Host = Host.replace('https://', '')\n Host = Host[:Host.find('/')]\n headers = {\n 'Host': Host,\n 'Referer': Referer,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'\n }\n\n # 下载\n response = requests.get(imgUrl , headers = headers , stream = True)\n image = response.content\n \n print('保存文件'+str(num_i)+'_'+str(num_j)+'\\n')\n with 
open(imgDir+imgName, 'wb') as img:\n img.write(image)\n\ndef check_status_code(status_code) :\n if status_code == 200 :\n print(\"Success!\")\n else :\n print(status_code)\n\ndef makefold(folddir, foldname) :\n folddir = os.getcwd()+folddir+'\\\\'+foldname\n if not os.path.exists(folddir) : \n os.mkdir(folddir)\n \n# 模拟登陆\ndef login() :\n headers = { \n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',\n 'Host': 'accounts.pixiv.net',\n 'Origin': 'https://accounts.pixiv.net',\n 'Referer': 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index'\n }\n \n # 获取post_key\n login_page = \"https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index\"\n response = s.get(login_page,headers = headers)\n check_status_code(response.status_code)\n \n mark1 = '\"post_key\" value=\"'\n mark2 = '\"><' \n post_key = getMark(response.text,mark1,mark2)\n\n # 模拟登录\n login_data = {\n 'pixiv_id': '1315441174@qq.com',\n 'password': 'xX17272542',\n 'captcha': '',\n 'g_recaptcha_response': '',\n 'post_key': post_key,\n 'source': 'pc'\n }\n \n login_url = 'https://accounts.pixiv.net/api/login?lang=zh'\n response = s.post(login_url, data = login_data, headers = headers)\n check_status_code(response.status_code)\n\n# 获取作品地址\ndef getpainting() :\n rank_page = \"http://www.pixiv.net/ranking_area.php?type=detail&no=6\"\n response = s.get(rank_page)\n check_status_code(response.status_code)\n \n # 获取作品地址\n mark1 = '

0:\r\n delete_user_from_course(get_year,\"1\",userid,to_delete_A,currUniversity)\r\n if len(to_delete_B) > 0:\r\n delete_user_from_course(get_year,\"2\",userid,to_delete_B,currUniversity)\r\n if len(to_add_A) > 0:\r\n add_user_to_course(get_year,\"1\",userid,to_add_A,currUniversity)\r\n if len(to_add_B) > 0:\r\n add_user_to_course(get_year,\"2\",userid,to_add_B,currUniversity)\r\n \r\n #user.chosenCoursesLists = courses\r\n user.chosenCoursesLists[get_year] = courses\r\n logging.info(\"write-10-save_data\")\r\n return user.put()\r\n\r\n@ndb.transactional\r\ndef delete_user_from_course(get_year,currSemester,userid,courses_to_del,currUniversity):\r\n course_stats = ndb.Key('n_University', currUniversity,'n_Courses_stats','semstats'+get_year + str(currSemester)).get()\r\n logging.info(\"read-10-delete_user_from_course\")\r\n if course_stats is None:\r\n return\r\n is_changed = False\r\n for course_to_del in courses_to_del:\r\n students_list = course_stats.stats.get(course_to_del)\r\n if students_list is not None and students_list.__contains__(userid):\r\n students_list.remove(userid)\r\n is_changed = True\r\n if is_changed:\r\n course_stats.put()\r\n logging.info(\"write-10-delete_user_from_course\")\r\n\r\n@ndb.transactional(xg=True)\r\ndef add_user_to_course(get_year,currSemester,userid,courses_to_add,currUniversity):\r\n \r\n course_stats = ndb.Key('n_University', currUniversity,'n_Courses_stats','semstats'+get_year + str(currSemester)).get()\r\n logging.info(\"read-10-add_user_to_course\")\r\n if course_stats is None:\r\n ancestor_key = ndb.Key('n_University', currUniversity)\r\n course_stats = n_Courses_stats(id ='semstats'+get_year + str(currSemester), semester = get_year + currSemester,parent = ancestor_key)\r\n is_changed = False\r\n for course_to_add in courses_to_add:\r\n students_list = course_stats.stats.get(course_to_add)\r\n if students_list is None:\r\n students_list = [userid]\r\n is_changed = True\r\n elif not students_list.__contains__(userid):\r\n students_list.append(userid)\r\n is_changed = True\r\n else:\r\n continue\r\n course_stats.stats[course_to_add] = students_list\r\n if is_changed:\r\n course_stats.put()\r\n logging.info(\"write-10-add_user_to_course\")\r\n\r\n\r\ndef load_data(currUniversity, userid, email, nick):\r\n \r\n get_year = get_recent_year()\r\n user = ndb.Key('n_University', currUniversity,'n_User', userid).get()\r\n logging.info(\"read-10-load_data\")\r\n if user is None or user.saved_data.get(get_year) is None:\r\n ancestor_key = ndb.Key('n_University', currUniversity)\r\n user = n_User(id= userid,userId = userid , userEmail =email,nickName = nick,saved_data = {get_year:{'displaySettingsArr' : [{},{}], 'coursesInfoArr' : [[],[]]}},parent = ancestor_key)\r\n logging.info(\"write-10-load_data\")\r\n user.put() \r\n logging.info(user)\r\n logging.info(user.saved_data)\r\n logging.info(user.saved_data.get(get_year))\r\n logging.info(user.saved_data.get(get_year)['coursesInfoArr'])\r\n logging.info(user.saved_data.get(get_year)['coursesInfoArr'][0])\r\n for course in user.saved_data.get(get_year)['coursesInfoArr'][0]:\r\n curr_course = create_query_of_course_info(currUniversity, \"1\", course['cNum'][:4], course['cNum'])\r\n if curr_course is None:\r\n user.saved_data.get(get_year)['coursesInfoArr'][0].remove(course)\r\n else:\r\n course['havurotNum'] = curr_course['havurotNum']\r\n course['zchutHours'] = curr_course['zchutHours']\r\n course['kvutzaData'] = curr_course['kvutzaData']\r\n for course in user.saved_data.get(get_year)['coursesInfoArr'][1]:\r\n 
curr_course = create_query_of_course_info(currUniversity, \"2\", course['cNum'][:4], course['cNum'])\r\n if curr_course is None:\r\n user.saved_data.get(get_year)['coursesInfoArr'][1].remove(course)\r\n else:\r\n course['havurotNum'] = curr_course['havurotNum']\r\n course['zchutHours'] = curr_course['zchutHours']\r\n course['kvutzaData'] = curr_course['kvutzaData'] \r\n return user.saved_data.get(get_year)\r\n\r\ndef create_query_of_friend_list_for_course(currUniversity,currSemester,departNum,courseNum,friends):\r\n get_year = get_recent_year()\r\n uni = n_University.get_by_id(currUniversity)\r\n logging.info(\"read-4-create_query_of_friend_list_for_course\")\r\n course_stats = ndb.Key('n_University', currUniversity,'n_Courses_stats','semstats'+get_year + str(currSemester)).get()\r\n logging.info(\"read-4-create_query_of_friend_list_for_course\")\r\n if course_stats is None:\r\n return [[],0]\r\n students_list = course_stats.stats.get(courseNum)\r\n if students_list is None:\r\n return [[],0]\r\n friends_in_course = []\r\n for friend in friends:\r\n google_id = uni.facebook_to_google_map.get(friend[\"id\"])\r\n if google_id is None:\r\n continue\r\n if students_list.__contains__(google_id):\r\n friends_in_course.append(friend)\r\n \r\n return [friends_in_course,len(students_list)]\r\n\r\n@ndb.transactional\r\ndef save_fb_and_google_ids(currUniversity,fbID,googleID):\r\n univ = n_University.get_by_id(currUniversity)\r\n logging.info(\"read-4-save_fb_and_google_ids\")\r\n univ.facebook_to_google_map[fbID] = googleID\r\n logging.info(\"write-4-save_fb_and_google_ids\")\r\n return univ.put()\r\n\r\ndef save_events(currUniversity,userid,eventLinkList,email,nick):\r\n get_year = get_recent_year()\r\n user = ndb.Key('n_University', currUniversity,'n_User', userid).get()\r\n logging.info(\"read-10-save_events\")\r\n if user is None:\r\n ancestor_key = ndb.Key('n_University', currUniversity)\r\n user = n_User(id= userid,userId = userid , userEmail =email,nickName = nick,saved_data = {get_year:{'displaySettingsArr' : [{},{}], 'coursesInfoArr' : [[],[]]}},parent = ancestor_key)\r\n user.events = eventLinkList\r\n logging.info(\"write-10-save_events\")\r\n return user.put()\r\n\r\ndef load_events(currUniversity,userId):\r\n user = ndb.Key('n_University', currUniversity,'n_User', userId).get()\r\n if user is None:\r\n return [[],[],[]]\r\n logging.info(\"read-10-load_events\")\r\n return user.events\r\n\r\ndef getFriendCourses(currUniversity,userFacebookId):\r\n get_year = get_recent_year()\r\n univ = n_University.get_by_id(currUniversity)\r\n logging.info(\"read-4-getFriendCourses\")\r\n gID = univ.facebook_to_google_map.get(userFacebookId)\r\n user = ndb.Key('n_University', currUniversity,'n_User', gID).get()\r\n logging.info(\"read-8-getFriendCourses\")\r\n return user.saved_data[get_year]['displaySettingsArr']\r\n", "sub_path": "DB/DbSearchQueries.py", "file_name": "DbSearchQueries.py", "file_ext": "py", "file_size_in_byte": 12213, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "google.appengine.ext.ndb.Key", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 14, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 15, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 22, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 23, 
"usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 31, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 31, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 37, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 37, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 38, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 49, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 49, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 50, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 55, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 55, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 56, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 67, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 67, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 76, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 76, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 77, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 85, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 85, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 128, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 133, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 133, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 134, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 145, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 131, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 131, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 150, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 150, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 151, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 153, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 153, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 169, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 147, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 147, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 175, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 175, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 176, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 178, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 178, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 180, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 182, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 183, "usage_type": "call"}, {"api_name": "logging.info", 
"line_number": 184, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 185, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 186, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 208, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 209, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 209, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 210, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 229, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 231, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 226, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 226, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 236, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 236, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 237, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 239, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 239, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 242, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 246, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 246, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 249, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 255, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 257, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 257, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 258, "usage_type": "call"}]} +{"seq_id": "588346090", "text": "from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\nfrom flask import abort\n\nimport couchdb\nfrom uuid import uuid4\n\napp = Flask( __name__ )\n\ncouch = couchdb.Server( 'http://192.168.99.101:3306/' )\n\nif 'matches' not in couch:\n couch.create( 'matches' )\nmatch_table = couch[ 'matches' ]\n\nif 'categories' not in couch:\n db = couch.create( 'categories' )\n default_categories = [\n 'Rock', 'Pop', 'Electronica', 'Rap', 'Rock alternativo', 'Hip hop',\n 'Reggae', 'Reggaeton', 'Bachata', 'Clasica', 'Balada', 'Salsa', 'Punk', 'Jazz',\n 'Rock sinfonico', 'Grunge', 'Cumbia', 'Dance', 'Ska', 'Tecno', 'Disco',\n 'Blues', 'Opera', 'Tango', 'Vallenato', 'Ranchera', 'Samba', 'Mambo',\n 'Bolero', 'Protesta'\n ]\n for cat in default_categories:\n doc_id = uuid4( ).hex\n db[ doc_id ] = {\n 'category_id' : uuid4( ).hex,\n 'category_name' : cat\n }\n\ncategory_table = couch[ 'categories' ]\n\nif 'user_categories' not in couch:\n couch.create( 'user_categories' )\nuser_categories_table = couch[ 'user_categories' ]\n\nmatch_doc = {\n 'user_id' : -1,\n 'file_id' : -1,\n 'category_id' : -1\n}\n\nmatches_by_user = ''' function( doc ) {\n emit( doc.user_id, [ doc.file_id, doc.category_id ] );\n}\n'''\n\nmatches_by_user_file_id = ''' function( doc ) {\n emit( doc.user_id, [ doc._id, doc.file_id ] )\n}\n'''\n\ndefault_categories_view = ''' function( doc ) {\n emit( doc.category_id, doc.category_name )\n}\n'''\n\nuser_categories_view = ''' function( doc ) {\n emit( doc.user_id, [ doc.category_id, doc.category_name, doc._id ] )\n}\n'''\n\n@app.route( '/' )\ndef index( 
):\n return 'Classifier ms is working B|'\n\n################################################ MATCH ROUTES ##########################################################\n\n@app.route( '/user//match', methods=[ 'POST', 'PUT' ] )\ndef new_match( user_id ):\n if not request.json or not 'file_id' in request.json or not 'category_id' in request.json:\n abort( 400 )\n\n category_found = False\n results = category_table.query( default_categories_view )\n for row in results:\n if row.key == request.json[ 'category_id' ]:\n category_found = True\n break\n\n if not category_found:\n results = user_categories_table.query( user_categories_view )\n for row in results[ user_id ]:\n if row.value[ 0 ] == request.json[ 'category_id' ]:\n category_found = True\n break\n\n if not category_found:\n return jsonify({\n 'error' : 'Category with id %s does not exist.' % ( request.json[ 'category_id' ] )\n }), 400\n\n match = {\n 'user_id' : user_id,\n 'file_id' : int( request.json[ 'file_id' ] ),\n 'category_id' : request.json[ 'category_id' ]\n }\n\n results = match_table.query( matches_by_user_file_id )\n\n if request.method == 'POST':\n\n for row in results[ user_id ]:\n if row.value[ 1 ] == int( request.json[ 'file_id' ] ):\n return jsonify({\n 'error' : 'File with id %s already has a category match.' % ( request.json[ 'file_id' ] )\n }), 400\n\n match_id = uuid4( ).hex\n match_table[ match_id ] = match\n\n return jsonify( match ), 201\n\n elif request.method == 'PUT':\n\n for row in results[ user_id ]:\n if row.value[ 1 ] == int( request.json[ 'file_id' ] ):\n doc = match_table[ row.value[ 0 ] ]\n doc[ 'category_id' ] = request.json[ 'category_id' ]\n match_table[ row.value[ 0 ] ] = doc\n return jsonify( match ), 200\n\n return jsonify({\n 'error' : 'There is no match for file with id %s.' % ( request.json[ 'file_id' ] )\n }), 400\n\n@app.route( '/user//match/', methods=[ 'DELETE' ] )\ndef destroy_match( user_id, file_id ):\n results = match_table.query( matches_by_user_file_id )\n\n for row in results[ user_id ]:\n if row.value[ 1 ] == file_id:\n del match_table[ row.value[ 0 ] ]\n return jsonify({\n 'message': 'Match deleted.'\n }), 200\n\n return jsonify({\n 'error' : 'There is no match for file with id %i.' 
% ( file_id )\n }), 400\n\n@app.route( '/user//matches' )\ndef get_user_matches( user_id ):\n results = match_table.query( matches_by_user )\n data = []\n for row in results[ user_id ]:\n data.append({\n 'file_id' : row.value[ 0 ],\n 'category_id' : row.value[ 1 ]\n })\n\n return jsonify({\n 'data' : data\n }), 200\n\n############################################## END MATCH ROUTES ########################################################\n\n######################################### DEFAULT CATEGORIES ROUTES ####################################################\n\n@app.route( '/categories' )\ndef default_categories( ):\n results = category_table.query( default_categories_view )\n data = []\n for row in results:\n data.append({\n 'category_id' : row.key,\n 'category_name' : row.value\n })\n return jsonify( data ), 200\n\n####################################### END DEFAULT CATEGORIES ROUTES ##################################################\n\n########################################### USER CATEGORIES ROUTES #####################################################\n\n@app.route( '/user//categories', methods=[ 'GET', 'POST', 'PUT', 'DELETE' ] )\ndef user_categories( user_id ):\n results = user_categories_table.query( user_categories_view )\n results2 = category_table.query( default_categories_view )\n\n if request.method == 'GET':\n data = []\n for row in results[ user_id ]:\n data.append({\n 'category_id' : row.value[ 0 ],\n 'category_name' : row.value[ 1 ]\n })\n return jsonify( data ), 200\n elif request.method == 'POST':\n if not request.json or not 'category_name' in request.json:\n abort( 400 )\n\n for row in results[ user_id ]:\n if row.value[ 1 ].lower( ) == request.json[ 'category_name' ].lower( ):\n return jsonify({\n 'error' : 'There is a category with the same name.'\n }), 400\n\n for row in results2:\n if row.value.lower( ) == request.json[ 'category_name' ].lower( ):\n return jsonify({\n 'error' : 'There is a category with the same name.'\n }), 400\n\n doc_id = uuid4( ).hex\n user_categories_table[ doc_id ] = {\n 'user_id' : user_id,\n 'category_id' : uuid4( ).hex,\n 'category_name' : request.json[ 'category_name' ]\n }\n return jsonify( user_categories_table[ doc_id ] ), 201\n elif request.method == 'PUT':\n if not request.json or not 'category_id' in request.json or not 'category_name' in request.json:\n abort( 400 )\n\n for row in results[ user_id ]:\n if row.value[ 0 ] == request.json[ 'category_id' ]:\n doc = user_categories_table[ row.value[ 2 ] ]\n doc[ 'category_name' ] = request.json[ 'category_name' ]\n user_categories_table[ row.value[ 2 ] ] = doc\n\n return jsonify( user_categories_table[ row.value[ 2 ] ] ), 200\n\n\n elif request.method == 'DELETE':\n if not request.json or not 'category_id' in request.json:\n abort( 400 )\n\n for row in results[ user_id ]:\n if row.value[ 0 ] == request.json[ 'category_id' ]:\n del user_categories_table[ row.value[ 2 ] ]\n return jsonify({\n 'message': 'User category deleted.'\n }), 200\n\n######################################### END USER CATEGORIES ROUTES ###################################################\n\n############################################## QUERYING ROUTES #########################################################\n\n@app.route( '/user//category_for_file/' )\ndef category_for_file( user_id, file_id ):\n\n results = match_table.query( matches_by_user )\n for row in results[ user_id ]:\n if row.value[ 0 ] == file_id:\n\n cat_name = \"\"\n user_categories_results = user_categories_table.query( user_categories_view )\n 
default_categories_results = category_table.query( default_categories_view )\n\n found = False\n for k in default_categories_results[ row.value[ 1 ] ]:\n cat_name = k.value\n found = True\n break\n\n if not found:\n for k in user_categories_results[ user_id ]:\n if k.value[ 0 ] == row.value[ 1 ]:\n cat_name = k.value[ 1 ]\n break\n\n return jsonify({\n 'category_id' : row.value[ 1 ],\n 'category_name' : cat_name\n }), 200\n\n return jsonify({\n 'error' : 'There is no file with id %i for user with id %i.' % ( file_id, user_id )\n })\n\n@app.route( '/user//files_for_category/' )\ndef files_for_category( user_id, category_id ):\n\n results = match_table.query( matches_by_user )\n data = []\n for row in results[ user_id ]:\n if category_id == row.value[ 1 ]:\n data.append({\n 'file_id' : row.value[ 0 ]\n })\n return jsonify({\n 'data' : data\n }), 200\n\n\n############################################ END QUERYING ROUTES #######################################################\n\nif __name__ == '__main__':\n app.run( host='0.0.0.0', port=3004 )\n", "sub_path": "category_classifier/category_classifier_ms/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9800, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "couchdb.Server", "line_number": 12, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 28, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 120, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 127, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 127, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 183, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 191, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 192, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 192, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 193, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 196, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 197, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 202, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 202, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 203, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 207, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 210, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 211, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 211, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 215, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 215, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 219, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 219, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 221, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 221, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 228, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 228, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 229, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 232, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 234, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "337829706", "text": "import pygame\nfrom sys import exit\nfrom pygame.locals import *\nfrom data.backgrounds import Backgound as Back\n\n# Class that control the Skilss menu\nclass Skills(object):\n def __init__(self, screen, nivel):\n self.screen = screen\n self.nivel = nivel\n self.painel = pygame.image.load(\"resources/image/skills/painel.png\").convert_alpha()\n self.title = pygame.image.load(\"resources/image/title/MinoTrolls1.png\").convert_alpha()\n self.arrows = []\n self.arrowsPosition = [(145, 160), (525, 160)]\n self.backgrounds = Back(screen)\n self.allCard = []\n self.cards = ['kickingCard', 'slashingCard', 'battleaxCard', 'fireCard', 'bluefireCard']\n self.description = [('5%', '5%', 'unlimited'), ('20%', '20%', 'unlimited'), ('30%', '25%', 'unlimited'), ('40%', '100%', 'limited'), ('45%', '100%', 'limited')]\n self.descrPos = 0\n self.copyCards = ['kickingCard', 'slashingCard', 'battleaxCard', 'fireCard', 'bluefireCard']\n self.state = [False,False,False,False,False]\n self.font = pygame.font.Font(\"resources/font/montserrat-font/MontserratMedium-nRxlJ.ttf\", 16)\n self.font.set_bold(True)\n self.cardsDiscription = ''\n self.skillsOfPlayer()\n self.skillsControl = []\n self.allCardsPosition = []\n self.cardsActive = True\n self.backButtom = ''\n self.currentCard = self.cards[int(len(self.cards)/2)]\n self.count = 0\n self.level = 0\n self.stateLock = False\n\n # Method to make a list it a option in skills\n def displayButtoms(self):\n self.allCard = []\n self.allCardsPosition = []\n self.arrows = []\n right = []\n img1 = ''\n self.x, self.y = 228, 130\n count = 0\n for (card, state) in zip(self.cards, self.state):\n if(self.currentCard == card):\n if self.cardsActive:\n img1 = pygame.image.load(\"resources/image/skills/pergaminios/\"+self.currentCard+\"2-\"+str(state)+\".png\").convert_alpha()\n self.arrows.append(pygame.image.load(\"resources/image/skills/arrows/leftarrow1.png\").convert_alpha())\n self.arrows.append(pygame.image.load(\"resources/image/skills/arrows/rightarrow1.png\").convert_alpha())\n self.backButtom = pygame.image.load(\"resources/image/skills/Back1.png\").convert_alpha()\n self.stateLock = state\n else:\n img1 = pygame.image.load(\"resources/image/skills/pergaminios/\"+self.currentCard+\"0.png\").convert_alpha()\n self.arrows.append(pygame.image.load(\"resources/image/skills/arrows/leftarrow0.png\").convert_alpha())\n self.arrows.append(pygame.image.load(\"resources/image/skills/arrows/rightarrow0.png\").convert_alpha())\n self.backButtom = pygame.image.load(\"resources/image/skills/Back2.png\").convert_alpha()\n self.descrPos = count \n self.cardsDiscription = pygame.image.load(\"resources/image/skills/description/pergaminio-\"+str(state)+\".png\")\n self.x = 390\n else:\n img = pygame.image.load(\"resources/image/skills/pergaminios/\"+card+\"1.png\").convert_alpha()\n if(count < self.cards.index(self.currentCard)):\n self.allCard.append(img)\n self.allCardsPosition.append((self.x, self.y))\n self.x += 40\n elif(count > self.cards.index(self.currentCard)):\n right.append(img)\n self.allCardsPosition.append((self.x, self.y))\n self.x -= 40\n count += 1\n self.allCard += right[::-1]\n self.x, 
self.y = 300, 114\n self.allCard.append(img1)\n self.allCardsPosition.append((self.x, self.y))\n \n # method to draw the painel skills on the screen\n def drawingSkillsPainel(self):\n self.backgrounds.settingBackgroundMenu(2)\n self.screen.blit(self.painel, (75, 45))\n self.screen.blit(self.title, (270, 55))\n # cardName = pygame.image.load(\"resources/image/skills/\"+self.currentCard+\".png\")\n # self.screen.blit(cardName, (270, 100))\n [self.screen.blit(card, pos) for card, pos in zip(self.allCard, self.allCardsPosition)]\n [self.screen.blit(arrows, pos) for arrows, pos in zip(self.arrows, self.arrowsPosition)]\n\n # display description\n \n self.screen.blit(self.backButtom, (260, 380))\n self.screen.blit(self.cardsDiscription, (225,240))\n ty = 280\n aux = ['Damage: ', 'Precision: ', 'Attempts: ']\n for text, text1 in zip(aux, self.description[self.descrPos]):\n size = pygame.font.Font.size(self.font, text+text1)\n line = self.font.render(text+text1, True, (0, 0,0))\n self.screen.blit(line, ((700/2-size[0]/2), ty))\n ty += 20\n\n self.levelToUnlockTheCards()\n # display of the level to unlock\n def levelToUnlockTheCards(self):\n count = 0\n for card in self.copyCards:\n if(card == self.currentCard):\n if(self.copyCards.index(card)==1):\n self.level = count\n else:\n self.level = count - 1\n break\n count += 1\n if(not self.stateLock):\n self.font.set_bold(True)\n line = self.font.render(str(self.level), True, (0, 0,0))\n self.screen.blit(line, (415, 253))\n\n # method to Know, how many skills the player already have\n def skillsOfPlayer(self):\n count = 0\n maxSkills = 0\n for state in self.state:\n if (self.nivel == 0):\n maxSkills = 2\n\n elif(self.nivel == 1):\n maxSkills = 4\n\n elif(self.nivel >= 2):\n maxSkills = 5\n\n n = self.state[count]\n if(self.state.index(n) 10)):\n self.movingLeftInSkillsDisplay()\n self.count = 0\n elif(self.cardsActive and key_pressed[K_LEFT] and (self.count > 10)):\n self.movingRightInSkillsDisplay()\n self.count = 0\n\n self.count += 1 \n self.currentCard = self.cards[int(len(self.cards)/2)]\n\n # method to move in the painel of the skills\n def movingInPainelSkills(self):\n key_pressed = pygame.key.get_pressed()\n self.movingInSkillsDsiplay()\n self.displayButtoms()\n self.drawingSkillsPainel()\n\n # Setting the move in skills painel\n if (key_pressed[K_DOWN] and self.cardsActive and (self.count > 10)):\n self.cardsActive = False\n self.count = 0\n elif(key_pressed[K_UP] and not self.cardsActive and (self.count > 10)):\n self.cardsActive = True\n self.count = 0\n\n # control the painelState\n if(not self.cardsActive and (key_pressed[K_RETURN] or key_pressed[K_KP_ENTER])):\n return 3,self.cardsActive\n else:\n return 9,self.cardsActive\n \n # method of tuturial of the skills\n def skillsTuturial(self):\n pass\n", "sub_path": "data/menus/skills/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 9095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "data.backgrounds.Backgound", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 22, "usage_type": 
"attribute"}, {"api_name": "pygame.image.load", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.font.Font.size", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 194, "usage_type": "attribute"}]} +{"seq_id": "485918604", "text": "from utils import run_extractor, AbundanceMapsExtractor, profile_code\n\nimport argparse\nimport cProfile\nimport numpy as np\nfrom sklearn import manifold\n\nclass LLEExtractor(AbundanceMapsExtractor):\n\tdef __init__(self, args, profile):\n\t\tself.profile = profile\n\t\tself.profile_filename = \"atgp.prof\"\n\t\tself.model = manifold.LocallyLinearEmbedding(**args)\n\t\tsuper(LLEExtractor, self).__init__()\n\n\tdef extract_abundance_maps(self, hsi_3d, n_endmembers):\n\t\thsi_2d = hsi_3d.reshape( (-1, hsi_3d.shape[2]) )\n\t\tabundance_maps = profile_code(\n\t\t\t\tself.profile, self.profile_filename)(\n\t\t\t\t\t\tself.model.fit_transform)(hsi_2d)\n\t\tabundance_maps = np.moveaxis(abundance_maps, 1, 0)\n\t\tabundance_maps = np.reshape(abundance_maps,\n\t\t\t\t(abundance_maps.shape[0], hsi_3d.shape[0], hsi_3d.shape[1]))\n\t\treturn abundance_maps\n\ndef main(in_filename, out_filename, lle_args, profile):\n\textractor = LLEExtractor(lle_args, profile)\n\trun_extractor(in_filename,\n\t\t\tout_filename, extractor, lle_args[\"n_components\"])\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"Locally Linear Embeding\",\n\t\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\tparser.add_argument(\"--lle-n-neighbors\",\n\t\t\ttype=int, default=5, metavar=\"int\",\n\t\t\thelp=\"number of neighbors to consider for each point\")\n\tparser.add_argument(\"--lle-n-components\",\n\t\t\ttype=int, default=2, metavar=\"int\",\n\t\t\thelp=\"number of coordinates for 
the manifold\")\n\tparser.add_argument(\"--lle-reg\",\n\t\t\ttype=float, default=0.001, metavar=\"float\",\n\t\t\thelp=\"regularization constant, multiplies the trace of the local covariance matrix of the distances\")\n\tparser.add_argument(\"--lle-eigen-solver\",\n\t\t\ttype=str, default=\"auto\", metavar=\"str\",\n\t\t\tchoices=[\"auto\", \"arpack\", \"dense\"],\n\t\t\thelp=\"choices: {0}\".format([\"auto\", \"arpack\", \"dense\"]))\n\tparser.add_argument(\"--lle-tol\",\n\t\t\ttype=float, default=0.0, metavar=\"float\",\n\t\t\thelp=\"convergence tolerance passed to arpack or lobpcg; not used if eigen_solver == 'dense'\")\n\tparser.add_argument(\"--lle-max-iter\",\n\t\t\ttype=int, default=None, metavar=\"int\",\n\t\t\thelp=\"maximum number of iterations for the arpack solver; not used if eigen_solver == 'dense'\")\n\tparser.add_argument(\"--lle-method\",\n\t\t\ttype=str, default=\"standard\", metavar=\"str\",\n\t\t\tchoices=[\"standard\", \"hessian\", \"modified\", \"ltsa\"],\n\t\t\thelp=\"locally linear embedding algorithm - choices: {0}\".format([\"standard\", \"hessian\", \"modified\", \"ltsa\"]))\n\tparser.add_argument(\"--lle-hessian-tol\",\n\t\t\ttype=float, default=0.001, metavar=\"float\",\n\t\t\thelp=\"tolerance for hessian eigenmapping method; only used if method == 'hessian'\")\n\tparser.add_argument(\"--lle-modified-tol\",\n\t\t\ttype=float, default=1e-12, metavar=\"float\",\n\t\t\thelp=\"tolerance for modified LLE method; only used if method == 'modified'\")\n\tparser.add_argument(\"--lle-neighbors-algorithm\",\n\t\t\ttype=str, default=\"auto\", metavar=\"str\",\n\t\t\tchoices=[\"auto\", \"brute\", \"kd_tree\", \"ball_tree\"],\n\t\t\thelp=\"algorithm to use for nearest neighbors search - choices {0}\".format([\"auto\", \"brute\", \"kd_tree\", \"ball_tree\"]))\n\tparser.add_argument(\"--lle-random-state\",\n\t\t\ttype=int, default=None, metavar=\"int\",\n\t\t\thelp=\"seed used by the random number generator\")\n\tparser.add_argument(\"--lle-n-jobs\",\n\t\t\ttype=int, default=1, metavar=\"int\",\n\t\t\thelp=\"number of parallel jobs; if -1, then the number of jobs is set to the number of cores\")\n\n\tparser.add_argument(\"in_filename\", type=str, metavar=\"infile\")\n\tparser.add_argument(\"out_filename\", type=str, metavar=\"outfile\")\n\t\n\tparser.add_argument(\"--profile\",\n\t\t\taction=\"store_true\",\n\t\t\thelp=\"profile program execution\")\n\t\n\targuments = vars(parser.parse_args())\n\t\n\targs = {}\n\tfor key,value in arguments.iteritems():\n\t\tif key.startswith(\"lle_\"):\n\t\t\targs.setdefault(\"lle_args\", {})[key[len(\"lle_\"):]] = value\n\t\telse:\n\t\t\targs[key] = value\n\t\n\tmain(**args)\n", "sub_path": "Notebooks/unmixing_scripts/lle.py", "file_name": "lle.py", "file_ext": "py", "file_size_in_byte": 3784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "utils.AbundanceMapsExtractor", "line_number": 8, "usage_type": "name"}, {"api_name": "sklearn.manifold.LocallyLinearEmbedding", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.manifold", "line_number": 12, "usage_type": "name"}, {"api_name": "utils.profile_code", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.run_extractor", "line_number": 27, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": 
"argparse.ArgumentDefaultsHelpFormatter", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "270431636", "text": "# Copyright 2019 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mlrun import import_function\nimport os\nimport pandas as pd\nfrom xgb_serving import XGBoostModel\n\ndef test_local_xgb_serving():\n # importing data preparation function (gen_class_data) locally\n fn = import_function(\"hub://gen_class_data\")\n gen_data_run = fn.run(params={\"n_samples\": 10_000,\n \"m_features\": 5,\n \"k_classes\": 2,\n \"header\": None,\n \"weight\": [0.5, 0.5],\n \"sk_params\": {\"n_informative\": 2},\n \"file_ext\": \"csv\"},\n local=True,\n artifact_path=\"./\")\n\n # importing model training function (xgb_trainer) locally\n fn = import_function(\"../xgb_trainer/function.yaml\")\n xgb_trainer_run = fn.run(params={\"model_type\": \"classifier\",\n \"CLASS_tree_method\": \"hist\",\n \"CLASS_objective\": \"binary:logistic\",\n \"CLASS_booster\": \"gbtree\",\n \"FIT_verbose\": 0,\n \"label_column\": \"labels\"},\n local=True,\n inputs={\"dataset\": gen_data_run.artifact('classifier-data').url},\n artifact_path='./')\n\n # because this class is implemented with MLModelServer, creating a class instance and not to_mock_server(V2_Model_Server).\n model = xgb_trainer_run.artifact('model').url\n my_server = XGBoostModel(\"my-model\", model_dir=model)\n my_server.load()\n # Testing the model\n xtest = pd.read_csv(gen_data_run.artifact('classifier-data').url)\n preds = my_server.predict({\"instances\": xtest.values[:10, :-1].tolist()})\n assert (True if preds == [1, 0, 0, 0, 0, 0, 1, 1, 0, 1] else False) is True", "sub_path": "xgb_serving/test_xgb_serving.py", "file_name": "test_xgb_serving.py", "file_ext": "py", "file_size_in_byte": 2492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "mlrun.import_function", "line_number": 22, "usage_type": "call"}, {"api_name": "mlrun.import_function", "line_number": 34, "usage_type": "call"}, {"api_name": "xgb_serving.XGBoostModel", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "548124060", "text": "\"\"\"\nauthor: audreyc\nlast updated: 01/29/2017\n\"\"\"\n\nimport pickle\nimport json\n\nfile_name = 'error_analysis/exptype_eval_de'\nwith open(file_name, 'r', encoding='utf-8') as f:\n line_count = 0\n no_ds_response = 0\n no_user_history = 0\n no_expensetypes = 0\n total_assessed = 0\n et_correct = 0\n et_inresponse = 0\n et_same = 0\n for line in f:\n line_count += 1\n if line_count % 10000 == 0:\n print(line_count)\n p = line.split('\\t')\n datekey = p[0]\n ds_req = p[1]\n ds_resp = p[2]\n location_country = p[3]\n expense_type_name = p[4]\n exp_key = p[5].replace('\\n', '')\n\n if len(ds_resp) > 2:\n ds_response = json.loads(ds_resp)\n else:\n no_ds_response += 1\n continue\n\n if 'expenseTypes' not in ds_response['tokensV2'].keys():\n no_expensetypes += 1\n continue\n\n 
total_assessed += 1\n out_keys = ds_response['tokensV2']['expenseTypes']\n my_expkey = out_keys[0]['value']\n\n # print(\"my exp\", out_keys)\n # print(type(my_expkey), my_expkey)\n # print(\"real exp key\", exp_key)\n # print(type(exp_key))\n # input(\"pause\")\n\n if my_expkey == exp_key:\n et_correct += 1\n\n print(\"line count: \" + str(line_count))\n print(\"no ds response: \" + str(no_ds_response))\n print(\"no expense types in ds response: \" + str(no_expensetypes))\n print(\"total assessed: \" + str(total_assessed))\n print(\"total et correct: %d (%.2f)\" % (et_correct, et_correct / total_assessed * 100))\n", "sub_path": "compare_langs.py", "file_name": "compare_langs.py", "file_ext": "py", "file_size_in_byte": 1593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "json.loads", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "599605829", "text": "from areaWide import AreaWide\nimport drawing as draw\nimport sounddevice as sd\nfrom utils import interpolate\n\nclass ListOfDevicesWide(AreaWide):\n def __init__(self, left = 1, top = 19):\n super().__init__(left, top) \n self.page = 0\n \n def redraw(self):\n draw.clearRect(self.left, self.top + 1, self.WIDTH, self.HEIGHT);\n draw.rectangle(self.left, self.top + 1, self.WIDTH, self.HEIGHT, '@light')\n devices = sd.query_devices()\n for index in range(self.page * 6, self.page * 6 + 6): \n if index < len(devices):\n dev = devices[index]\n text = str(index) + ': ' + dev['name']\n draw.text(text, self.left, self.top + 1 + index % 6, '#333333 console topleft')\n\n def redrawTitle(self):\n draw.clearRect(self.left, self.top, self.WIDTH, 1);\n draw.rectangle(self.left, self.top, self.WIDTH, 1, '@neutral')\n draw.text('List of available devices', self.left, interpolate('{self.top}ch + 2p'), '#ffffff console topleft')\n\n def rightPressed(self):\n devices = sd.query_devices()\n if (self.page + 1) * 6 < len(devices):\n self.page += 1\n self.redraw()\n\n def leftPressed(self):\n if self.page > 0:\n self.page -= 1\n self.redraw()", "sub_path": "listOfDevicesWide.py", "file_name": "listOfDevicesWide.py", "file_ext": "py", "file_size_in_byte": 1349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "areaWide.AreaWide", "line_number": 6, "usage_type": "name"}, {"api_name": "drawing.clearRect", "line_number": 12, "usage_type": "call"}, {"api_name": "drawing.rectangle", "line_number": 13, "usage_type": "call"}, {"api_name": "sounddevice.query_devices", "line_number": 14, "usage_type": "call"}, {"api_name": "drawing.text", "line_number": 19, "usage_type": "call"}, {"api_name": "drawing.clearRect", "line_number": 22, "usage_type": "call"}, {"api_name": "drawing.rectangle", "line_number": 23, "usage_type": "call"}, {"api_name": "drawing.text", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.interpolate", "line_number": 24, "usage_type": "call"}, {"api_name": "sounddevice.query_devices", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "416798176", "text": "# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass IBASSaleIndigo(models.Model):\n _inherit = 'sale.order.line'\n margin_percent = fields.Float(compute='_compute_margin_percent', string='Margin %', store=True, digits=(12,2))\n \n # Margin % = (Selling Price - Cost) / Selling Price\n # w/ Discount: Margin % = (Selling Price*(100% - Discount%) - Cost) / 
Selling Price*(100% - Discount%)\n @api.depends('price_unit','purchase_price','discount')\n def _compute_margin_percent(self):\n for rec in self:\n discount = (100 - rec.discount) / 100\n if rec.purchase_price > 0:\n #if rec.discount > 0:\n #discount = (100 - rec.discount) / 100\n rec.margin_percent = ((rec.price_unit * discount - rec.purchase_price) / (rec.price_unit * discount)) * 100\n #else:\n #rec.margin_percent = ((rec.price_unit - rec.purchase_price) / rec.purchase_price) * 100\n elif rec.purchase_price == 0 and rec.product_id.standard_price > 0 :\n #if rec.discount > 0:\n #discount = (100 - rec.discount) / 100\n rec.margin_percent = ((rec.price_unit * discount - rec.product_id.standard_price) / (rec.price_unit * discount)) * 100\n #else:\n #rec.margin_percent = ((rec.price_unit - rec.product_id.standard_price) / rec.product_id.standard_price) * 100\n else:\n rec.margin_percent = 100\n\nclass IBASSaleOrder(models.Model):\n _inherit = 'sale.order'\n\n total_margin_percent = fields.Float(compute='_compute_total_margin_percent', string='Total Margin %', \n store=True,digits=(12,2))\n \n # Total Margin % = (Total - Total Cost) / Total\n @api.depends('amount_total')\n def _compute_total_margin_percent(self):\n for rec in self:\n total_cost = 0\n for line in rec.order_line:\n purchase_price = line.purchase_price * line.product_uom_qty\n total_cost += purchase_price\n \n if total_cost > 0:\n # rec.total_margin_percent = total_cost / len(rec.order_line)\n rec.total_margin_percent = ((rec.amount_total - total_cost) / rec.amount_total) * 100\n else:\n # compute methods must assign the field on every record\n rec.total_margin_percent = 0\n\n\n# class ibas_indigo(models.Model):\n# _name = 'ibas_indigo.ibas_indigo'\n\n# name = fields.Char()\n# value = fields.Integer()\n# value2 = fields.Float(compute=\"_value_pc\", store=True)\n# description = fields.Text()\n#\n# @api.depends('value')\n# def _value_pc(self):\n# self.value2 = float(self.value) / 100", "sub_path": "ibas_indigo/models/.ipynb_checkpoints/models-checkpoint.py", "file_name": "models-checkpoint.py", "file_ext": "py", "file_size_in_byte": 2670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 8, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 10, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 10, "usage_type": "name"}, {"api_name": "odoo.api.depends", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 33, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 33, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 36, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 36, "usage_type": "name"}, {"api_name": "odoo.api.depends", "line_number": 40, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "428494162", "text": "# Classification for mushrooms detection\r\n# Here's an application of machine learning that could save your life!\r\n# We will be working with the UCI Mushroom Data Set\r\n# stored in readonly/mushrooms.csv.\r\n# The data will be used to train a model to predict whether or not a mushroom is poisonous.\r\n# The following attributes are provided:\r\n\r\n# Attribute Information:\r\n\r\n# 1. 
cap-shape: bell=b, conical=c, convex=x, flat=f, knobbed=k, sunken=s\r\n# 2. cap-surface: fibrous=f, grooves=g, scaly=y, smooth=s\r\n# 3. cap-color: brown=n, buff=b, cinnamon=c, gray=g, green=r, pink=p, purple=u, red=e, white=w, yellow=y\r\n# 4. bruises?: bruises=t, no=f\r\n# 5. odor: almond=a, anise=l, creosote=c, fishy=y, foul=f, musty=m, none=n, pungent=p, spicy=s\r\n# 6. gill-attachment: attached=a, descending=d, free=f, notched=n\r\n# 7. gill-spacing: close=c, crowded=w, distant=d\r\n# 8. gill-size: broad=b, narrow=n\r\n# 9. gill-color: black=k, brown=n, buff=b, chocolate=h, gray=g, green=r, orange=o, pink=p,\r\n# purple=u, red=e, white=w, yellow=y\r\n# 10. stalk-shape: enlarging=e, tapering=t\r\n# 11. stalk-root: bulbous=b, club=c, cup=u, equal=e, rhizomorphs=z, rooted=r, missing=?\r\n# 12. stalk-surface-above-ring: fibrous=f, scaly=y, silky=k, smooth=s\r\n# 13. stalk-surface-below-ring: fibrous=f, scaly=y, silky=k, smooth=s\r\n# 14. stalk-color-above-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y\r\n# 15. stalk-color-below-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y\r\n# 16. veil-type: partial=p, universal=u\r\n# 17. veil-color: brown=n, orange=o, white=w, yellow=y\r\n# 18. ring-number: none=n, one=o, two=t\r\n# 19. ring-type: cobwebby=c, evanescent=e, flaring=f, large=l, none=n, pendant=p, sheathing=s, zone=z\r\n# 20. spore-print-color: black=k, brown=n, buff=b, chocolate=h, green=r, orange=o, purple=u, white=w, yellow=y\r\n# 21. population: abundant=a, clustered=c, numerous=n, scattered=s, several=v, solitary=y\r\n# 22. habitat: grasses=g, leaves=l, meadows=m, paths=p, urban=u, waste=w, woods=d\r\n#\r\n# The data in the mushrooms dataset is currently encoded with strings.\r\n# These values will need to be encoded to numeric to work with sklearn.\r\n# We'll use pd.get_dummies to convert the categorical variables into indicator variables.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nmush_df = pd.read_csv('mushrooms.csv')\r\nmush_df2 = pd.get_dummies(mush_df)\r\n\r\nX_mush = mush_df2.iloc[:,2:]\r\ny_mush = mush_df2.iloc[:,1]\r\n\r\n# use the variables X_train2, y_train2 for phase 1\r\nX_train2, X_test2, y_train2, y_test2 = train_test_split(X_mush, y_mush, random_state=0)\r\n\r\n# For performance reasons in phases 2,3, we will create a smaller version of the\r\n# entire mushroom dataset for use in those phases. 
For simplicity we'll just re-use\r\n# the 25% test split created above as the representative subset.\r\n#\r\n# Use the variables X_subset, y_subset for phases 2,3.\r\nX_subset = X_test2\r\ny_subset = y_test2\r\n\r\n# ------------------------------ Phase 1 --------------------------------\r\n\r\n# Using X_train2 and y_train2 from the preceding cell,\r\n# train a DecisionTreeClassifier with default parameters and random_state=0.\r\n# What are the 5 most important features found by the decision tree?\r\n# As a reminder, the feature names are available in the X_train2.columns property,\r\n# and the order of the features in X_train2.columns matches\r\n# the order of the feature importance values in the classifier's feature_importances_ property.\r\n# This function should return a list of length 5 containing the feature names in descending order of importance.\r\n# Note: remember that you also need to set random_state in the DecisionTreeClassifier.\r\n\r\ndef phase_one():\r\n    from sklearn.tree import DecisionTreeClassifier\r\n    import pandas as pd\r\n    clf = DecisionTreeClassifier(random_state=0).fit(X_train2, y_train2)\r\n    #print('Accuracy of Decision Tree classifier on training set: {:.2f}'.format(clf.score(X_train2, y_train2)))\r\n    #print('Accuracy of Decision Tree classifier on test set: {:.2f}'.format(clf.score(X_test2, y_test2)))\r\n    data = np.array(clf.feature_importances_)\r\n    index = X_train2.columns\r\n    feature_imp_ser = pd.Series(data, index)\r\n    sorted_feature_imp_ser = feature_imp_ser.sort_values(ascending=False)\r\n    final_list = sorted_feature_imp_ser[0:5].index.tolist()\r\n    return final_list\r\n#print(phase_one())\r\n\r\n# ------------------------------ Phase 2 --------------------------------\r\n# For this phase, we're going to use the validation_curve function in sklearn.model_selection\r\n# to determine training and test scores for a Support Vector Classifier (SVC)\r\n# with varying parameter values. Recall that the validation_curve function,\r\n# in addition to taking an initialized unfitted classifier object, takes a dataset as input\r\n# and does its own internal train-test splits to compute results.\r\n\r\n# Because creating a validation curve requires fitting multiple models,\r\n# for performance reasons this phase will use just a subset of the original mushroom dataset:\r\n# We use the variables X_subset and y_subset as input to the validation curve function\r\n# (instead of X_mush and y_mush) to reduce computation time.\r\n\r\n# The initialized unfitted classifier object we'll be using is a Support Vector Classifier\r\n# with radial basis kernel. So the first step is to create an SVC object with default parameters\r\n# (i.e. kernel='rbf', C=1) and random_state=0. Recall that the kernel width of the RBF kernel\r\n# is controlled using the gamma parameter.\r\n\r\n# With this classifier, and the dataset in X_subset, y_subset,\r\n# we explore the effect of gamma on classifier accuracy by using the validation_curve function\r\n# to find the training and test scores for 6 values of gamma from 0.0001 to 10\r\n# (i.e. np.logspace(-4,1,6)).\r\n\r\n# Recall that we can specify what scoring metric we want validation_curve\r\n# to use by setting the \"scoring\" parameter. 
In this case, we want to use \"accuracy\" as the scoring metric.\r\n\r\n# For each level of gamma, validation_curve will fit 3 models on different subsets of the data,\r\n# returning two 6x3 (6 levels of gamma x 3 fits per level) arrays of the scores for the training and test sets.\r\n\r\n# We will find the mean score across the three models for each level of gamma for both arrays,\r\n# creating two arrays of length 6, and return a tuple with the two arrays.\r\n\r\n# e.g.\r\n\r\n# if one of your array of scores is\r\n\r\n# array([[ 0.5, 0.4, 0.6],\r\n# [ 0.7, 0.8, 0.7],\r\n# [ 0.9, 0.8, 0.8],\r\n# [ 0.8, 0.7, 0.8],\r\n# [ 0.7, 0.6, 0.6],\r\n# [ 0.4, 0.6, 0.5]])\r\n# it should then become\r\n#\r\n# array([ 0.5, 0.73333333, 0.83333333, 0.76666667, 0.63333333, 0.5])\r\n# This function should return one tuple of numpy arrays\r\n# (training_scores, test_scores) where each array in the tuple has shape (6,).\r\n\r\ndef phase_two():\r\n    from sklearn.svm import SVC\r\n    from sklearn.model_selection import validation_curve\r\n    import numpy as np\r\n    clf = SVC(kernel='rbf', C=1, random_state=0)  # random_state=0 per the spec above\r\n    param_range = np.logspace(-4, 1, 6)\r\n    train_scores, test_scores = validation_curve(clf, X_subset, y_subset,\r\n                                                 param_name='gamma',\r\n                                                 param_range=param_range, cv=3, scoring=\"accuracy\")\r\n    train_scores_mean = np.mean(train_scores, axis=1)\r\n    test_scores_mean = np.mean(test_scores, axis=1)\r\n\r\n\r\n    return (train_scores_mean, test_scores_mean)\r\n\r\n\r\n#print(phase_two())\r\n\r\n# ------------------------------ Phase 3 --------------------------------\r\n# Based on the scores from phase 2, we can explore what gamma value corresponds to a model that is underfitting\r\n# (and has the worst test set accuracy), what gamma value corresponds to a model\r\n# that is overfitting (and has the worst test set accuracy) and what choice of gamma would\r\n# be the best choice for a model with good generalization performance on this dataset\r\n# (high accuracy on both training and test sets).\r\n\r\n\r\n# This function will return one tuple with the degree values in this order:\r\n# (Underfitting, Overfitting, Good_Generalization)\r\n\r\n# This auxiliary plot code is based on the scikit-learn validation curve example\r\n# See: http://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html\r\nimport matplotlib.pyplot as plt  # plt is used by the module-level plotting below\r\nplt.figure()\r\ntrain_scores_mean = phase_two()[0]\r\n#train_scores_std = np.std(train_scores, axis=1)\r\ntest_scores_mean = phase_two()[1]\r\n#test_scores_std = np.std(test_scores, axis=1)\r\n\r\nplt.title('Validation Curve with SVM')\r\nplt.xlabel('$\\\gamma$ (gamma)')\r\nplt.ylabel('Score')\r\nplt.ylim(0.4, 1.1)\r\nlw = 2\r\nparam_range = np.logspace(-4, 1, 6)\r\nplt.semilogx(param_range, train_scores_mean, 'o-', label='Training score',\r\n             color='darkorange', lw=lw)\r\n\r\n#plt.fill_between(param_range, train_scores_mean - train_scores_std,\r\n#                 train_scores_mean + train_scores_std, alpha=0.2,\r\n#                 color='darkorange', lw=lw)\r\n\r\nplt.semilogx(param_range, test_scores_mean, 'o-', label='Cross-validation score',\r\n             color='navy', lw=lw)\r\n\r\n#plt.fill_between(param_range, test_scores_mean - test_scores_std,\r\n#                 test_scores_mean + test_scores_std, alpha=0.2,\r\n#                 color='navy', lw=lw)\r\n\r\nplt.legend(loc='lower center')\r\nplt.show()\r\n\r\n\r\ndef phase_three():\r\n    (Underfitting, Overfitting, Good_Generalization) = (0.001, 10, 0.1) # According to the plot above\r\n    return (Underfitting, Overfitting, Good_Generalization)\r\nprint(phase_three())", "sub_path": "ML for mushrooms detection.py", "file_name": "ML for 
mushrooms detection.py", "file_ext": "py", "file_size_in_byte": 9519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 137, "usage_type": "call"}, {"api_name": "sklearn.model_selection.validation_curve", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "81191593", "text": "import time\nimport datetime\n\n# This is my functions module\n\n# Checks in the input number is prime or not\ndef isprime(num):\n if type(num) != int:\n try:\n num = int(num)\n except ValueError:\n return \"Invalid input value.\"\n\n if num > 1:\n for i in range(2,num):\n if (num % i) == 0:\n return f\"{num} is not a prime number\\n{i} times {num//i} is {num}\"\n\n else:\n return f\"{num} is a prime number\"\n\n else:\n return f\"{num} is not a prime number\"\n\n# Returns fibonacci numbers up to n\ndef fib(n):\n if type(n) != int:\n try:\n n = int(n)\n except ValueError:\n return \"Invalid input value.\"\n\n fib1, fib2 = 0, 1\n result = []\n while fib1 < n:\n result.append(fib1)\n fib1, fib2 = fib2, fib1 + fib2\n return f\"Fibonacci numbers up to {n} are: {str(result)[1:-1]}\"\n\n# Logs different events for the bot\ndef log(msg=\"\", trig=0, cmd=\"\", resp=\"\", user=\"\"):\n # Time and Date formatting\n s = str(time.asctime(time.gmtime(time.time() + 21600)))\n t = datetime.datetime.strptime(s, \"%a %b %d %H:%M:%S %Y\")\n t = t.strftime(\"%a %d/%m/%Y %I:%M:%S %p\")\n \n # Response to trigger 0. Log start.\n if trig == 0:\n s = f\"[Bot has been connected to Discord] [Started logging at: '{t}']\"\n \n # Response to trigger 1. Manual log.\n if trig == 1:\n s = f\"[Manual log at: '{t}'] [Message: '{msg}']\"\n \n # Response to trigger 2. 
Command log.\n if trig == 2:\n s = f\"['{user}' triggered '{cmd}' command at: '{t}'] [Message: '{msg}'] [Response: '{resp}']\"\n \n # Writing the log to a file.\n f = open(\"log.log\", \"a+\")\n f.write(s+\"\\n\")\n f.close()\n", "sub_path": "mf.py", "file_name": "mf.py", "file_ext": "py", "file_size_in_byte": 1731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "time.asctime", "line_number": 43, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "576899822", "text": "from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator \n# Create your models here.\n\n# Creates the carriers table \nclass Carrier(models.Model):\n carrierId = models.AutoField(primary_key=True, editable=False)\n carrierName = models.CharField(max_length=40)\n carrierAcctNum = models.CharField(max_length=80)\n\n def __str__(self):\n return f'{self.carrierName} - {self.carrierAcctNum}'\n\n class Meta:\n ordering = ['carrierName', 'carrierAcctNum']\n\n# Creates the Products table\nclass Product(models.Model):\n prodID = models.AutoField(primary_key=True, editable=False)\n product = models.CharField(max_length=80)\n\n def __str__(self):\n return self.product\n\n class Meta:\n ordering = ['product']\n\n# Creates the bills table\nclass Bill(models.Model):\n billID = models.IntegerField(primary_key=True, validators=[MinValueValidator(1), MaxValueValidator(10000)], editable=False)\n carrierID = models.ForeignKey(Carrier, on_delete=models.CASCADE, verbose_name='Related Carrier')\n billDate = models.DateField(verbose_name='Billed Date')\n dueDate = models.DateField(verbose_name='Due Date')\n prodID = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name='related product')\n charge = models.DecimalField(max_digits=65, decimal_places=2)\n anc_fees = models.DecimalField(max_digits=65, decimal_places=2, verbose_name='Ancillary Fees')\n taxes = models.DecimalField(max_digits=65, decimal_places=2)\n credit = models.DecimalField(max_digits=65, decimal_places=2)\n\n def dates(self):\n date = self.dueDate.strftime('%b %y')\n return date\n\n def __str__(self):\n date = self.dueDate.strftime('%b %y')\n return f'{self.carrierID} - {date}'\n\n class Meta:\n ordering = ['billDate']\n\n\n# Creates the Bills Paid table\nclass BillPaid(models.Model):\n def id_default():\n # next sequential id: one past the current maximum paidID\n last = BillPaid.objects.aggregate(models.Max('paidID'))['paidID__max']\n return (last or 0) + 1\n\n paidID = models.IntegerField(primary_key=True, validators=[MinValueValidator(1), MaxValueValidator(10000)],\n verbose_name='Paid ID', editable=False)\n paidDate = models.DateField(verbose_name='Date Paid', null=True, blank=True)\n billID = models.ForeignKey(Bill, on_delete=models.CASCADE, verbose_name='related bill')\n notes = models.CharField(max_length=100, default='N/A', verbose_name='Notes')\n paidBool = models.BooleanField(verbose_name='Paid (True or False)')\n totalPaid = models.DecimalField(max_digits=65, decimal_places=2, verbose_name='Total Paid')\n \n def __str__(self):\n return f'{self.billID} - ID# {self.paidID}'\n\n class Meta:\n ordering = ['paidDate', 'billID']\n\n \n# Create a monthly summary table\nclass MonthlyBreakdown(models.Model):\n def id_default():\n # next sequential id: one past the current maximum mbdID\n last = MonthlyBreakdown.objects.aggregate(models.Max('mbdID'))['mbdID__max']\n return (last or 0) + 1\n\n mbdID = 
models.IntegerField(primary_key=True, validators=[MinValueValidator(1), MaxValueValidator(10000)],\n default=id_default, verbose_name='Monthly Billed ID', editable=False)\n myPaid = models.DateField(verbose_name='Month/Year Paid')\n totalPaid = models.DecimalField(max_digits=65, decimal_places=2, verbose_name='Total Paid')\n \n def __str__(self):\n return f'{self.myPaid}'\n\n class Meta:\n ordering = ['mbdID', 'myPaid']\n\n", "sub_path": "billing/bills/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.models.Model", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 30, "usage_type": "call"}, {"api_name": "django.core.validators.MaxValueValidator", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.db.models.DateField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.db.models.DecimalField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", 
"line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 58, "usage_type": "call"}, {"api_name": "django.core.validators.MaxValueValidator", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 79, "usage_type": "call"}, {"api_name": "django.core.validators.MaxValueValidator", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "291256648", "text": "from django.shortcuts import render, redirect\nfrom django import views\nfrom django.http import HttpResponse\nfrom team.forms import TeamForm\nfrom team.models import Team, TeamMembers, TeamBoards\nfrom auth.models import Login, User\nfrom django.db.models import Q\n\nimport shortuuid\nimport json\n\n\nclass GetAllTeamViews(views.View):\n def get(self, request):\n context = dict(request.context)\n if not request.user:\n return redirect('auth:login')\n\n try:\n teams = TeamMembers.objects.filter(user=request.user, deleted=False)\n context['teams'] = teams\n\n return render(request, 'team/all.html', context)\n except Team.DoesNotExist:\n return redirect('auth:login')\n\n def post(self, request, team_uid):\n return HttpResponse(team_uid)\n\n\nclass GetTeamByUidViews(views.View):\n def get(self, request, team_uid):\n context = dict(request.context)\n if not request.user:\n return redirect('auth:login')\n try:\n team = Team.objects.get(uid=team_uid, deleted=False)\n context['team'] = team\n\n return render(request, 'team/get.html', 
context)\n except Team.DoesNotExist:\n return redirect('auth:login')\n\n def post(self, request, team_uid):\n return HttpResponse(team_uid)\n\n\nclass GetTeamMembersViews(views.View):\n def get(self, request, team_uid):\n context = dict(request.context)\n if not request.user:\n return redirect('auth:login')\n try:\n team = Team.objects.get(uid=team_uid, deleted=False)\n team_members = TeamMembers.objects.filter(\n team=team,\n deleted=False,\n )\n\n context['team_nav'] = \"members\"\n context['team_members'] = team_members\n context['team'] = team\n\n return render(request, 'team/get_members.html', context)\n except Team.DoesNotExist:\n return redirect('/auth/login/')\n\n def post(self, request, team_uid):\n return HttpResponse(team_uid)\n\n\nclass TeamCreateViews(views.View):\n def get(self, request):\n context = dict(request.context)\n form = TeamForm()\n context['form'] = form\n return render(request, 'team/create_team.html', context)\n\n def post(self, request):\n context = dict(request.context)\n form = TeamForm(request.POST)\n team_uid = None\n if form.is_valid():\n team = form.save(commit=False)\n team.owner = request.user\n team.uid = shortuuid.ShortUUID().random(length=10)\n team.save()\n\n return redirect('/team/{}'.format(team.uid))\n\n # invalid form: re-render the page so validation errors are shown\n context['form'] = form\n return render(request, 'team/create_team.html', context)\n\n\nclass GetTeamBoardsViews(views.View):\n def get(self, request, team_uid):\n context = dict(request.context)\n\n try:\n team = Team.objects.get(uid=team_uid, deleted=False)\n team_boards = TeamBoards.objects.filter(team=team, deleted=False)\n boards = [item.board for item in team_boards]\n\n context['team_nav'] = \"boards\"\n context['team_boards'] = boards\n context['team'] = team\n\n return render(request, 'team/get_boards.html', context)\n\n except Team.DoesNotExist:\n return redirect('team:all')\n\n\nclass TeamMemberAjax(views.View):\n def get(self, request):\n query = request.GET['query']\n uid = request.GET['uid']\n\n login = Login.objects.filter(\n Q(login_name__icontains=query) |\n Q(email__icontains=query) |\n Q(user__full_name__icontains=query)\n )[:5]\n\n team = Team.objects.get(uid=uid)\n\n json_response = []\n\n for item in login:\n user = item.user.to_dict()\n json_response.append(\n {\n 'email': item.email,\n 'login_name': item.login_name,\n 'full_name': user['full_name'],\n 'picture': user['picture'],\n 'id': user['user_id'],\n 'joined': TeamMembers.objects.filter(team=team, user=item.user, deleted=False).exists(),\n 'last_login': str(item.last_login.date()) or \"\",\n }\n )\n\n json_dump = json.dumps({\n 'response': json_response,\n 'length': len(json_response)\n }, sort_keys=True, indent=5, ensure_ascii=False)\n\n return HttpResponse(json_dump, content_type=\"application/json\")\n\n def post(self, request):\n user_id = request.POST['user_id']\n uid = request.POST['uid']\n\n user = User.objects.get(user_id=user_id)\n team = Team.objects.get(uid=uid)\n\n team_members = TeamMembers.objects.create(\n user_type=TeamMembers.USER,\n user=user,\n team=team,\n )\n\n return HttpResponse(json.dumps({'uid': uid}, sort_keys=True, indent=5, ensure_ascii=False),\n content_type=\"application/json\")\n\n\nclass DeleteTeamMemberViews(views.View):\n def get(self, request, team_uid, user_id):\n try:\n team_members = TeamMembers.objects.get(team__uid=team_uid, user__user_id=user_id, deleted=False)\n team_members.deleted = True\n team_members.save()\n\n return redirect('team:members', team_uid=team_uid)\n except TeamMembers.DoesNotExist:\n return redirect('team:all')\n", "sub_path": "team/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 5431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.views.View", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "team.models.TeamMembers", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "team.models.Team.DoesNotExist", "line_number": 24, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "team.forms", "line_number": 37, "usage_type": "name"}, {"api_name": "team.models.Team.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "team.models.Team.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 37, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "team.models.Team.DoesNotExist", "line_number": 41, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "team.forms", "line_number": 54, "usage_type": "name"}, {"api_name": "team.models.Team.objects.get", "line_number": 54, "usage_type": "call"}, {"api_name": "team.models.Team.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 54, "usage_type": "name"}, {"api_name": "team.models.TeamMembers.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "team.models.TeamMembers", "line_number": 55, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 56, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 62, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "team.models.Team.DoesNotExist", "line_number": 65, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 65, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 69, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 72, 
"usage_type": "attribute"}, {"api_name": "django.views", "line_number": 72, "usage_type": "name"}, {"api_name": "team.forms.TeamForm", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "team.forms.TeamForm", "line_number": 81, "usage_type": "call"}, {"api_name": "team.forms", "line_number": 84, "usage_type": "name"}, {"api_name": "team.forms.owner", "line_number": 85, "usage_type": "attribute"}, {"api_name": "team.forms", "line_number": 85, "usage_type": "name"}, {"api_name": "team.forms.uid", "line_number": 86, "usage_type": "attribute"}, {"api_name": "team.forms", "line_number": 86, "usage_type": "name"}, {"api_name": "shortuuid.ShortUUID", "line_number": 86, "usage_type": "call"}, {"api_name": "team.forms.save", "line_number": 87, "usage_type": "call"}, {"api_name": "team.forms", "line_number": 87, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "team.forms.uid", "line_number": 89, "usage_type": "attribute"}, {"api_name": "team.forms", "line_number": 89, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 92, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 97, "usage_type": "name"}, {"api_name": "team.models.Team.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "team.models.Team.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 97, "usage_type": "name"}, {"api_name": "team.models.TeamBoards.objects.filter", "line_number": 98, "usage_type": "call"}, {"api_name": "team.models.TeamBoards.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "team.models.TeamBoards", "line_number": 98, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 98, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 103, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "team.models.Team.DoesNotExist", "line_number": 107, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 107, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 111, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 111, "usage_type": "name"}, {"api_name": "auth.models.Login.objects.filter", "line_number": 116, "usage_type": "call"}, {"api_name": "auth.models.Login.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "auth.models.Login", "line_number": 116, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 117, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 118, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 119, "usage_type": "call"}, {"api_name": "team.forms", "line_number": 122, "usage_type": "name"}, {"api_name": "team.models.Team.objects.get", "line_number": 122, "usage_type": "call"}, {"api_name": "team.models.Team.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 122, "usage_type": "name"}, {"api_name": "team.models.TeamMembers.objects.filter", "line_number": 135, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.objects", "line_number": 135, "usage_type": "attribute"}, {"api_name": 
"team.models.TeamMembers", "line_number": 135, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 135, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 140, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 145, "usage_type": "call"}, {"api_name": "auth.models.User.objects.get", "line_number": 151, "usage_type": "call"}, {"api_name": "auth.models.User.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "auth.models.User", "line_number": 151, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 152, "usage_type": "name"}, {"api_name": "team.models.Team.objects.get", "line_number": 152, "usage_type": "call"}, {"api_name": "team.models.Team.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "team.models.Team", "line_number": 152, "usage_type": "name"}, {"api_name": "team.models.TeamMembers.objects.create", "line_number": 154, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "team.models.TeamMembers", "line_number": 154, "usage_type": "name"}, {"api_name": "team.models.TeamMembers.USER", "line_number": 155, "usage_type": "attribute"}, {"api_name": "team.models.TeamMembers", "line_number": 155, "usage_type": "name"}, {"api_name": "team.forms", "line_number": 157, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 160, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 160, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 164, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 164, "usage_type": "name"}, {"api_name": "team.models.TeamMembers.objects.get", "line_number": 167, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "team.models.TeamMembers", "line_number": 167, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 171, "usage_type": "call"}, {"api_name": "team.models.TeamMembers.DoesNotExist", "line_number": 172, "usage_type": "attribute"}, {"api_name": "team.models.TeamMembers", "line_number": 172, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "585838237", "text": "# Very simple implementation of simulation of gravity on bodies in 2D. 
Does not handle the case when 2 or more\n# bodies collide with each other\nimport random\nimport sys\nimport math\nimport pygame\n\nfrom Planet import Body\n\nif __name__ == \"__main__\":\n NUM_OF_BODIES = 20\n WIDTH = 900\n HEIGHT = 800\n WHITE = (255, 255, 255)\n BLACK = (0, 0, 0)\n BLUE = (109, 196, 255)\n\n bodies = []\n for i in range(NUM_OF_BODIES):\n px = random.randint(10, WIDTH - 10)\n py = random.randint(10, HEIGHT - 10)\n m = random.randint(1, 20)\n bodies.append(Body([px, py], [0, 0], m, i))\n\n pygame.init()\n size = WIDTH, HEIGHT\n screen = pygame.display.set_mode(size)\n\n font = pygame.font.SysFont('Arial', 16)\n text = font.render('0', True, BLUE)\n textRect = text.get_rect()\n velocity_diff = [0, 0]\n while True:\n screen.fill(BLACK)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n # Don't process bodies that were hit/removed\n bodies = [body for body in bodies if not body.hit]\n\n # Get bodies and find the centroid (geometric center) of all\n x = [p.pos[0] for p in bodies]\n y = [p.pos[1] for p in bodies]\n centroid = (sum(x) / len(bodies), sum(y) / len(bodies))\n lx = bodies[0].pos[0]\n ly = bodies[0].pos[1]\n for body in bodies:\n body.pos[0] = body.pos[0] - centroid[0] + WIDTH / 2\n body.pos[1] = body.pos[1] - centroid[1] + HEIGHT / 2\n\n # Draw circles and line for reference point of origin\n velocity_diff[0] = lx - centroid[0]\n velocity_diff[1] = ly - centroid[1]\n # Origin Point\n textRect.center = (velocity_diff[0] + 10, velocity_diff[1] + 10)\n screen.blit(font.render(\"{0},{1}\".format(0, 0), True, BLUE), textRect)\n pygame.draw.circle(screen, (255, 255, 127), [int(velocity_diff[0]), int(velocity_diff[1])], 3, 1)\n # Center of all objects\n textRect.center = (WIDTH/2 + 10, HEIGHT/2 + 10)\n screen.blit(font.render(\"{0},{1}\".format(\n int(math.floor(velocity_diff[0])),\n int(math.floor(velocity_diff[1]))), True, BLUE), textRect)\n # Draw line from origin to center of all objects\n pygame.draw.line(screen, (255, 255, 0), (WIDTH/2, HEIGHT/2), velocity_diff)\n pygame.draw.circle(screen, (255, 255, 127), [int(WIDTH/2), int(HEIGHT/2)], 3, 1)\n\n for body_a in bodies:\n # Remove invulnerability flag\n body_a.invuln = False\n\n f_totals = body_a.n_body(bodies, 0, 0)\n\n body_a.velocity[0] = body_a.velocity[0] + f_totals[0] / body_a.mass\n body_a.velocity[1] = body_a.velocity[1] + f_totals[1] / body_a.mass\n\n body_a.pos[0] = body_a.pos[0] + body_a.velocity[0]\n body_a.pos[1] = body_a.pos[1] + body_a.velocity[1]\n\n mass_text = 'M={0}'.format(body_a.mass)\n # force_text = 'F=({0},{1})'.format(fx_total.__round__(3), fy_total.__round__(3))\n # velocity_text = 'V=({},{})'.format(body_a.v[0].__round__(3),body_a.v[1].__round__(3))\n # text_str = mass_text + ' ' + force_text + ' ' + velocity_text\n text_str = mass_text\n\n text = font.render(text_str, True, BLUE)\n textRect.center = (\n body_a.pos[0] + body_a.size + 10,\n body_a.pos[1] + body_a.size + 10)\n\n screen.blit(text, textRect)\n\n pygame.draw.circle(\n screen,\n (255, 255, 255),\n [int(body_a.pos[0]), int(body_a.pos[1])], int(body_a.size))\n\n # Get a list of bodies, except for body_a\n next_bodies = [body for body in bodies if not body_a.id == body.id]\n for body in next_bodies:\n # if body is invulnerable then continue on to the next body\n if body.invuln:\n continue\n # Find the distance to body_a\n distance = math.sqrt(\n ((body_a.pos[0] - body.pos[0]) * (body_a.pos[0] - body.pos[0])) +\n ((body_a.pos[1] - body.pos[1]) * (body_a.pos[1] - body.pos[1])))\n # If bodies touch then \"remove\" 
one by setting flag and making the other invulnerable\n if distance < int(body_a.size) + int(body.size):\n if body_a.mass >= body.mass:\n # conserve momentum: compute the merged velocity before adding the absorbed mass\n body_a.velocity[0] = (body_a.mass * body_a.velocity[0] + body.mass * body.velocity[0]\n ) / (body_a.mass + body.mass)\n body_a.velocity[1] = (body_a.mass * body_a.velocity[1] + body.mass * body.velocity[1]\n ) / (body_a.mass + body.mass)\n body_a.mass += body.mass\n body_a.update_size()\n body.hit = True\n body_a.invuln = True\n else:\n body.velocity[0] = (body_a.mass * body_a.velocity[0] + body.mass * body.velocity[0]\n ) / (body_a.mass + body.mass)\n body.velocity[1] = (body_a.mass * body_a.velocity[1] + body.mass * body.velocity[1]\n ) / (body_a.mass + body.mass)\n body.mass += body_a.mass\n body.update_size()\n body_a.hit = True\n body.invuln = True\n pygame.display.flip()\n", "sub_path": "gravity_simulation.py", "file_name": "gravity_simulation.py", "file_ext": "py", "file_size_in_byte": 5567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "Planet.Body", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 58, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 62, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 93, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 128, "usage_type": "attribute"}]} +{"seq_id": "125984025", "text": "\r\n\r\n
pd.read_csv('Churn_Modelling.csv')\r\n\r\nsample_df.head().to_csv('top5.csv',index=False)\r\n\r\nlen(sample_df['Gender'].unique())\r\n\r\n\r\n\r\n######\r\n\r\nsam =sample_df[(sample_df.CreditScore >700) & (sample_df.Surname =='Yen')]\r\ngender_dict = {'Female':0, 'Male':1}\r\n\r\ndef age_binning(age):\r\n # {'0-17':0, '18-25':1, '26-35':2, '36-45':3, '46-50':4, '51-55':5, '55+':6}\r\n if age > 0 and age <= 17:\r\n return 0\r\n elif age > 18 and age <= 25:\r\n return 1\r\n elif age > 26 and age <= 35:\r\n return 2\r\n elif age > 36 and age <= 45:\r\n return 3\r\n elif age > 46 and age <= 50:\r\n return 4\r\n elif age > 51 and age <= 55:\r\n return 5\r\n else:\r\n return 6\r\n\r\ntrain_df[\"Gender\"] = train_df[\"Gender\"].apply(lambda x: gender_dict[x])\r\n\r\ntrain_df['Age'] = train_df['Age'].apply(age_binning)\r\n\r\n\r\ndef getCountVar(compute_df, count_df, var_name):\r\n\tgrouped_df = count_df.groupby(var_name)\r\n\tcount_dict = {}\r\n\tfor name, group in grouped_df:\r\n\t\tcount_dict[name] = group.shape[0]\r\n\r\n\tcount_list = []\r\n\tfor index, row in compute_df.iterrows():\r\n\t\tname = row[var_name]\r\n\t\tcount_list.append(count_dict.get(name, 0))\r\n\treturn count_list\r\n\r\n\r\nprint(\"Getting count features..\")\r\ntrain_df[\"Age_Count\"] = getCountVar(train_df, train_df, \"Age\")\r\n#test_df[\"Age_Count\"] = getCountVar(test_df, train_df, \"Age\")\r\n\r\n\r\ndata.groupby(['month', 'item']).agg({'duration':sum, # find the sum of the durations for each group\r\n 'network_type': \"count\", # find the number of network type entries\r\n 'date': 'first'})\r\n\r\nsss=train_df.groupby('Geography')['CreditScore'].max()\r\n\r\nqwe = sample_df.groupy('Geography')['CreditScore'].agg({'maximum':max,'Count':Count})\r\n\r\nqwe1 = sample_df.groupby('Geography').agg({'CreditScore':['max','mean','count']})\r\n###################3\r\nmeans = sample_df.groupby('Geography')['Exited'].mean()\r\nsample_df['Geography_TARGET_MEAN']=sample_df['Geography'].map(means)\r\n\r\nsss['France']\r\n\r\ndef getPurchaseVar(compute_df, purchase_df, var_name):\r\n grouped_df = purchase_df.groupby(var_name)\r\n min_dict = {}\r\n max_dict = {}\r\n mean_dict = {}\r\n for name, group in grouped_df:\r\n min_dict[name] = min(np.array(group[\"CreditScore\"]))\r\n max_dict[name] = max(np.array(group[\"CreditScore\"]))\r\n mean_dict[name] = np.mean(np.array(group[\"CreditScore\"]))\r\n # twentyfive_dict[name] = np.percentile(np.array(group[\"Purchase\"]),25)\r\n # seventyfive_dict[name] = np.percentile(np.array(group[\"Purchase\"]),75)\r\n\r\n \r\n print(min_dict)\r\n print(max_dict)\r\n print(mean_dict)\r\n min_list = []\r\n max_list = []\r\n mean_list = []\r\n for index, row in compute_df.iterrows():\r\n name = row[var_name]\r\n min_list.append(min_dict.get(name,0))\r\n max_list.append(max_dict.get(name,0))\r\n mean_list.append(mean_dict.get(name,0))\r\n\r\n return min_list, max_list, mean_list\r\n\r\nmin_price_list, max_price_list, mean_price_list = getPurchaseVar(train_df, train_df, \"Geography\")\r\ndjjd=pd.DataFrame({'min_dict':min_price_list})\r\ntrain_df[\"User_ID_MinPrice\"] = min_price_list\r\ntrain_df[\"User_ID_MaxPrice\"] = max_price_list\r\ntrain_df[\"User_ID_MeanPrice\"] = mean_price_list\r\n\r\ntrain_y = np.array(train_df[\"Exited\"])\r\ntrain_df.drop([\"Exited\"], axis=1, inplace=True)\r\n\r\ncol = train_df.columns\r\n\r\ncat_columns_list = [\"User_ID\", \"Product_ID\"]\r\nfor var in cat_columns_list:\r\n lb = LabelEncoder()\r\n # full_var_data = 
pd.concat((train_df[var],test_df[var]),axis=0).astype('str')\r\n temp = lb.fit_transform(np.array(train_df))\r\n # train_df[var] = lb.transform(np.array( train_df[var] ).astype('str'))\r\n # test_df[var] = lb.transform(np.array( test_df[var] ).astype('str'))\r\n \r\nsample_df.describe()", "sub_path": "Bostongroup.py", "file_name": "Bostongroup.py", "file_ext": "py", "file_size_in_byte": 4361, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.chdir", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "528726021", "text": "import pandas as pd \nfrom geopy.geocoders import Nominatim\nimport time\nfrom pprint import pprint\n\n# dif_data = pd.read_csv('./Dif_data.csv')\ndata_same = pd.read_csv('./Same_data.csv')\n\n\n# Correct one row\nindexNames = dif_data[dif_data['address'] == 'Camins Al Grau - Penya - Roja - Avda. Francia'].index\n# Delete these row indexes from dataFrame\ndif_data.drop(indexNames , inplace=True)\n\n\napp = Nominatim(user_agent=\"tutorial\")\n\n### Look for coordinates in data_dif\nlatitude_dif = []\nlongitude_dif = []\nto_check = pd.DataFrame(columns=['index', 'address', 'district', 'neighbourhood', 'sqft_surface',\n 'bedrooms', 'bathrooms', 'new_construction', 'price'])\nx = 0\nfor i in range(len(dif_data)):\n print(i)\n print(dif_data['address'].iloc[i])\n try:\n hola = dif_data['address'].iloc[i], dif_data['district'].iloc[i], \"Valencia, España\"\n location = app.geocode(hola).raw\n latitude_dif.append(location['lat'])\n longitude_dif.append(location['lon'])\n time.sleep(1)\n x += 1\n print(hola)\n print('OK!')\n except:\n pass\n longitude_dif.append('None')\n latitude_dif.append('None')\n pd.concat([dif_data.iloc[i], to_check], axis=1)\n print(hola)\n print('NOPE!')\n\n# Create columns with location info and append to data_dif\ndif_data_location = dif_data \ndif_data_location['latitude'] = latitude_dif\ndif_data_location['longitude'] = longitude_dif\n\n\n\n# I'm going to get many None values, therefore if location are None append to data same\n\nfor_samedata = dif_data_location[dif_data_location['longitude']=='None']\nnew_dif_data = dif_data_location[dif_data_location['longitude']!= 'None']\n\ndif_data.to_csv('secure_dif.csv', index=False)\nnew_dif_data.to_csv('real_dif_data.csv', index=False)\nfor_samedata.to_csv('for_samedata.csv', index=False)\n\n\nfor_samedata2 = pd.read_csv(\"for_samedata.csv\")\nfor_samedata2 = for_samedata2.drop(columns=['latitude', 'longitude'])\n\n\n\nsame_data_maybe = pd.concat([data_same, for_samedata2], axis=0)\nsame_data_maybe.to_csv('same_data_maybe.csv', index=False)", "sub_path": "data_cleaning-preprocessing/.ipynb_checkpoints/3.get_coordinates-checkpoint.py", "file_name": "3.get_coordinates-checkpoint.py", "file_ext": "py", 
"file_size_in_byte": 2083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "geopy.geocoders.Nominatim", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "263169273", "text": "# -*- coding: utf-8 -*-\nimport random\nfrom datetime import datetime\nimport json\n\nCOUNT_PAR = 1000\n\n\nclass Server():\n # \"\"\"\n # Клас для організації пошуку ініціалів\n # :param clients - словник клієнтів; key - номер клієнта, value - різні параметри клієнта\n # :param par - масив параграфів\n # :param current_par - поточний параграф\n # :param initials - знайдені ініціали\n # :param search - статус пошуку: true - здійснюється пошук; false - пошук зупинений\n # :param count_par - к-сть парафів, які будуть віддаватись клієнту за раз\n # :param session - унікальний ключ для ідентифікації пошуку\n # :param timeout - к-сть секунд, після яких неактивні клієнти відключаються\n # :param stopped - масив, який складається із зупинених частин пошуку\n # :param log - журнал пошуку\n # :param time - час початку пошуку\n # :param count - к-сть параграфів\n # :param count_task - к-сть задач\n # :param count_active_tasks - к-сть активних задач\n # :param ended_task_count - к-сьб хавершених задач\n # \"\"\"\n\n def __init__(self):\n \"\"\"\n Ініціалізація полів класу\n \"\"\"\n self.clients = dict()\n self.par = \"\"\n self.current_par = 0\n self.initials = []\n self.search = False\n self.count_par = COUNT_PAR\n self.session = 0\n self.timeout = 20\n self.stopped = []\n self.log = []\n self.time = 0\n self.count = 0\n self.count_task = 0\n self.count_active_tasks = 0\n self.ended_task_count = 0\n\n\n def add_log(self, info):\n \"\"\"\n Додавання запису в журнал\n :param info рядок, який потрібно додати у журнал\n \"\"\"\n self.log.append(\"[\" + str(datetime.now()) + \"]: \" + info)\n\n def set_par(self, par):\n \"\"\"\n Запускає пошук ініціалів;\n self.session присвоюється випадкове значення із 32 біт для ідентифікації поточного пошуку\n :param par - рядок вхідного тексту\n \"\"\"\n self.time = datetime.now()\n self.current_par = 0\n self.initials = []\n self.count_active_tasks = 0\n self.ended_task_count = 0\n\n self.par = par.split('\\n')\n for i in range(len(self.par)):\n self.par[i] = self.par[i].decode('utf-8', 'strict')\n self.search = True\n self.count = len(self.par)\n self.count_task = int(self.count / self.count_par) + 1\n self.add_log(\"Start search. 
Count paragraphs: %d Text: %s...\" % (self.count, self.par[0]))\n self.session = \"%x\" % random.getrandbits(32)\n self.stopped = []\n\n self.add_log(u\"Session: %s\" % self.session)\n\n def add_client(self, ip):\n \"\"\"\n Додати нового клієнта.\n :param ip - ip-адреса клієнта\n :return id - номер клієнта\n \"\"\"\n id = 1\n while self.clients.get(id):\n id += 1\n self.clients[id] = {\"ip\": ip, \"update\": datetime.now()}\n self.add_log(u\"New client is added: id:%d; ip:%s\" % (id, str(ip)))\n return id\n\n def find(self, id, initials):\n \"\"\"\n Обробка результату пошуку\n Додає знайдені ініціали в загальний список знайдених ініціалів.\n\n :param id - номер клієнта\n :param initials - знайдені ініціали\n \"\"\"\n self.count_active_tasks -= 1\n self.ended_task_count += 1\n del self.clients[id][\"current_par\"]\n if initials:\n self.initials += initials\n str = \"\"\n for i in range(len(initials)):\n str += initials[i] + \", \"\n self.add_log(u\"Client №%d has found initials: %s\" % (id, str[:-2]))\n percent = float(self.ended_task_count) / float(self.count_task) * 100\n self.add_log(\"Progress (%d/%d): %.2f%%\" % (self.ended_task_count, self.count_task, percent))\n self.check_end()\n return \"ok\"\n\n def check_end(self):\n \"\"\"\n Перевірка завершення пошуку\n \"\"\"\n if self.current_par > self.count and self.count_active_tasks <= 0 and (not self.stopped) and (\n self.ended_task_count == self.count_task):\n self.stop()\n\n def stop(self):\n \"\"\"\n Зупинка пошуку\n \"\"\"\n self.add_log(u\"Search ended!\")\n self.add_log(u\"Time: %s\" % str(datetime.now() - self.time))\n self.search = False\n for i in self.clients:\n if self.clients[i].get(\"current_par\"):\n del self.clients[i][\"current_par\"]\n\n def get_work(self, id):\n \"\"\"\n Видає роботу клієнту.\n Якщо масив stopped не пустий (є зупинені частини пошуку) віддаємо цю частину клієнту\n інакше віддаємо нову частину.\n\n :param id - номер клієнта\n :return session - сесія\n\n \"\"\"\n self.check_clients()\n id = int(id)\n\n ret = {\"session\": self.session}\n\n if len(self.stopped) > 0:\n self.clients[id][\"current_par\"] = self.stopped[0]\n current = self.stopped[0]\n self.add_log(u\"Client №%d got canceled task %s\" % (id, current))\n self.stopped.remove(self.stopped[0])\n else:\n if self.current_par > self.count:\n return \"no_work\"\n self.clients[id][\"current_par\"] = self.current_par\n current = self.current_par\n self.add_log(\"Client №%d got a task %s.\" % (id, current))\n self.current_par += self.count_par\n\n if current + self.count_par < self.count:\n array = self.par[current:current + self.count_par]\n else:\n array = self.par[current:self.count]\n self.count_active_tasks += 1\n ret[\"paragraph\"] = array\n return json.dumps(ret)\n\n def check_clients(self):\n \"\"\"\n Перевіряє активність клієнтів. Якщо якийсь клієн не відправляв запит online на\n протязі останніх self.timeout секунд, він відключається\n \"\"\"\n if len(self.clients) > 0:\n for key in self.clients.keys():\n if (datetime.now() - self.clients[key][\"update\"]).seconds > self.timeout:\n self.stop_client(key)\n\n def online_client(self, id):\n \"\"\"\n Встановити час останньої активності клієнта\n :param id - номер клієнта\n :return True - клієнт з таким id існує\n False - клієнта з таким id не існує\n\n \"\"\"\n id = int(id)\n if self.clients.get(id):\n self.clients[id][\"update\"] = datetime.now()\n return True\n return False\n\n def stop_client(self, id):\n \"\"\"\n Видаляє клієнта з номером id. 
Задачу, яку виконував клієнт додаємо\n в масив stopped, для подальшої передачі її іншому клієнту.\n :param id: номер клієнта\n \"\"\"\n id = int(id)\n par = self.clients[id].get(\"current_par\")\n if par:\n self.stopped.append(par)\n self.add_log(\"Client №%d has disconnected. His task (%s) is added to cancel list.\" % (id, par))\n else:\n self.add_log(\"Client №%d has disconnected.\" % id)\n del self.clients[id]\n\n\n def server_update(self):\n \"\"\"\n Запускає перевірку активності клієнтів.\n :return: список клієнтів clients; журнал log;ініліали, якщо вже знайдені\n \"\"\"\n self.check_clients()\n res = {\"search\": self.search, \"clients\": self.clients, \"log\": self.log}\n if self.initials != []:\n res[\"initial\"] = self.initials\n return res\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 8753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.datetime.now", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "random.getrandbits", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 90, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 129, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 178, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 191, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 191, "usage_type": "name"}]} +{"seq_id": "84723903", "text": "from flask import request\nfrom flask import Flask\nfrom flask import render_template\nfrom club_functions import *\nfrom pymongo import MongoClient\n\napp = Flask(__name__, static_folder='./static')\nclient = MongoClient(\"db\", 27017)\ndb = client.clubs\n\n@app.route('/')\ndef home():\n\t#return render_template(\"home.html\")\n\treturn \"flask2\"\n\t\n@app.route('/search_clubs', methods=[\"POST\", \"GET\"])\ndef search_clubs():\n\tif request.method == \"POST\":\n\n\t\tinterests = request.form.getlist(\"myCheckbox\")\n\t\t#print(interests)\n\n\t\tclubs = search_by_interest(db, interests) #clubs is an list\n\t\tprint(clubs)\n\t\treturn render_template(\"show_clubs.html\", clubs=clubs) #show recommended clubs once they have entered the info\n\n\tif request.method == \"GET\":\n\t\treturn render_template(\"get_interests.html\") #render page to get info from user\n\n@app.route('/club_catalog')\ndef club_catalog():\n\tclubs = get_all_clubs(db) #returns list of all clubs, clubs in list are represented as a club object\n\treturn render_template(\"club_catalog.html\", clubs=clubs)\n\t#return render_template(\"new_club_catalog.html\", clubs=clubs)\n\n@app.route('/featured_news')\ndef featured_news():\n\treturn render_template(\"featured_news.html\")\n\n@app.route('/contact')\ndef contact():\n\treturn render_template(\"contact.html\")\n\nif __name__ == \"__main__\":\n\tapp.run(host=\"0.0.0.0\", debug=True, port=5000)\n", "sub_path": 
"dn_club_rush/src/flask4/old/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "479949597", "text": "import testing.postgresql\nimport psycopg2\nimport csv\nfrom unittest.mock import patch, PropertyMock\n\nimport luigi\n\nfrom etl.etl import ProjectDemographicLoad\n\n@patch('etl.DBCredentialMixin.host', new_callable=PropertyMock)\n@patch('etl.DBCredentialMixin.database', new_callable=PropertyMock)\n@patch('etl.DBCredentialMixin.user', new_callable=PropertyMock)\ndef test_etl_dimprojectdemographic_table(mock_user, mock_database, mock_host):\n with testing.postgresql.Postgresql() as postgresql:\n # Mocking\n cred = postgresql.dsn()\n mock_host.return_value = cred['host'] + ':' + str(cred['port'])\n mock_database.return_value = cred['database']\n mock_user.return_value = cred['user']\n\n # Test luigi tasks\n w = luigi.worker.Worker()\n\n w.add(ProjectDemographicLoad(id=1, in_filename='tests/test_dimprojectdemographic.csv'))\n w.run()\n\n conn = psycopg2.connect(**cred)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM public.dimprojectdemographic\")\n result = cur.fetchall()\n\n # Calculate from the csv file\n with open('tests/test_dimprojectdemographic.csv') as f:\n count = sum(1 for row in csv.reader(f))\n\n assert len(result) == count - 1\n", "sub_path": "tests/test_etl.py", "file_name": "test_etl.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "testing.postgresql.postgresql.Postgresql", "line_number": 14, "usage_type": "call"}, {"api_name": "testing.postgresql.postgresql", "line_number": 14, "usage_type": "attribute"}, {"api_name": "testing.postgresql", "line_number": 14, "usage_type": "name"}, {"api_name": "luigi.worker.Worker", "line_number": 22, "usage_type": "call"}, {"api_name": "luigi.worker", "line_number": 22, "usage_type": "attribute"}, {"api_name": "etl.etl.ProjectDemographicLoad", "line_number": 24, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 27, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 10, "usage_type": "call"}, {"api_name": "unittest.mock.PropertyMock", "line_number": 10, "usage_type": "name"}, 
{"api_name": "unittest.mock.patch", "line_number": 11, "usage_type": "call"}, {"api_name": "unittest.mock.PropertyMock", "line_number": 11, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.mock.PropertyMock", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "312869607", "text": "from tqdm import tqdm\r\n\r\ndef binomial(n,k): #accepts two integers n,k\r\n #returns binom(n,k)\r\n valueN = 1\r\n valueD = 1\r\n if n < 0 or k < 0 or n < k:\r\n return 0\r\n else: #now 0 =< k =< n\r\n k = min(k,n-k)\r\n for i in range(k):\r\n valueN *= (n-i)\r\n valueD *= (1+i)\r\n return valueN//valueD\r\n\r\n\r\n#checking squarefree is equivalent to checking not divisible by square of any prime.\r\n#biggest prime factor available in here is 47.\r\nrelevantprimes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47]\r\nrelevant = set()\r\n\r\nfor n in tqdm(range(51)): #go through all elements in pascal's triangle up to row 50\r\n for k in range(0,1+n//2): #rows are symmetric, so only need to do half\r\n test = binomial(n,k)\r\n flag = True\r\n for p in relevantprimes:\r\n if test % (p**2) == 0:\r\n flag = False\r\n break\r\n if flag:\r\n relevant.add(test)\r\n\r\nprint(len(relevant),sum(relevant))\r\n", "sub_path": "203.py", "file_name": "203.py", "file_ext": "py", "file_size_in_byte": 991, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tqdm.tqdm", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "445924848", "text": "import os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",'final.settings') # not final.settings.py !!!!\nimport django \ndjango.setup()\nfrom faker import Faker \nfa_fake_gen = Faker('fa_IR')\nfrom user_show.models import karbar\n# en_fake_gen = Faker()\n\ndef populate(n=6):\n for entry in range(n):\n fake_name = fa_fake_gen.name().split()\n\n # قبلا ایندکی ها \n # fake_fname = fake_name[0]\n # fake_lname = fake_name[1]\n # بودند اما برای اینکه سرکار و خانم و جناب و آقا دارد\n # ما ایندکس ها را به -1 و -2 تغییر میدهیم\n fake_fname = fake_name[-2]\n fake_lname = fake_name[-1]\n fake_email = fa_fake_gen.email()\n\n # create new entry now\n k = karbar.objects.get_or_create(fname=fake_fname,lname=fake_lname,email=fake_email)[0]\n\nif __name__ == \"__main__\":\n print('populating persian data')\n populate(20)\n print('persian fake data populated')\n\n\n# ============================================================================\n'''\n# read the docs\n# https://github.com/joke2k/faker\n\n# from faker import Faker\n# fake = Faker('it_IT')\n# for _ in range(10):\n# # print(fake.name())\n# ar_EG - Arabic (Egypt)\n# ar_PS - Arabic (Palestine)\n# ar_SA - Arabic (Saudi Arabia)\n# bg_BG - Bulgarian\n# bs_BA - Bosnian\n# cs_CZ - Czech\n# de_DE - German\n# dk_DK - Danish\n# el_GR - Greek\n# en_AU - English (Australia)\n# en_CA - English (Canada)\n# en_GB - English (Great Britain)\n# en_NZ - English (New Zealand)\n# en_US - English (United States)\n# es_ES - Spanish (Spain)\n# es_MX - Spanish (Mexico)\n# et_EE - Estonian\n# fa_IR - Persian (Iran)\n# fi_FI - Finnish\n# fr_FR - French\n# hi_IN - Hindi\n# hr_HR - Croatian\n# hu_HU - Hungarian\n# hy_AM - Armenian\n# it_IT - Italian\n# ja_JP - Japanese\n# ka_GE - Georgian (Georgia)\n# ko_KR - Korean\n# lt_LT - Lithuanian\n# lv_LV - Latvian\n# ne_NP - Nepali\n# nl_NL - Dutch (Netherlands)\n# no_NO - Norwegian\n# pl_PL - Polish\n# pt_BR - Portuguese (Brazil)\n# pt_PT - Portuguese (Portugal)\n# ro_RO - Romanian\n# 
ru_RU - Russian\n# sl_SI - Slovene\n# sv_SE - Swedish\n# tr_TR - Turkish\n# uk_UA - Ukrainian\n# zh_CN - Chinese (China)\n# zh_TW - Chinese (Taiwan)'''", "sub_path": "Django_b/final/z_populate_uzr.py", "file_name": "z_populate_uzr.py", "file_ext": "py", "file_size_in_byte": 2216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.environ.setdefault", "line_number": 2, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 4, "usage_type": "call"}, {"api_name": "faker.Faker", "line_number": 6, "usage_type": "call"}, {"api_name": "user_show.models.karbar.objects.get_or_create", "line_number": 24, "usage_type": "call"}, {"api_name": "user_show.models.karbar.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "user_show.models.karbar", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "505251385", "text": "import dcache\nimport dicom\nfrom aenum import Enum, auto\nimport logging\n\nclass DLVL(Enum):\n \"\"\"\n Enumerated DICOM service levels\n \"\"\"\n INSTANCES = auto()\n SERIES = auto()\n STUDIES = auto()\n PATIENTS = auto()\n\n\nclass Dixel(dcache.Persistent):\n\n @classmethod\n def read_dcm(cls, dcm_fp):\n logging.debug(\"Reading DCM file\")\n tags = dicom.read_file(dcm_fp, stop_before_pixels=True)\n data = {'PatientID': tags[0x0010, 0x0020].value,\n 'StudyInstanceUID': tags[0x0020, 0x000d].value,\n 'SeriesInstanceUID': tags[0x0020, 0x000e].value,\n 'SOPInstanceUID': tags[0x0008, 0x0018].value,\n 'TransferSyntaxUID': tags.file_meta.TransferSyntaxUID,\n 'MediaStorage': tags.file_meta.MediaStorageSOPClassUID,\n 'AccessionNumber': tags[0x0008, 0x0050].value,\n 'HasPixels': 'PixelData' in tags,\n 'FilePath': dcm_fp}\n return data\n\n\n @classmethod\n def check_orthanc(cls, oid, orthanc):\n pass\n\n\n def __init__(self, key, data=None, cache=None, init_fn=None, dlvl=DLVL.STUDIES):\n self.dlvl = dlvl\n super(Dixel, self).__init__(key, data, cache, init_fn)\n\n\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(level=logging.DEBUG)\n\n R = dcache.RedisCache(db=15, clear=True)\n\n dcm_fp = \"/Users/derek/Desktop/Protect3/37/20000101-CT/Series-1/14409.254617228500857193993507647715712518182\"\n d = Dixel(dcm_fp, cache=R, dlvl=DLVL.INSTANCES, init_fn=Dixel.read_dcm)\n # With init function on key\n logging.debug(d.data)\n d.data['exists'] = True\n d.persist()\n\n Q = dcache.RedisCache(db=15)\n e = Dixel(dcm_fp, cache=Q, dlvl=DLVL.INSTANCES, init_fn=Dixel.read_dcm)\n logging.debug(e.data)\n\n # Read montage dixels\n M = dcache.CSVCache(fp=\"/Users/derek/Desktop/test_3.csv\",\n id_field=\"Accession Number\")\n for item in M.cache.itervalues():\n # logging.debug(item)\n f = Dixel(item[\"Accession Number\"], data=item, cache=R, dlvl=DLVL.STUDIES)\n f.data['Important thing'] = \"Important data\"\n f.persist()\n\n for item in M.cache.itervalues():\n f = Dixel(item[\"Accession Number\"], cache=R, dlvl=DLVL.STUDIES)\n logging.debug(f.data)\n\n # f = Dixel(oid, cache=Q,\n # init_fn=Dixel.check_orthanc, init_args=[orthanc, DLVL.SERIES])\n", "sub_path": "DianaFuture/dixel.py", "file_name": "dixel.py", "file_ext": "py", "file_size_in_byte": 2395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "aenum.Enum", "line_number": 6, "usage_type": "name"}, {"api_name": "aenum.auto", "line_number": 10, "usage_type": "call"}, {"api_name": "aenum.auto", 
"line_number": 11, "usage_type": "call"}, {"api_name": "aenum.auto", "line_number": 12, "usage_type": "call"}, {"api_name": "aenum.auto", "line_number": 13, "usage_type": "call"}, {"api_name": "dcache.Persistent", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 20, "usage_type": "call"}, {"api_name": "dicom.read_file", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 47, "usage_type": "attribute"}, {"api_name": "dcache.RedisCache", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 54, "usage_type": "call"}, {"api_name": "dcache.RedisCache", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 60, "usage_type": "call"}, {"api_name": "dcache.CSVCache", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "81125038", "text": "\"\"\"Module for holding model data and returning it in a form useful for ML pipelines.\"\"\"\n\nfrom typing import Tuple, Optional, List\nfrom datetime import date\n\nimport pandas as pd\nfrom kedro.framework.context import KedroContext\nfrom kedro.framework.session import get_current_session\n\nfrom augury.types import YearRange\nfrom augury.settings import (\n INDEX_COLS,\n TRAIN_YEAR_RANGE,\n VALIDATION_YEAR_RANGE,\n)\n\n\nEND_OF_YEAR = f\"{date.today().year}-12-31\"\n\n\nclass MLData:\n \"\"\"Holds model data and returns it in a form useful for ML pipelines.\"\"\"\n\n def __init__(\n self,\n context: Optional[KedroContext] = None,\n data_set: str = \"full_data\",\n train_year_range: YearRange = TRAIN_YEAR_RANGE,\n test_year_range: YearRange = VALIDATION_YEAR_RANGE,\n index_cols: List[str] = INDEX_COLS,\n label_col: str = \"margin\",\n ) -> None:\n \"\"\"\n Instantiate an MLData object.\n\n Params\n ------\n context: Relevant context for loading data sets.\n data_set: Name of the data set to load.\n train_year_range: Year range (inclusive, exclusive per `range` function)\n for data to include in training sets.\n test_year_range: Year range (inclusive, exclusive per `range` function)\n for data to include in testing sets.\n index_cols: Column names to use for the DataFrame's index.\n label_col: Name of the column to use for data labels (i.e. y data set).\n \"\"\"\n self.context = context\n self._data_set = data_set\n self._train_year_range = train_year_range\n self._test_year_range = test_year_range\n self.index_cols = index_cols\n self.label_col = label_col\n self._data = pd.DataFrame()\n self._X_data = None\n self._y_data = None\n\n if self.context is None:\n session = get_current_session()\n assert session is not None\n self.context = session.load_context()\n\n @property\n def data(self) -> pd.DataFrame:\n \"\"\"Full data set stored in the given class instance.\"\"\"\n if self._data.empty:\n self._data = self._load_data()\n\n return self._data\n\n @property\n def train_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Filter data by year to produce training data.\"\"\"\n if len(self.data.index.names) != 3:\n raise ValueError(\n \"The index of the data frame must have 3 levels. 
The expected indexes \"\n \"are ['team', 'year', 'round_number'], but the index names are: \"\n f\"{self.data.index.names}\"\n )\n\n train_year_range = range(*self.train_year_range)\n X_train = self._X.loc[(slice(None), train_year_range, slice(None)), :]\n y_train = self._y.loc[\n (\n slice(None),\n # Series can't slice by range, so we have to convert to a valid\n # slice (i.e. end inclusive, so subtract 1)\n slice(min(train_year_range), max(train_year_range)),\n slice(None),\n )\n ]\n\n return X_train, y_train\n\n @property\n def test_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Filter data by year to produce test data.\"\"\"\n if len(self.data.index.names) != 3:\n raise ValueError(\n \"The index of the data frame must have 3 levels. The expected indexes \"\n \"are ['team', 'year', 'round_number'], but the index names are: \"\n f\"{self.data.index.names}\"\n )\n\n test_year_range = range(*self.test_year_range)\n X_test = self._X.loc[(slice(None), test_year_range, slice(None)), :]\n y_test = self._y.loc[\n (\n slice(None),\n # Series can't slice by range, so we have to convert to a valid\n # slice (i.e. end inclusive)\n slice(min(test_year_range), max(test_year_range)),\n slice(None),\n )\n ]\n\n return X_test, y_test\n\n @property\n def train_year_range(self) -> YearRange:\n \"\"\"Range of years for slicing training data.\"\"\"\n return self._train_year_range\n\n @train_year_range.setter\n def train_year_range(self, years: YearRange) -> None:\n self._train_year_range = years\n\n @property\n def test_year_range(self) -> YearRange:\n \"\"\"Range of years for slicing test data.\"\"\"\n return self._test_year_range\n\n @test_year_range.setter\n def test_year_range(self, years: YearRange) -> None:\n self._test_year_range = years\n\n @property\n def data_set(self) -> str:\n \"\"\"Name of the associated kedro data set.\"\"\"\n return self._data_set\n\n @data_set.setter\n def data_set(self, name: str) -> None:\n if self._data_set != name:\n self._data = pd.DataFrame()\n self._X_data = None\n self._y_data = None\n\n self._data_set = name\n\n def _load_data(self):\n data_frame = pd.DataFrame(self.context.catalog.load(self.data_set))\n\n # When loading date columns directly from JSON, we need to convert them\n # from string to datetime\n if \"date\" in list(data_frame.columns) and data_frame[\"date\"].dtype == \"object\":\n data_frame.loc[:, \"date\"] = pd.to_datetime(data_frame[\"date\"])\n\n return (\n data_frame.set_index(self.index_cols, drop=False)\n .rename_axis([None] * len(self.index_cols))\n .sort_index()\n )\n\n @property\n def _X(self) -> pd.DataFrame:\n if self._X_data is None:\n self._X_data = self._load_X()\n\n return self._X_data\n\n def _load_X(self) -> pd.DataFrame:\n labels = [\n \"(?:oppo_)?score\",\n \"(?:oppo_)?(?:team_)?behinds\",\n \"(?:oppo_)?(?:team_)?goals\",\n \"(?:oppo_)?margin\",\n \"(?:oppo_)?result\",\n ]\n label_cols = self.data.filter(regex=f\"^{'$|^'.join(labels)}$\").columns\n features = self.data.drop(label_cols, axis=1)\n\n numeric_features = features.select_dtypes(\"number\").astype(float)\n categorical_features = features.select_dtypes(exclude=[\"number\"])\n\n # Sorting columns with categorical features first to allow for positional indexing\n # for some data transformations further down the pipeline.\n # We sort each column group alphabetically to guarantee the same column order\n # as long as the columns are the same.\n return pd.concat(\n [\n categorical_features[sorted(categorical_features.columns)],\n 
numeric_features[sorted(numeric_features.columns)],\n ],\n axis=1,\n )\n\n @property\n def _y(self) -> pd.Series:\n if self._y_data is None:\n self._y_data = self._load_y()\n\n return self._y_data\n\n def _load_y(self) -> pd.Series:\n return self.data[self.label_col]\n", "sub_path": "src/augury/ml_data.py", "file_name": "ml_data.py", "file_ext": "py", "file_size_in_byte": 6957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.date.today", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "kedro.framework.context.KedroContext", "line_number": 26, "usage_type": "name"}, {"api_name": "augury.types.YearRange", "line_number": 28, "usage_type": "name"}, {"api_name": "augury.types.YearRange", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "augury.settings.TRAIN_YEAR_RANGE", "line_number": 28, "usage_type": "name"}, {"api_name": "augury.settings.VALIDATION_YEAR_RANGE", "line_number": 29, "usage_type": "name"}, {"api_name": "augury.settings.INDEX_COLS", "line_number": 30, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "kedro.framework.session.get_current_session", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 71, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 95, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 95, "usage_type": "attribute"}, {"api_name": "augury.types.YearRange", "line_number": 119, "usage_type": "name"}, {"api_name": "augury.types.YearRange", "line_number": 124, "usage_type": "name"}, {"api_name": "augury.types.YearRange", "line_number": 128, "usage_type": "name"}, {"api_name": "augury.types.YearRange", "line_number": 133, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 144, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 204, "usage_type": "attribute"}]} +{"seq_id": "422227287", "text": "import re\nimport base64\nfrom urllib.parse import unquote\nfrom itemloaders.processors import TakeFirst, MapCompose\nfrom scrapy.loader import ItemLoader\nfrom scrapy import Selector\nfrom .items import YoulaAutoItem, HHVacancyItem, HHCompaniesItem\n\ndef search_owner_phone(itm):\n find_owner_phone = re.compile(r\"phone%22%2C%22([0-9a-zA-Z]{33}w%3D%3D)%22%2C%22time\")\n phone_encoded = re.findall(find_owner_phone, itm)\n # code-decode-code encryption))\n phone_decoded = base64.b64decode(base64.b64decode(unquote(phone_encoded[0]).encode('utf-8')))\n phone = phone_decoded.decode('utf-8')\n return phone\n\ndef search_owner_id(itm):\n owner_id = 
re.compile(r\"youlaId%22%2C%22([0-9a-zA-Z]+)%22%2C%22avatar\")\n dealer_id = re.compile(\"page%22%2C%22(https%3A%2F%2Fam.ru%2Fcardealers%2F[0-9a-zA-Z\\S]+%2F%23info)\"\n \"%22%2C%22salePointLogo\")\n owner = re.findall(owner_id, itm)\n dealer = re.findall(dealer_id, itm)\n #if not private person, but dealer condition:\n owner = f'https://youla.ru/user/{owner[0]}' if owner else unquote(dealer[0])\n return owner\n\n\ndef get_tech_data(itm):\n tech_data = Selector(text=itm)\n return {tech_data.xpath('//div/div[1]/text()').get(): tech_data.xpath('//div/div[2]//text()').get()}\n\n\ndef tech_data_out(itms):\n tech_data = {itm for itm in itms if None not in itm}\n return tech_data\n\n\nclass YoulaAutoLoader(ItemLoader):\n default_item_class = YoulaAutoItem\n title_out = TakeFirst()\n url_out = TakeFirst()\n tech_data_in = MapCompose(get_tech_data)\n tech_data_out = tech_data_out\n description_out = TakeFirst()\n owner_in = MapCompose(search_owner_id)\n owner_out = TakeFirst()\n phone_num_in = MapCompose(search_owner_phone)\n phone_num_out = TakeFirst()\n\n\ndef symbols_delete(itm):\n result = itm.replace('\\xa0', '')\n return result\n\n\ndef list_to_string_with_space(itm):\n result = ' '.join(itm)\n return symbols_delete(result)\n\n\ndef create_owner_url(itm):\n result = f'https://hh.ru{itm[0]}'\n return result\n\nclass HHVacancyLoader(ItemLoader):\n default_item_class = HHVacancyItem\n url_out = TakeFirst()\n title_out = TakeFirst()\n salary_in = list_to_string_with_space\n salary_out = TakeFirst()\n description_in = list_to_string_with_space\n description_out = TakeFirst()\n skills_out = MapCompose(symbols_delete)\n owner_url_in = create_owner_url\n owner_url_out = TakeFirst()\n\n\nclass HHCompaniesLoader(ItemLoader):\n default_item_class = HHCompaniesItem\n url_out = TakeFirst()\n title_in = list_to_string_with_space\n title_out = TakeFirst()\n company_url_out = TakeFirst()\n field_of_work_out = MapCompose(symbols_delete)\n description_in = list_to_string_with_space\n description_out = TakeFirst()\n\n\ndef get_tech_data(itm):\n tech_data = Selector(text=itm)\n return {tech_data.xpath('//div/div[1]/text()').get(): tech_data.xpath('//div/div[2]//text()').get()}\n\n\ndef tech_data_out(itms):\n tech_data = {}\n for itm in itms:\n if None not in itm:\n tech_data.update(itm)\n return tech_data\n", "sub_path": "lesson4/gbpars/loaders.py", "file_name": "loaders.py", "file_ext": "py", "file_size_in_byte": 3085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 11, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 13, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 13, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 24, "usage_type": "call"}, {"api_name": "scrapy.Selector", "line_number": 29, "usage_type": "call"}, {"api_name": "scrapy.loader.ItemLoader", "line_number": 38, "usage_type": "name"}, {"api_name": "items.YoulaAutoItem", "line_number": 39, "usage_type": "name"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 40, "usage_type": "call"}, {"api_name": 
"itemloaders.processors.TakeFirst", "line_number": 41, "usage_type": "call"}, {"api_name": "itemloaders.processors.MapCompose", "line_number": 42, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 44, "usage_type": "call"}, {"api_name": "itemloaders.processors.MapCompose", "line_number": 45, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 46, "usage_type": "call"}, {"api_name": "itemloaders.processors.MapCompose", "line_number": 47, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 48, "usage_type": "call"}, {"api_name": "scrapy.loader.ItemLoader", "line_number": 65, "usage_type": "name"}, {"api_name": "items.HHVacancyItem", "line_number": 66, "usage_type": "name"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 67, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 68, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 70, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 72, "usage_type": "call"}, {"api_name": "itemloaders.processors.MapCompose", "line_number": 73, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 75, "usage_type": "call"}, {"api_name": "scrapy.loader.ItemLoader", "line_number": 78, "usage_type": "name"}, {"api_name": "items.HHCompaniesItem", "line_number": 79, "usage_type": "name"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 80, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 82, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 83, "usage_type": "call"}, {"api_name": "itemloaders.processors.MapCompose", "line_number": 84, "usage_type": "call"}, {"api_name": "itemloaders.processors.TakeFirst", "line_number": 86, "usage_type": "call"}, {"api_name": "scrapy.Selector", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "452516040", "text": "#!/usr/bin/python2.7\n\"\"\"\n File name: control_rate_pwm_auv_state.py\n Author: skconan\n Date created: 2018/10/29\n Python Version: 2.7\n Description: change rate\n\"\"\"\n\nfrom sensor_msgs.msg import Imu\nfrom std_msgs.msg import Float64\nfrom hg_ros_pololu.msg import Pwm\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nimport rospy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport rospkg\nqueue = []\n# pub = rospy.Publisher(\"/gx4_45_imu/data\", Imu, queue_size=1)\n# pub = rospy.Publisher(\"/pwm_auv_state\", Float64[], queue_size=1)\ncount = 0\n# r = 0.05\npath = rospkg.RosPack().get_path('zeabus_sensor') + '/scripts'\nrate = 1/20.\nf = open(path+'/auv_state_pwm_00.csv','w+')\npwm = Pwm()\npwm.pwm = [1500]*8\nauv_state = Odometry()\nimu = Imu()\nget_data = True\n\npub_pwm = rospy.Publisher(\"/pwm/sub_sampling\", Pwm, queue_size=1)\npub_imu = rospy.Publisher(\"/imu/sub_sampling\", Imu, queue_size=1)\npub_state = rospy.Publisher(\"/state/sub_sampling\", Odometry, queue_size=1)\n\ndef change_rate():\n global rate,auv_state,pwm, count, f, get_data, imu\n pub_imu.publish(imu)\n pub_pwm.publish(pwm)\n pub_state.publish(auv_state)\n string = str(imu.linear_acceleration.x) + \", \" + str(imu.linear_acceleration.y) + \"\\n\"\n f.write(string)\n rospy.sleep(rate)\n\ndef pwm_callback(msg):\n global pwm, get_data\n # print(msg,get_data)\n # if msg is None:\n # get_data = False\n if get_data:\n print(\"getdata PWM\")\n for i in range(8):\n 
pwm.pwm[i]= (msg.pwm[i])\n\ndef auv_state_callback(msg):\n global auv_state, get_data\n # print(msg,get_data)\n # if msg is None:\n # get_data = False\n if get_data:\n print(\"getdata AUV STATE\")\n auv_state.twist.twist.linear.x = (msg.twist.twist.linear.x)\n auv_state.twist.twist.linear.y = (msg.twist.twist.linear.y)\n\ndef imu_callback(msg):\n global imu, get_data\n if msg is None:\n get_data = False\n if get_data:\n print(\"getdata IMU\")\n # imu.linear_acceleration.x = msg.linear_acceleration.z\n # imu.linear_acceleration.y = msg.linear_acceleration.x\n imu.linear_acceleration.x = msg.linear_acceleration.x\n imu.linear_acceleration.y = msg.linear_acceleration.y\n\nif __name__ == '__main__':\n rospy.init_node('SubSampling')\n # rospy.Subscriber(\"/gx4_45_imu/data\", Imu, imu_callback)\n rospy.Subscriber(\"/imu/data\", Imu, imu_callback)\n rospy.Subscriber(\"/auv/state\", Odometry, auv_state_callback)\n rospy.Subscriber(\"/pwm\", Pwm, pwm_callback)\n # print(\"IMU FILTER\")\n # rospy.spin()\n while not rospy.is_shutdown():\n # get_data = False\n change_rate()\n # get_data = True\n\n", "sub_path": "zeabus_sensor/scripts/sampling_data.py", "file_name": "sampling_data.py", "file_ext": "py", "file_size_in_byte": 2684, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rospkg.RosPack", "line_number": 24, "usage_type": "call"}, {"api_name": "hg_ros_pololu.msg.Pwm", "line_number": 27, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 29, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Imu", "line_number": 30, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 33, "usage_type": "call"}, {"api_name": "hg_ros_pololu.msg.Pwm", "line_number": 33, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 34, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Imu", "line_number": 34, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 35, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 35, "usage_type": "argument"}, {"api_name": "rospy.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 78, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 80, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Imu", "line_number": 80, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 81, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 81, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 82, "usage_type": "call"}, {"api_name": "hg_ros_pololu.msg.Pwm", "line_number": 82, "usage_type": "argument"}, {"api_name": "rospy.is_shutdown", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "611607300", "text": "import datetime\r\n\r\nfrom .inventoryFunctions import *\r\nimport time\r\ntime.strftime('%Y-%m-%d %H:%M:%S')\r\n\r\ndef getCurrentRiceTypes():\r\n sql = '''SELECT * FROM RICE_TYPE WHERE AVAILABLE = 1;'''\r\n cur = conn.execute(sql)\r\n return createRiceTypeArr(cur)\r\n\r\ndef getAllRiceTypes():\r\n sql = '''SELECT * FROM RICE_TYPE'''\r\n cur = conn.execute(sql)\r\n return createRiceTypeArr(cur)\r\n\r\n\r\ndef getCurrentSauceTypes():\r\n sql = '''SELECT * FROM SAUCE_TYPE WHERE AVAILABLE = 1;'''\r\n cur = conn.execute(sql)\r\n return createSauceTypeArr(cur)\r\n\r\ndef getAllSauceTypes():\r\n sql = '''SELECT * FROM SAUCE_TYPE'''\r\n cur = conn.execute(sql)\r\n return 
createSauceTypeArr(cur)\r\n\r\n\r\ndef getCurrentIngredientTypes():\r\n sql = '''SELECT * FROM INGREDIENT_TYPE WHERE AVAILABLE = 1;'''\r\n cur = conn.execute(sql)\r\n return createIngredientTypeArr(cur)\r\n\r\ndef getAllIngredientTypes():\r\n sql = '''SELECT * FROM INGREDIENT_TYPE'''\r\n cur = conn.execute(sql)\r\n return createIngredientTypeArr(cur)\r\n\r\ndef placeOrder(order):\r\n orderNumber = getStateCounter('currentOrderNumber')\r\n sql = 'INSERT INTO ORDERS(ORDER_NUMBER, ORDER_NAME, ORDER_PRICE, PICKUP_TIME, STATUS_CODE) VALUES(?, ?, ?, ?, ?)'\r\n\r\n # get order time\r\n if order.pickupTime == None:\r\n orderTime = datetime.datetime.now() + datetime.timedelta(minutes = 30)\r\n order.pickupTime = orderTime\r\n\r\n # get status code\r\n statusCode = 0\r\n if order.statusCode == \"waiting\":\r\n statucCode = 0\r\n elif order.statusCode == \"readyForPickup\":\r\n statucCode = 1\r\n elif order.statusCode == 'completed':\r\n statusCode = 2\r\n elif order.statusCode == 'canceled':\r\n statusCode = 3\r\n\r\n conn.execute(sql, (orderNumber, order.orderName, order.orderPrice, order.pickupTime, statusCode))\r\n addBowlsToOrder(orderNumber, order.bowls)\r\n addAppetizersToOrder(orderNumber, order.appetizers)\r\n\r\n conn.commit()\r\n\r\ndef addBowlsToOrder(orderNumber, bowls):\r\n sql = 'INSERT INTO BOWLS(ORDER_NUMBER, BOWL_NUMBER, BOWL_PRICE, BOWL_SIZE, RICE_TYPE, SAUCE_TYPE) VALUES(?, ?, ?, ?, ?, ?)'\r\n\r\n for i in range(len(bowls)):\r\n bowl = bowls[i]\r\n conn.execute(sql, (orderNumber, i, bowl.bowlPrice, bowl.getSizeNumber(), bowl.riceType, bowl.sauceType))\r\n addIngrediantsToOrder(orderNumber, i, bowl.ingredients)\r\n\r\ndef addIngrediantsToOrder(orderNumber, bowlNumber, ingredients):\r\n sql = 'INSERT INTO BOWL_INGREDIENTS(ORDER_NUMBER, BOWL_NUMBER, INGREDIENT_TYPE) VALUES(?, ?, ?)'\r\n\r\n for i in range(len(ingredients)):\r\n ing = ingredients[i]\r\n conn.execute(sql, (orderNumber, bowlNumber, ing))\r\n\r\ndef addAppetizersToOrder(orderNumber, appetizers):\r\n sql = 'INSERT INTO APPETIZERS(ORDER_NUMBER, APPETIZER_NUMBER, NUMBER_ORDERED) VALUES(?, ?, 1)'\r\n\r\n for i in range(len(appetizers)):\r\n app = appetizers[i]\r\n conn.execute(sql, (orderNumber, app))\r\n\r\n\r\ndef getOrder(orderNumber):\r\n sql = 'SELECT * FROM ORDERS WHERE ORDER_NUMBER = ?'\r\n cur = conn.execute(sql, (orderNumber, ))\r\n orderNumber, orderName, orderPrice, pickupTime, statusCode = cur.fetchone()\r\n\r\n order = Order(orderNumber, orderNumber, orderPrice, pickupTime, statusCode)\r\n\r\n bowlsSql = 'SELECT * FROM BOWLS WHERE ORDER_NUMBER = ?'\r\n order.bowls = getBowls(orderNumber)\r\n order.appetizers = getAppetizers(orderNumber)\r\n order.payment = getPayments(orderNumber)\r\n return order\r\n\r\ndef getBowls(orderNumber):\r\n sql = 'SELECT * FROM BOWLS WHERE ORDER_NUMBER = ?'\r\n cur = conn.execute(sql, (orderNumber, ))\r\n arr = cur.fetchall()\r\n out = []\r\n for element in arr:\r\n orderNumber, bowlNumber, bowlPrice, bowlSize, riceType, sauceType = element\r\n bowl = Bowl(orderNumber, bowlNumber, bowlPrice, bowlSize, riceType, sauceType)\r\n bowl.ingredients = getIngredients(orderNumber, bowlNumber)\r\n out = out + [bowl]\r\n return out\r\n\r\ndef getIngredients(orderNumber, bowlNumber):\r\n sql = 'SELECT INGREDIENT_TYPE FROM BOWL_INGREDIENTS WHERE ORDER_NUMBER = ? 
AND BOWL_NUMBER = ?'\r\n cur = conn.execute(sql, (orderNumber, bowlNumber))\r\n arr = cur.fetchall()\r\n return arr\r\n\r\ndef getAppetizers(orderNumber):\r\n sql = 'SELECT APPETIZER_NUMBER FROM APPETIZERS WHERE ORDER_NUMBER = ?'\r\n cur = conn.execute(sql, (orderNumber, ))\r\n arr = cur.fetchall()\r\n return arr\r\n\r\ndef getPayments(orderNumber):\r\n sql = 'SELECT PAYMENT_TYPE FROM PAYMENTS WHERE ORDER_NUMBER = ?'\r\n cur = conn.execute(sql, (orderNumber, ))\r\n type = cur.fetchone()\r\n if type is None:\r\n return None\r\n else:\r\n type = type[0]\r\n if type == 0:\r\n return Payment('cash')\r\n else:\r\n return Payment('card', getCardInfo(orderNumber))\r\n\r\ndef getCardInfo(orderNumber):\r\n sql = 'SELECT * FROM CARD_INFO WHERE ORDER_NUMBER = ?'\r\n cur = conn.execute(sql, (orderNumber, ))\r\n orderNumber, firstName, lastName, address, cardNumber, ccv, zipCode = cur.fetchone()\r\n return CardInfo(orderNumber, firstName, lastName, address, cardNumber, ccv, zip)\r\n\r\n\r\ndef getActiveOrders():\r\n sql = 'SELECT ORDER_NUMBER FROM ORDERS WHERE STATUS_CODE = 0'\r\n cur = conn.execute(sql)\r\n arr = cur.fetchall()\r\n return unwrapOrders(arr)\r\n\r\ndef unwrapOrders(arr):\r\n out = []\r\n for element in arr:\r\n out = out + [getOrder(element[0])]\r\n return out\r\n\r\n\r\n\r\n\r\n\r\ndef setOrderStatus(orderNumber, status):\r\n statusCode = Order.getStatusCode(status)\r\n sql = 'UPDATE ORDERS SET STATUS_CODE = ? WHERE ORDER_NUMBER = ?;'\r\n conn.execute(sql, (statusCode, orderNumber))\r\n conn.commit()\r\n\r\n\r\n\r\n\r\n\r\ndef setOrderPayment(orderNumber, payment):\r\n if payment.paymentType == \"cash\":\r\n setCashPayment(orderNumber)\r\n else:\r\n setCardPayment(orderNumber, payment.cardInfo)\r\n\r\ndef setCashPayment(orderNumber):\r\n sql = 'INSERT INTO PAYMENTS(ORDER_NUMBER, PAYMENT_TYPE) VALUES(?, 0);'\r\n conn.execute(sql, (orderNumber, ))\r\n conn.commit()\r\n\r\ndef setCardPayment(orderNumber, cardInfo):\r\n sql = 'INSERT INTO PAYMENTS(ORDER_NUMBER, PAYMENT_TYPE) VALUES(?, 1);'\r\n conn.execute(sql, (orderNumber, ))\r\n\r\n sql2 = 'INSERT INTO CARD_INFO(ORDER_NUMBER, F_NAME, L_NAME, ADDRESS, CARD_NUMBER, CCV, CARD_ZIP) VALUES(?, ?, ?, ?, ?, ?, ?);'\r\n conn.execute(sql2, (orderNumber, cardInfo.firstName, cardInfo.lastName, cardInfo.address, cardInfo.cardNumber, cardInfo.ccv, cardInfo.zipCode))\r\n conn.commit()\r\n\r\n\r\ndef placeDebugOrder():\r\n order = Order(0, \"Reynolds\", 10, None, \"waiting\")\r\n order.bowls = [\r\n Bowl(bowlPrice=5, ingredients=[0, 1]),\r\n Bowl(bowlPrice=5, sauceType = 1, ingredients=[1])\r\n ]\r\n order.appetizers = [1]\r\n placeOrder(order)\r\n", "sub_path": "officialProj/Project/orderingFunctions.py", "file_name": "orderingFunctions.py", "file_ext": "py", "file_size_in_byte": 6691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "time.strftime", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "639834325", "text": "#!/usr/bin/env python\n\nimport sys\nimport os\nimport sqlite3\nimport shutil\nimport argparse\n\nimport time\nfrom datetime import datetime\nfrom pytz import timezone\nimport tempfile\nimport myfunc\nimport webserver_common as webcom\n\nFORMAT_DATETIME = webcom.FORMAT_DATETIME\nTZ = 
webcom.TZ\n\nprogname=os.path.basename(sys.argv[0])\nrootname_progname = os.path.splitext(progname)[0]\nlockname = os.path.realpath(__file__).replace(\" \", \"\").replace(\"/\", \"-\")\nimport fcntl\nlock_file = \"/tmp/%s.lock\"%(lockname)\nfp = open(lock_file, 'w')\ntry:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\nexcept IOError:\n print >> sys.stderr, \"Another instance of %s is running\"%(progname)\n sys.exit(1)\n\n\nrundir = os.path.dirname(os.path.realpath(__file__))\nbasedir = os.path.realpath(\"%s/..\"%(rundir)) # path of the application, i.e. pred/\npath_log = \"%s/static/log\"%(basedir)\npath_stat = \"%s/stat\"%(path_log)\npath_result = \"%s/static/result\"%(basedir)\npath_cache = \"%s/static/result/cache\"%(basedir)\ngen_logfile = \"%s/%s.log\"%(path_log, progname)\ngen_errfile = \"%s/%s.err\"%(path_log, progname)\n\ndef CleanCachedResult(MAX_KEEP_DAYS):# {{{\n \"\"\"Clean out-dated cached result\"\"\"\n db = \"%s/cached_job_finished_date.sqlite3\"%(path_log)\n tmpdb = tempfile.mktemp(prefix=\"%s_\"%(db))\n\n msg = \"copy db (%s) to tmpdb (%s)\"%(db, tmpdb)\n date_str = time.strftime(FORMAT_DATETIME)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n try:\n shutil.copyfile(db,tmpdb)\n except Exception as e:\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, str(e)), gen_errfile, \"a\", True)\n return 1\n\n md5listfile = \"%s/cache_to_delete.md5list\"%(path_log)\n con = sqlite3.connect(tmpdb)\n msg = \"output the outdated md5 list to %s\"%(md5listfile)\n date_str = time.strftime(FORMAT_DATETIME)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n\n tablename = \"data\"\n\n with con:\n cur = con.cursor()\n fpout = open(md5listfile, \"w\")\n nn_mag = cur.execute(\"SELECT md5, date_finish FROM %s\"%(tablename))\n cnt = 0 \n chunk_size = 1000\n while True:\n result = nn_mag.fetchmany(chunk_size)\n if not result:\n break\n else:\n for row in result:\n cnt += 1\n md5_key = row[0]\n finish_date_str = row[1]\n finish_date = webcom.datetime_str_to_time(finish_date_str)\n current_time = datetime.now(timezone(TZ))\n timeDiff = current_time - finish_date\n if timeDiff.days > MAX_KEEP_DAYS:\n fpout.write(\"%s\\n\"%(md5_key))\n fpout.close()\n\n\n # delete cached result folder and delete the record\n msg = \"Delete cached result folder and delete the record\"\n date_str = time.strftime(FORMAT_DATETIME)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n\n hdl = myfunc.ReadLineByBlock(md5listfile)\n lines = hdl.readlines()\n cnt = 0\n while lines != None:\n for line in lines:\n line = line.strip()\n if line != \"\":\n cnt += 1\n md5_key = line\n\n subfoldername = md5_key[:2]\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n zipfile_cache = cachedir + \".zip\"\n date_str = time.strftime(FORMAT_DATETIME)\n if os.path.exists(zipfile_cache):\n try:\n os.remove(zipfile_cache)\n msg = \"rm %s\"%(zipfile_cache)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n cmd_d = \"DELETE FROM %s WHERE md5 = '%s'\"%(tablename, md5_key)\n cur.execute(cmd_d)\n except Exception as e:\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, str(e)), gen_errfile, \"a\", True)\n pass\n\n lines = hdl.readlines()\n hdl.close()\n\n msg = \"VACUUM the database %s\"%(tmpdb)\n date_str = time.strftime(FORMAT_DATETIME)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n cur.execute(\"VACUUM\")\n\n # copy back\n msg = \"cp tmpdb (%s) -> db (%s)\"%(tmpdb, db)\n date_str = 
time.strftime(FORMAT_DATETIME)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n try:\n shutil.copyfile(tmpdb, db)\n except Exception as e:\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, str(e)), gen_errfile, \"a\", True)\n return 1\n\n msg = \"delete tmpdb (%s)\"%(tmpdb)\n date_str = time.strftime(FORMAT_DATETIME)\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, msg), gen_logfile, \"a\", True)\n try:\n os.remove(tmpdb)\n except Exception as e:\n myfunc.WriteFile(\"[%s] %s\\n\"%(date_str, str(e)), gen_errfile, \"a\", True)\n return 1\n\n\n# }}}\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Clean outdated cached results',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog='''\\\nCreated 2018-10-21, updated 2018-10-21, Nanjiang Shu\n\nExamples:\n %s -max-keep-day 360\n'''%(sys.argv[0]))\n parser.add_argument('-max-keep-day' , metavar='INT', dest='max_keep_days',\n default=360, type=int, required=False,\n help='The age of the cached result to be kept, (default: 360)')\n parser.add_argument('-v', dest='verbose', nargs='?', type=int, default=0, const=1, \n help='show verbose information, (default: 0)')\n\n args = parser.parse_args()\n\n MAX_KEEP_DAYS = args.max_keep_days\n verbose=args.verbose\n\n print (\"MAX_KEEP_DAYS = %d\\n\"%MAX_KEEP_DAYS)\n rtvalue = CleanCachedResult(MAX_KEEP_DAYS)\n sys.exit(rtvalue)\n\n", "sub_path": "proj/pred/app/clean_cached_result.py", "file_name": "clean_cached_result.py", "file_ext": "py", "file_size_in_byte": 5948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "webserver_common.FORMAT_DATETIME", "line_number": 16, "usage_type": "attribute"}, {"api_name": "webserver_common.TZ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "fcntl.lockf", "line_number": 26, "usage_type": "call"}, {"api_name": "fcntl.LOCK_EX", "line_number": 26, "usage_type": "attribute"}, {"api_name": "fcntl.LOCK_NB", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tempfile.mktemp", "line_number": 44, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 47, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 48, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 50, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 56, "usage_type": "call"}, {"api_name": "time.strftime", 
"line_number": 58, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 59, "usage_type": "call"}, {"api_name": "webserver_common.datetime_str_to_time", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 79, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 88, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 89, "usage_type": "call"}, {"api_name": "myfunc.ReadLineByBlock", "line_number": 91, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 107, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 109, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 113, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 120, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 121, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 126, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 127, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 129, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 131, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 135, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 136, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 138, "usage_type": "call"}, {"api_name": "myfunc.WriteFile", "line_number": 140, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 147, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "203802526", "text": "import feature\nimport video_game_model\nimport tensorflow as tf\nfrom tensorflow_recommenders_addons import dynamic_embedding as de\n\nfrom absl import flags\nfrom absl import app\n\nflags.DEFINE_integer('batch_size', 64, 'Batch size.')\nflags.DEFINE_integer('num_steps', 500, 'Number of training steps.')\nflags.DEFINE_integer('embedding_size', 4, 'Embedding size.')\nflags.DEFINE_integer('shuffle_size', 3000,\n 'Shuffle pool size for input examples.')\nflags.DEFINE_integer('reserved_features', 30000,\n 'Number of reserved features in embedding.')\nflags.DEFINE_string('export_dir', './export_dir', 'Directory to export model.')\nflags.DEFINE_string('mode', 'train', 'Select the running mode: train or test.')\n\nFLAGS = flags.FLAGS\n\n\ndef train(num_steps):\n \"\"\"\n Do trainnig and produce model.\n \"\"\"\n\n # Create a model\n model = video_game_model.VideoGameDnn(batch_size=FLAGS.batch_size,\n embedding_size=FLAGS.embedding_size)\n\n # Get data iterator\n iterator = feature.initialize_dataset(batch_size=FLAGS.batch_size,\n split='train',\n shuffle_size=FLAGS.shuffle_size,\n skips=0,\n balanced=True)\n\n # Run training.\n try:\n for step in range(num_steps):\n features, labels = feature.input_fn(iterator)\n loss, auc = model.train(features, labels)\n\n # To avoid too many features burst the memory, we restrict\n # the 
model embedding layer to `reserved_features` features.\n # And the restriction behavior will be triggered when it gets\n # over `reserved_features * 1.2`.\n model.embedding_store.restrict(FLAGS.reserved_features,\n trigger=int(FLAGS.reserved_features * 1.2))\n\n if step % 10 == 0:\n print('step: {}, loss: {}, var_size: {}, auc: {}'.format(\n step, loss, model.embedding_store.size(), auc))\n\n except tf.errors.OutOfRangeError:\n print('Run out the training data.')\n\n # Set TFRA ops become legit.\n options = tf.saved_model.SaveOptions(namespace_whitelist=['TFRA'])\n\n # Save the model for inference.\n inference_model = video_game_model.VideoGameDnnInference(model)\n inference_model(feature.input_fn(iterator)[0])\n inference_model.save('export', signatures=None, options=options)\n\n\ndef test(num_steps):\n \"\"\"\n Use some sampels to test the accuracy of model prediction.\n \"\"\"\n\n # Load model.\n options = tf.saved_model.SaveOptions(namespace_whitelist=['TFRA'])\n model = tf.saved_model.load('export', tags='serve', options=options)\n sig = model.signatures['serving_default']\n\n # Get data iterator\n iterator = feature.initialize_dataset(batch_size=FLAGS.batch_size,\n split='train',\n shuffle_size=0,\n skips=100000)\n\n # Do tests.\n for step in range(num_steps):\n features, labels = feature.input_fn(iterator)\n probabilities = sig(features)['output_1']\n probabilities = tf.reshape(probabilities, (-1))\n preds = tf.cast(tf.round(probabilities), dtype=tf.int32)\n labels = tf.cast(labels, dtype=tf.int32)\n ctr = tf.metrics.Accuracy()(labels, preds)\n print(\"step: {}, ctr: {}\".format(step, ctr))\n\n\ndef main(argv):\n del argv\n if FLAGS.mode == 'train':\n train(FLAGS.num_steps)\n elif FLAGS.mode == 'test':\n test(FLAGS.num_steps)\n else:\n raise ValueError('running mode only supports `train` or `test`')\n\n\nif __name__ == '__main__':\n app.run(main)\n", "sub_path": "demo/dynamic_embedding/amazon-us-reviews-digital-video-games/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "absl.flags.DEFINE_integer", "line_number": 9, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 9, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 10, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 10, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 11, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 11, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 12, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 12, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 14, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 14, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_string", "line_number": 16, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 16, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_string", "line_number": 17, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 17, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS", "line_number": 19, "usage_type": "attribute"}, {"api_name": "absl.flags", "line_number": 19, "usage_type": "name"}, {"api_name": "video_game_model.VideoGameDnn", "line_number": 28, "usage_type": "call"}, {"api_name": "feature.initialize_dataset", "line_number": 32, "usage_type": "call"}, {"api_name": 
"feature.input_fn", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.errors", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.SaveOptions", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 59, "usage_type": "attribute"}, {"api_name": "video_game_model.VideoGameDnnInference", "line_number": 62, "usage_type": "call"}, {"api_name": "feature.input_fn", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.saved_model.SaveOptions", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.load", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 74, "usage_type": "attribute"}, {"api_name": "feature.initialize_dataset", "line_number": 78, "usage_type": "call"}, {"api_name": "feature.input_fn", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.round", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.metrics.Accuracy", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.metrics", "line_number": 90, "usage_type": "attribute"}, {"api_name": "absl.app.run", "line_number": 105, "usage_type": "call"}, {"api_name": "absl.app", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "107216093", "text": "import warnings\nimport numpy as np\nfrom PIL import Image\nimport Augmentor\n\n\nclass AugmentorTransform():\n def __init__(self, scale=1.0 / 255, size=224, train=True):\n self.p = Augmentor.Pipeline()\n self.p.resize(probability=1, width=size, height=size,\n resample_filter='BILINEAR')\n if train:\n self.p.rotate(probability=0.5, max_left_rotation=10,\n max_right_rotation=10)\n self.p.flip_left_right(probability=0.5)\n self.p.flip_top_bottom(probability=0.5)\n self.p.zoom_random(probability=0.5, percentage_area=0.8)\n self.p.random_distortion(\n probability=0.5, grid_width=4, grid_height=4, magnitude=5)\n self.p.random_erasing(probability=0.5, rectangle_area=0.5)\n self.scale = scale\n\n def __call__(self, in_data):\n x, t = in_data\n\n if x.dtype == np.uint8:\n pass\n elif x.dtype == np.float32:\n if np.max(x) < 1.0:\n warnings.warn(\n 'scale is [0, 1]? 
AugmentorTransform assumes [0, 255]')\n x = x.astype(np.uint8)\n else:\n raise ValueError('cannot handle dtype {}'.format(x.dtype))\n\n if x.shape[0] == 1:\n x = np.concatenate([x, x, x])\n elif x.shape[0] > 3:\n x = x[:3]\n\n img = Image.fromarray(x.transpose((1, 2, 0)))\n for operation in self.p.operations:\n img = operation.perform_operation([img])[0]\n\n img = np.array(img, dtype=np.float32) * self.scale\n img = img.transpose((2, 0, 1))\n return img, t\n", "sub_path": "augmentor_transformer.py", "file_name": "augmentor_transformer.py", "file_ext": "py", "file_size_in_byte": 1633, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "Augmentor.Pipeline", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 29, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "210322467", "text": "import os\nimport shutil\nimport tempfile\n\nfrom datetime import date\n\nfrom django.views.generic.base import TemplateView\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\n\nfrom jsontableschema.model import SchemaModel\n\nfrom wildlifelicensing.apps.main.mixins import OfficerRequiredMixin, OfficerOrCustomerRequiredMixin\nfrom wildlifelicensing.apps.returns.models import Return, ReturnTable, ReturnRow\nfrom wildlifelicensing.apps.returns import excel\nfrom wildlifelicensing.apps.returns.forms import UploadSpreadsheetForm\nfrom wildlifelicensing.apps.returns.utils_schema import Schema\nfrom wildlifelicensing.apps.returns.signals import return_submitted\nfrom wildlifelicensing.apps.main.helpers import is_officer\n\nLICENCE_TYPE_NUM_CHARS = 2\nLODGEMENT_NUMBER_NUM_CHARS = 6\n\nRETURNS_APP_PATH = os.path.join(os.path.dirname(__file__), 'excel_templates')\n\n\ndef _is_post_data_valid(ret, tables_info, post_data):\n for table in tables_info:\n table_rows = _get_table_rows_from_post(table.get('name'), post_data)\n if len(table_rows) == 0:\n return False\n schema = Schema(ret.return_type.get_schema_by_name(table.get('name')))\n if not schema.is_all_valid(table_rows):\n return False\n return True\n\n\ndef _get_validated_rows_from_post(ret, table_name, post_data):\n rows = _get_table_rows_from_post(table_name, post_data)\n schema = Schema(ret.return_type.get_schema_by_name(table_name))\n return list(schema.rows_validator(rows))\n\n\ndef _get_table_rows_from_post(table_name, post_data):\n table_namespace = table_name + '::'\n by_column = dict([(key.replace(table_namespace, ''), post_data.getlist(key)) for key in post_data.keys() if\n key.startswith(table_namespace)])\n # by_column is of format {'col_header':[row1_val, row2_val,...],...}\n num_rows = len(by_column.values()[0])\n rows = []\n 
for row_num in range(num_rows):\n row_data = {}\n for key, value in by_column.items():\n row_data[key] = value[row_num]\n # filter empty rows.\n is_empty = True\n for value in row_data.values():\n if len(value.strip()) > 0:\n is_empty = False\n break\n if not is_empty:\n rows.append(row_data)\n return rows\n\n\ndef _create_return_data_from_post_data(ret, tables_info, post_data):\n for table in tables_info:\n table_namespace = table.get('name') + '::'\n\n table_data = dict([(key.replace(table_namespace, ''), post_data.getlist(key)) for key in post_data.keys() if\n key.startswith(table_namespace)])\n\n return_table, created = ReturnTable.objects.get_or_create(name=table.get('name'), ret=ret)\n\n # delete any existing rows as they will all be recreated\n return_table.returnrow_set.all().delete()\n\n num_rows = len(table_data.values()[0])\n\n return_rows = []\n for row_num in range(num_rows):\n row_data = {}\n for key, value in table_data.items():\n row_data[key] = value[row_num]\n\n return_rows.append(ReturnRow(return_table=return_table, data=row_data))\n\n ReturnRow.objects.bulk_create(return_rows)\n\n\nclass EnterReturnView(OfficerOrCustomerRequiredMixin, TemplateView):\n template_name = 'wl/enter_return.html'\n login_url = '/'\n\n def get_context_data(self, **kwargs):\n ret = get_object_or_404(Return, pk=self.args[0])\n\n kwargs['return'] = ret\n\n kwargs['tables'] = []\n\n for resource in ret.return_type.resources:\n resource_name = resource.get('name')\n schema = Schema(resource.get('schema'))\n table = {'name': resource_name, 'title': resource.get('title', resource.get('name')),\n 'headers': schema.headers}\n\n try:\n return_table = ret.returntable_set.get(name=resource_name)\n rows = [return_row.data for return_row in return_table.returnrow_set.all()]\n validated_rows = list(schema.rows_validator(rows))\n table['data'] = validated_rows\n except ReturnTable.DoesNotExist:\n pass\n\n kwargs['tables'].append(table)\n\n kwargs['upload_spreadsheet_form'] = UploadSpreadsheetForm()\n\n return super(EnterReturnView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n context = self.get_context_data()\n ret = context['return']\n\n if 'upload' in request.POST:\n form = UploadSpreadsheetForm(request.POST, request.FILES)\n\n if form.is_valid():\n temp_file_dir = tempfile.mkdtemp(dir=settings.MEDIA_ROOT)\n try:\n data = form.cleaned_data.get('spreadsheet_file')\n path = default_storage.save(os.path.join(temp_file_dir, str(data)), ContentFile(data.read()))\n\n workbook = excel.load_workbook_content(path)\n\n for table in context['tables']:\n worksheet = excel.get_sheet(workbook, table.get('title'))\n if worksheet is not None:\n table_data = excel.TableData(worksheet)\n schema = Schema(ret.return_type.get_schema_by_name(table.get('name')))\n validated_rows = list(schema.rows_validator(table_data.rows_by_col_header_it()))\n table['data'] = validated_rows\n else:\n messages.warning(request, 'Missing worksheet ' + table.get('name'))\n finally:\n shutil.rmtree(temp_file_dir)\n elif 'draft' in request.POST or 'draft_continue' in request.POST:\n _create_return_data_from_post_data(ret, context['tables'], request.POST)\n\n if is_officer(request.user):\n ret.proxy_customer = request.user\n\n ret.status = 'draft'\n ret.save()\n\n messages.warning(request, 'Return saved as draft.')\n\n # redirect or reshow page depending on whether save or save/continue was clicked\n if 'draft' in request.POST:\n return redirect('home')\n else:\n for table in context['tables']:\n return_table = 
ReturnTable.objects.get(ret=ret, name=table.get('name'))\n table['data'] = [return_row.data for return_row in return_table.returnrow_set.all()]\n\n elif 'lodge' in request.POST:\n if _is_post_data_valid(ret, context['tables'], request.POST):\n\n _create_return_data_from_post_data(ret, context['tables'], request.POST)\n\n ret.lodgement_number = '%s-%s' % (str(ret.licence.licence_type.pk).zfill(LICENCE_TYPE_NUM_CHARS),\n str(ret.pk).zfill(LODGEMENT_NUMBER_NUM_CHARS))\n\n ret.lodgement_date = date.today()\n\n if is_officer(request.user):\n ret.proxy_customer = request.user\n\n ret.status = 'submitted'\n ret.save()\n\n # update next return in line's status to become the new current return\n next_ret = Return.objects.filter(licence=ret.licence, status='future').order_by('due_date').first()\n\n if next_ret is not None:\n next_ret.status = 'current'\n next_ret.save()\n\n return_submitted.send(sender=self.__class__, ret=ret)\n\n messages.success(request, 'Return successfully submitted.')\n\n return redirect('home')\n else:\n for table in context['tables']:\n table['data'] = _get_validated_rows_from_post(ret, table.get('name'), request.POST)\n if len(table['data']) == 0:\n messages.warning(request, \"You must enter data for {}\".format(table.get('name')))\n\n return render(request, self.template_name, context)\n\n\nclass CurateReturnView(OfficerRequiredMixin, TemplateView):\n template_name = 'wl/curate_return.html'\n login_url = '/'\n\n def get_context_data(self, **kwargs):\n ret = get_object_or_404(Return, pk=self.args[0])\n\n kwargs['return'] = ret\n\n kwargs['tables'] = []\n\n for resource in ret.return_type.resources:\n resource_name = resource.get('name')\n schema = Schema(resource.get('schema'))\n table = {'name': resource_name, 'title': resource.get('title', resource.get('name')),\n 'headers': schema.headers}\n\n try:\n return_table = ret.returntable_set.get(name=resource_name)\n rows = [return_row.data for return_row in return_table.returnrow_set.all()]\n validated_rows = list(schema.rows_validator(rows))\n table['data'] = validated_rows\n except ReturnTable.DoesNotExist:\n pass\n\n kwargs['tables'].append(table)\n\n kwargs['upload_spreadsheet_form'] = UploadSpreadsheetForm()\n\n return super(CurateReturnView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n context = self.get_context_data()\n ret = get_object_or_404(Return, pk=self.args[0])\n\n if 'accept' in request.POST:\n if _is_post_data_valid(ret, context['tables'], request.POST):\n _create_return_data_from_post_data(ret, context['tables'], request.POST)\n\n ret.status = 'accepted'\n ret.save()\n\n messages.success(request, 'Return was accepted.')\n return redirect('home')\n else:\n for table in context['tables']:\n table['data'] = _get_validated_rows_from_post(ret, table.get('name'), request.POST)\n if len(table['data']) == 0:\n messages.warning(request, \"You must enter data for {}\".format(table.get('name')))\n\n return render(request, self.template_name, context)\n else:\n ret.status = 'declined'\n ret.save()\n\n messages.warning(request, 'Return was declined.')\n return redirect('home')\n\n\nclass ViewReturnReadonlyView(OfficerOrCustomerRequiredMixin, TemplateView):\n template_name = 'wl/view_return_read_only.html'\n login_url = '/'\n\n def get_context_data(self, **kwargs):\n ret = get_object_or_404(Return, pk=self.args[0])\n\n kwargs['return'] = ret\n\n kwargs['tables'] = []\n\n for schema_name in ret.return_type.get_resources_names():\n schema = 
SchemaModel(ret.return_type.get_schema_by_name(schema_name))\n table = {'name': schema_name, 'headers': schema.headers}\n\n try:\n return_table = ret.returntable_set.get(name=schema_name)\n table['data'] = [return_row.data for return_row in return_table.returnrow_set.all()]\n except ReturnTable.DoesNotExist:\n pass\n\n kwargs['tables'].append(table)\n\n return super(ViewReturnReadonlyView, self).get_context_data(**kwargs)\n", "sub_path": "wildlifelicensing/apps/returns/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.utils_schema.Schema", "line_number": 35, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.utils_schema.Schema", "line_number": 43, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.objects.get_or_create", "line_number": 76, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable", "line_number": 76, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnRow", "line_number": 89, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnRow.objects.bulk_create", "line_number": 91, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnRow.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnRow", "line_number": 91, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.main.mixins.OfficerOrCustomerRequiredMixin", "line_number": 94, "usage_type": "name"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 99, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.Return", "line_number": 99, "usage_type": "argument"}, {"api_name": "wildlifelicensing.apps.returns.utils_schema.Schema", "line_number": 107, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.DoesNotExist", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable", "line_number": 116, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.forms.UploadSpreadsheetForm", "line_number": 121, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.forms.UploadSpreadsheetForm", "line_number": 130, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 133, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 133, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 133, "usage_type": "name"}, {"api_name": "django.core.files.storage.default_storage.save", "line_number": 136, "usage_type": "call"}, {"api_name": "django.core.files.storage.default_storage", "line_number": 136, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": 
"django.core.files.base.ContentFile", "line_number": 136, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.excel.load_workbook_content", "line_number": 138, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.excel", "line_number": 138, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.excel.get_sheet", "line_number": 141, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.excel", "line_number": 141, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.excel.TableData", "line_number": 143, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.excel", "line_number": 143, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.utils_schema.Schema", "line_number": 144, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 148, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 148, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 150, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.main.helpers.is_officer", "line_number": 154, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 160, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 164, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.objects.get", "line_number": 167, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable", "line_number": 167, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 178, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.main.helpers.is_officer", "line_number": 180, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.Return.objects.filter", "line_number": 187, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.Return.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.Return", "line_number": 187, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.signals.return_submitted.send", "line_number": 193, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.signals.return_submitted", "line_number": 193, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 195, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 195, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 197, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 202, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 202, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 204, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.main.mixins.OfficerRequiredMixin", "line_number": 207, "usage_type": "name"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 207, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 212, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.Return", "line_number": 212, "usage_type": "argument"}, {"api_name": 
"wildlifelicensing.apps.returns.utils_schema.Schema", "line_number": 220, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.DoesNotExist", "line_number": 229, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable", "line_number": 229, "usage_type": "name"}, {"api_name": "wildlifelicensing.apps.returns.forms.UploadSpreadsheetForm", "line_number": 234, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 240, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.Return", "line_number": 240, "usage_type": "argument"}, {"api_name": "django.contrib.messages.success", "line_number": 249, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 249, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 250, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 255, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 255, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 257, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 262, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 262, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 263, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.main.mixins.OfficerOrCustomerRequiredMixin", "line_number": 266, "usage_type": "name"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 266, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 271, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.Return", "line_number": 271, "usage_type": "argument"}, {"api_name": "jsontableschema.model.SchemaModel", "line_number": 278, "usage_type": "call"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable.DoesNotExist", "line_number": 284, "usage_type": "attribute"}, {"api_name": "wildlifelicensing.apps.returns.models.ReturnTable", "line_number": 284, "usage_type": "name"}]} +{"seq_id": "201385682", "text": "from flask.ext.script import Manager\nfrom flask.ext.migrate import Migrate, MigrateCommand\n#from models import db\nfrom app import create_app\n\napp_ = create_app()\n\n#app_.config.from_pyfile('config.py')\n#try:\n# app_.config.from_pyfile('local_config.py')\n#except IOError:\n# pass\n\n#migrate = Migrate(app_, db)\nmanager = Manager(app_)\n\nmanager.add_command('db', MigrateCommand)\n\n@manager.command\ndef profile(length=25, profile_dir=None):\n \"\"\"Start the application under the code profiler.\"\"\"\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app_.wsgi_app = ProfilerMiddleware(app_.wsgi_app, restrictions=[length],\n profile_dir=profile_dir)\n app_.run()\n\nif __name__ == '__main__':\n manager.run()\n", "sub_path": "metrics_service/manage.py", "file_name": "manage.py", "file_ext": "py", "file_size_in_byte": 758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "app.create_app", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.ext.script.Manager", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.ext.migrate.MigrateCommand", "line_number": 17, "usage_type": "argument"}, {"api_name": "werkzeug.contrib.profiler.ProfilerMiddleware", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "177418125", 
"text": "#coding:utf-8\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\n\nfrom model_mommy import mommy\n\n\nclass UserCreateViewTestCase(TestCase):\n\n def setUp(self):\n self._setup_user()\n self.client.login(username='user', password='secret')\n\n self.url = reverse('user_add')\n self.response = self.client.get(self.url)\n \n def test_get(self):\n self.assertEqual(200, self.response.status_code)\n\n def test_template_used(self):\n self.assertTemplateUsed(self.response,\n 'account/user_form.html')\n\n def test_post_form_valid(self):\n response = self.client.post(self.url, self._valid_dict())\n self.assertRedirects(response, reverse('user_list'))\n\n def test_post_form_invalid(self):\n response = self.client.post(self.url)\n self.assertEqual(200, response.status_code)\n self.assertFalse(response.context['form'].is_valid())\n\n def _valid_dict(self):\n\n return {\n 'username': 'foobar',\n 'is_active': '1',\n 'first_name': 'Foo',\n 'last_name': 'Bar',\n 'email': 'foo@bar.com',\n 'phone': 'xxx',\n 'birth_date': '02/08/1986',\n 'cpf_cnpj':'51763747654',\n 'doc': '1234',\n 'doc_entity': 'detran/rj',\n 'address': '3rd street',\n 'neighborhood': 'neighborhood',\n 'city': 'Rio de Janeiro',\n 'state': 'RJ',\n 'cep': '23000000',\n }\n\n def _setup_user(self):\n self.user = mommy.make(\n settings.AUTH_USER_MODEL, \n username='user',\n is_active=True)\n self.user.set_password('secret')\n self.user.save()\n\n", "sub_path": "account/tests/test_view_user_create.py", "file_name": "test_view_user_create.py", "file_ext": "py", "file_size_in_byte": 1785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 15, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 27, "usage_type": "call"}, {"api_name": "model_mommy.mommy.make", "line_number": 55, "usage_type": "call"}, {"api_name": "model_mommy.mommy", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "143718639", "text": "#写一个数据库DBUtils与Excel表的数据转换工具\n# Db_to_excel() : 将数据库数据写入excel表里\n\nfrom DBUtils import Mysql\nimport xlwt\nku = []\nclass Db_to_excel:\n sql = \"select * from bank\"\n data = Mysql.Select(sql,[])\n for i in data:\n i = list(i)\n ku.append(i)\n print(ku)\n\n\n #把数据库信息写入Excel表中\n # 空的工作簿\n wb = xlwt.Workbook()\n # 添加\n sheet = wb.add_sheet(\"用户管理\")\n # 向选项卡里添加数据\n a = 0\n for i in range(len(ku)):\n\n for j in range(len(ku[i])):\n num = ku[i][j]\n\n sheet.write(i,j,num)\n\n # 保存\n wb.save(\"去往Excel表.xls\")\n", "sub_path": "day13/任务2/demo2Db_to_excel.py", "file_name": "demo2Db_to_excel.py", "file_ext": "py", "file_size_in_byte": 679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "DBUtils.Mysql.Select", "line_number": 9, "usage_type": "call"}, {"api_name": "DBUtils.Mysql", "line_number": 9, "usage_type": "name"}, {"api_name": "xlwt.Workbook", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "594540907", "text": "import nltk \nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\n\nf = open('input.txt','r')\nraw = f.read()\nraw = raw.replace('\\n',' ')\n\n#Tokenization\ntokens = 
nltk.word_tokenize(raw)\n\n\n#POS Tagging\nPOS_tags = nltk.pos_tag(tokens) #use unprocessed 'tokens', not 'words'\n\n#Generate a list of POS tags\nPOS_tag_list = [(word,tag) for (word,tag) in POS_tags if tag.startswith('N')]\n\n#Generate a frequency distribution of all the POS tags\ntag_freq = nltk.FreqDist(POS_tag_list)\n#Sort the result \nsorted_tag_freq = sorted(tag_freq.items(), key = lambda k:k[1], reverse = True)\n\n#write result into .txt file\nwith open('4.POS_Tagging.txt','w+') as f:\n for (word,tag),frequency in sorted_tag_freq:\n f.write(str(word)+'\\t'+str(tag)+'\\t'+str(frequency)+'\\n')\n", "sub_path": "Lab2/4.POS_Tagging.py", "file_name": "4.POS_Tagging.py", "file_ext": "py", "file_size_in_byte": 765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "nltk.word_tokenize", "line_number": 10, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.FreqDist", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "77126975", "text": "'''\nThis is a http server, as the program API endpoint\n\n'''\nfrom flask import Flask, render_template, request, Response, jsonify\nimport driver # the dirver.py file\nimport os\n\napp = Flask(__name__)\n\n# /list?table=???\n@app.route(\"/list\", methods=[\"GET\"])\ndef list_tables():\n response = ''\n\n table_name = request.args.get('table', '')\n table_info_obj = driver.get_table(table_name)\n\n response = jsonify(table_info_obj)\n\n return response\n\n\n@app.route(\"/load\", methods=[\"GET\"])\ndef load_data():\n response = ''\n\n filename = 'files/Lists.xlsx'\n driver.populate_file(filename)\n \n response_obj = {'status': 'OK'}\n \n response = jsonify(response_obj)\n return response\n\n@app.route(\"/file\", methods=[\"GET\", \"POST\"])\ndef upload_file():\n response = ''\n if request.method == 'POST':\n if 'file' not in request.files:\n return response\n file = request.files['file']\n if file.filename == '':\n return response\n if file:\n driver.populate_file(file)\n response_obj = {'status': 'OK'}\n response = jsonify(response_obj)\n return response\n else:\n return response\n\n\n@app.route('/')\ndef index():\n\n return 'Hello World'\n\n\nif __name__ == '__main__':\n \n app.run(host='0.0.0.0', port=80, debug=True)\n\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1343, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "driver.get_table", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 19, "usage_type": "call"}, {"api_name": "driver.populate_file", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 42, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "driver.populate_file", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "564690590", "text": "import random\nimport flask\nimport yaml\nfrom datetime import datetime, timedelta, timezone\nfrom flask import json, request, session, redirect, url_for\nfrom flask.views import MethodView\nfrom api.utilities.decorators import instructor_required\nfrom api.utilities.http_response import HttpResponse\nfrom main_app_utilities.gcp.cloud_env import CloudEnv\nfrom main_app_utilities.gcp.datastore_manager import DataStoreManager\nfrom main_app_utilities.gcp.pubsub_manager import PubSubManager\nfrom main_app_utilities.gcp.bucket_manager import BucketManager\nfrom main_app_utilities.globals import PubSub, DatastoreKeyTypes, BuildConstants, Buckets, WorkoutStates\nfrom main_app_utilities.infrastructure_as_code.build_spec_to_cloud import BuildSpecToCloud\n\n__author__ = \"Andrew Bomberger\"\n__copyright__ = \"Copyright 2022, UA Little Rock, Emerging Analytics Center\"\n__credits__ = [\"Andrew Bomberger\", \"Philip Huff\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Philip Huff\"\n__email__ = \"pdhuff@ualr.edu\"\n__status__ = \"Testing\"\n\n\nclass Unit(MethodView):\n \"\"\"Method View to handle API requests for Cyber Arena Units\"\"\"\n decorators = [instructor_required]\n\n def __init__(self):\n self.key_type = DatastoreKeyTypes.UNIT.value\n self.pubsub_actions = PubSub.Actions\n self.handler = PubSub.Handlers\n self.http_resp = HttpResponse\n self.env = CloudEnv()\n self.env_dict = self.env.get_env()\n self.pubsub_mgr = PubSubManager(topic=PubSub.Topics.CYBER_ARENA, env_dict=self.env_dict)\n self.bm = BucketManager(env_dict=self.env_dict)\n\n def get(self, build_id=None):\n if build_id:\n args = request.args\n if args.get(\"state\", None):\n # Returns state for all workouts in unit\n states = []\n workouts = DataStoreManager().get_children(DatastoreKeyTypes.WORKOUT, build_id)\n if workouts:\n exists = True\n states = [{'id': workout['id'], 'state': WorkoutStates(workout['state']).name.lower()} for workout in workouts]\n else:\n exists = False\n states = []\n return self.http_resp(code=200, data={'exists': exists, 'states': states}).prepare_response()\n unit = DataStoreManager(key_type=self.key_type, key_id=build_id).get()\n if unit:\n return self.http_resp(code=200, data=unit).prepare_response()\n return self.http_resp(code=404).prepare_response()\n return self.http_resp(code=400).prepare_response()\n\n def post(self):\n user_email = session.get('user_email', None)\n recv_data = request.form\n\n # Parse Form Data\n expire_datetime = recv_data.get('expires', None)\n registration_required = recv_data.get('registration_required', False)\n build_type = recv_data.get('build_file', None)\n build_count = recv_data.get('build_count', None)\n\n # Send build request\n if build_count and expire_datetime and build_type:\n build_spec = DataStoreManager(key_type=DatastoreKeyTypes.CATALOG.value, key_id=build_type).get()\n if not build_spec:\n return self.http_resp(code=404, msg=f\"Invalid build type {build_type}\").prepare_response()\n build_spec['instructor_id'] = user_email\n # expires_ts = int(datetime.strptime(expire_datetime.replace(\"T\", \" \"), \"%Y-%m-%d %H:%M\").timestamp())\n expire_ts = int(datetime.strptime(expire_datetime.replace(\"T\", \" \"), \"%Y-%m-%d %H:%M\").astimezone(timezone.utc).timestamp())\n 
build_spec['workspace_settings'] = {\n 'count': build_count,\n 'registration_required': registration_required,\n 'student_emails': [],\n 'expires': expire_ts\n }\n build_spec['join_code'] = ''.join(str(random.randint(0, 9)) for num in range(0, 6))\n build_spec_to_cloud = BuildSpecToCloud(cyber_arena_spec=build_spec, env_dict=self.env_dict)\n build_spec_to_cloud.commit(publish=False)\n return redirect(url_for('teacher_app.workout_list', unit_id=build_spec_to_cloud.get_build_id()))\n return self.http_resp(code=400).prepare_response()\n\n @instructor_required\n def delete(self, build_id=None):\n if build_id:\n self.pubsub_mgr.msg(handler=str(self.handler.CONTROL.value), build_id=str(build_id),\n action=str(self.pubsub_actions.DELETE.value),\n cyber_arena_object=str(PubSub.CyberArenaObjects.UNIT.value))\n return self.http_resp(code=200).prepare_response()\n return self.http_resp(code=400).prepare_response()\n\n @instructor_required\n def put(self, build_id):\n if build_id:\n args = request.json\n action = args.get('action', None)\n question_id = args.get('question_id', None)\n child_id = args.get('build_id', None)\n\n valid_actions = [PubSub.Actions.START.value, PubSub.Actions.STOP.value, PubSub.Actions.NUKE]\n if action and action in valid_actions:\n self.pubsub_mgr.msg(handler=str(PubSub.Handlers.CONTROL.value), action=str(action),\n build_id=str(build_id),\n cyber_arena_object=str(PubSub.CyberArenaObjects.UNIT.value))\n return self.http_resp(code=200).prepare_response()\n elif question_id and child_id:\n \"\"\"For cases where an assessment question needs manual grading\"\"\"\n workout = DataStoreManager(key_type=DatastoreKeyTypes.WORKOUT, key_id=child_id).get()\n if workout:\n for question in workout['assessment']:\n if question['id'] == question:\n question['complete'] = True\n break\n DataStoreManager(key_type=DatastoreKeyTypes.WORKOUT, key_id=build_id).put(workout)\n return self.http_resp(code=404, msg=\"NO BUILD FOUND\").prepare_response()\n return self.http_resp(code=400, msg=\"BAD REQUEST\").prepare_response()\n", "sub_path": "v2/main_app/api/unit.py", "file_name": "unit.py", "file_ext": "py", "file_size_in_byte": 6221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.views.MethodView", "line_number": 26, "usage_type": "name"}, {"api_name": "api.utilities.decorators.instructor_required", "line_number": 28, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes.UNIT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes", "line_number": 31, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.PubSub.Actions", "line_number": 32, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 32, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.PubSub.Handlers", "line_number": 33, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 33, "usage_type": "name"}, {"api_name": "api.utilities.http_response.HttpResponse", "line_number": 34, "usage_type": "name"}, {"api_name": "main_app_utilities.gcp.cloud_env.CloudEnv", "line_number": 35, "usage_type": "call"}, {"api_name": "main_app_utilities.gcp.pubsub_manager.PubSubManager", "line_number": 37, "usage_type": "call"}, {"api_name": "main_app_utilities.globals.PubSub.Topics", "line_number": 37, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 37, 
"usage_type": "name"}, {"api_name": "main_app_utilities.gcp.bucket_manager.BucketManager", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "main_app_utilities.gcp.datastore_manager.DataStoreManager", "line_number": 46, "usage_type": "call"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes.WORKOUT", "line_number": 46, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes", "line_number": 46, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.WorkoutStates", "line_number": 49, "usage_type": "call"}, {"api_name": "main_app_utilities.gcp.datastore_manager.DataStoreManager", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "main_app_utilities.gcp.datastore_manager.DataStoreManager", "line_number": 72, "usage_type": "call"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes.CATALOG", "line_number": 72, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes", "line_number": 72, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 77, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "main_app_utilities.infrastructure_as_code.build_spec_to_cloud.BuildSpecToCloud", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 87, "usage_type": "call"}, {"api_name": "main_app_utilities.globals.PubSub.CyberArenaObjects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 95, "usage_type": "name"}, {"api_name": "api.utilities.decorators.instructor_required", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.PubSub.Actions", "line_number": 107, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 107, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.PubSub.Handlers", "line_number": 109, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 109, "usage_type": "name"}, {"api_name": "main_app_utilities.globals.PubSub.CyberArenaObjects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.PubSub", "line_number": 111, "usage_type": "name"}, {"api_name": "main_app_utilities.gcp.datastore_manager.DataStoreManager", "line_number": 115, "usage_type": "call"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes.WORKOUT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes", "line_number": 115, "usage_type": 
"name"}, {"api_name": "main_app_utilities.gcp.datastore_manager.DataStoreManager", "line_number": 121, "usage_type": "call"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes.WORKOUT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "main_app_utilities.globals.DatastoreKeyTypes", "line_number": 121, "usage_type": "name"}, {"api_name": "api.utilities.decorators.instructor_required", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "481858750", "text": "from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nimport os\nimport pyqtgraph as pg\nimport sys\nimport time\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport numpy as np\nimport cv2\nfrom scipy import misc\nfrom termcolor import colored\n\n\nclass App(QtGui.QMainWindow):\n # TODO add possibility to invert flow of time\n # TODO add times in shelter and in threat to progress bar [likely needs to be done in processing]\n \"\"\" Display frames from behavioural videos and tracking data for processed sessions.\n Only works for Tracked and Processed sessions \"\"\"\n def __init__(self, sessions, parent=None):\n \"\"\" Set up class, initialise variables \"\"\"\n super(App, self).__init__(parent)\n\n # Useful vars\n self.wait_ms = 0 # change speed of figure refresh rate\n self.colors = dict(head=(100, 220, 100), body=(220, 100, 100), tail=(100, 100, 220)) # Stuff to color bparts\n self.bodyparts = dict(head=['snout', 'Lear', 'Rear', 'neck'], body=['body'], tail=['tail'])\n self.bodyparts_plotdata = {}\n self.sessions = sessions # sessions\n self.session = None # place holder for current working session\n self.ready_to_plot = False # flag to control plotting behaviour\n self.plot_wnd = 100\n self.plot_items = []\n self.counter = 0 # vars to check plotting framerate\n self.fps = 0.\n self.lastupdate = time.time()\n self.start_frame = 1200\n self.is_paused = False\n\n # Create GUI\n self.display_keyboard_shortcuts()\n self.define_layout()\n\n # Create second window to display trial images\n self.previews = ImgsViwer()\n\n # Get session Data\n self.get_session_data(None)\n\n app = QtGui.QApplication(sys.argv)\n self.show()\n app.exec_()\n\n ####################################################################################################################\n def keyPressEvent(self, e): # Deal with keyboard shortcuts\n if e.key() == QtCore.Qt.Key_Escape:\n self.close()\n self.previews.close()\n print('Shutting down...')\n sys.exit()\n elif e.key() == QtCore.Qt.Key_W: # W = Faster\n self.increase_speed(None)\n elif e.key() == QtCore.Qt.Key_S: # S = Slower\n self.decrease_speed(None)\n elif e.key() == QtCore.Qt.Key_D: # D = Skip 100 frames\n self.pause_playback(None)\n self.ready_to_plot = True\n self.update_by_frame(None, start_frame=self.curr_frame + 100)\n elif e.key() == QtCore.Qt.Key_A: # A = go back 100 frames\n self.pause_playback(None)\n self.ready_to_plot = True\n self.update_by_frame(None, start_frame=self.curr_frame - 100)\n elif e.key() == QtCore.Qt.Key_Space: # Space = pause\n self.pause_playback(None)\n\n def display_keyboard_shortcuts(self):\n print(colored(' Keyboard shortcuts:', 'white'))\n print(colored(' W', 'green'), ' -- ', colored('Faster', 'yellow'))\n print(colored(' S', 'green'), ' -- ', colored('Slower', 'yellow'))\n print(colored(' D', 'green'), ' -- ', colored('Skip 100 frames', 'yellow'))\n print(colored(' A', 'green'), ' -- ', colored('Go back 100 frames', 'yellow'))\n print(colored(' Space', 'green'), ' -- ', colored('Pause', 'yellow'))\n print(colored(' Esc', 
'green'), ' -- ', colored('Close program', 'yellow'))\n\n\n ####################################################################################################################\n def define_style_sheet(self):\n # Main window color\n self.setAutoFillBackground(True)\n p = self.palette()\n p.setColor(self.backgroundRole(), QColor(40, 40, 40, 255))\n self.setPalette(p)\n\n # Widgets style sheet\n self.setStyleSheet(\"\"\"\n QPushButton {\n color: #ffffff;\n font-size: 18pt;\n background-color: #565656;\n border: 2px solid #8f8f91;\n border-radius: 6px;\n min-width: 250px;\n min-height: 60px;\n }\n \n QPlainTextEdit {\n color: #ffffff;\n font-size: 14pt;\n background-color: #565656;\n border: 2px solid #8f8f91;\n border-radius: 6px;\n min-width: 250px;\n min-height: 60px;\n }\n\n QLabel {\n color: #ffffff;\n\n font-size: 16pt;\n\n min-width: 80px;\n min-height: 40px;\n } \n\n\n QLineEdit {\n color: #202223;\n font-size: 14pt;\n background-color: #c1c1c1;\n border-radius: 4px;\n min-height: 40px;\n min-width: 20px;\n }\n\n QListWidget {\n font-size: 14pt;\n background-color: #c1c1c1;\n border-radius: 4px;\n\n }\n\n QPushButton#LaunchBtn {\n background-color: #006600;\n } \n QPushButton#PauseBtn {\n background-color: #d7c832;\n }\n QPushButton#StopBtn {\n background-color: #a32020;\n } \n QPushButton#ResumeBtn {\n background-color: #73a120;\n } \n \n QPushButton#GotoBtn {\n font-size: 16pt;\n min-width: 200px;\n min-height: 40px;\n }\n \"\"\")\n\n def create_label(self, txt, pos):\n obj = QtGui.QLabel()\n obj.setText(txt)\n if len(pos) == 4:\n self.mainbox.layout().addWidget(obj, pos[0], pos[1], pos[2], pos[3])\n elif len(pos) == 2:\n self.mainbox.layout().addWidget(obj, pos[0], pos[1])\n else:\n print('Cannot create label widget, wrong position parameter: {}'.format(pos))\n return obj\n\n def create_btn(self, txt, pos, name=None, func=None):\n obj = QPushButton(text=txt)\n if len(pos) == 4:\n self.mainbox.layout().addWidget(obj, pos[0], pos[1], pos[2], pos[3])\n elif len(pos) == 2:\n self.mainbox.layout().addWidget(obj, pos[0], pos[1])\n else:\n print('Cannot create label widget, wrong position parameter: {}'.format(pos))\n if name is not None:\n obj.setObjectName(name)\n if func is not None:\n obj.clicked.connect(lambda: func(self))\n return obj\n\n def define_layout(self):\n # TODO make legends\n \"\"\" Create the layout of the figure\"\"\"\n self.define_style_sheet()\n\n # Main figure layout\n self.mainbox = QtGui.QWidget()\n self.mainbox.showFullScreen()\n self.setCentralWidget(self.mainbox)\n grid = QtGui.QGridLayout()\n grid.setSpacing(10)\n self.mainbox.setLayout(grid)\n\n # Create Plotting Canvases\n self.progres_bar_canvas = pg.GraphicsLayoutWidget()\n self.mainbox.layout().addWidget(self.progres_bar_canvas, 14, 0, 3, 15)\n\n self.canvas = pg.GraphicsLayoutWidget()\n self.mainbox.layout().addWidget(self.canvas, 0, 0, 12, 15)\n\n self.view = self.canvas.addViewBox()\n self.view.setAspectLocked(False)\n\n # Create Plotting Items\n # Frame\n self.img = pg.ImageItem(border='w')\n self.view.addItem(self.img)\n\n # Pose Reconstruction\n self.poseplot = self.canvas.addPlot(title='Pose reconstruction')\n self.poseplot.invertY(True)\n self.canvas.nextRow()\n\n # Velocity\n self.velplot = self.canvas.addPlot(title='Velocity and bodylength')\n\n # Ang vels\n self.angvelplot = self.canvas.addPlot(title='Head and Body ang. 
vel')\n\n # Progress\n self.progress_plot = self.progres_bar_canvas.addPlot(title='Progress bar', colspan=2)\n\n # Create text labels and lineedits\n self.framerate_label = self.create_label('Framerate', (16, 18, 1, 2))\n self.current_frame = self.create_label('Frame 0', (14, 18, 1, 2))\n self.goto_frame_edit = QtGui.QLineEdit('Go to frame number')\n self.mainbox.layout().addWidget(self.goto_frame_edit, 15, 18, 1, 2)\n self.trname = self.create_label('Trial Name', (1, 16, 1, 2))\n self.tracking_vars_label = QtGui.QPlainTextEdit()\n self.tracking_vars_label.insertPlainText('Tracking data')\n self.mainbox.layout().addWidget(self.tracking_vars_label, 14, 16, 2, 2)\n self.trlistlabel = self.create_label('Trials', (8, 19))\n self.sesslistlabel = self.create_label('Sessions', (5, 19))\n if not self.sessions is None:\n name = \"\"\" \n Session ID: ,\n Experiment: ,\n Date: ,\n Mouse ID: .\n \"\"\"\n self.sessname = self.create_label('Session Metadata \\n {}'.format(name), (0, 16, 1, 2))\n else:\n self.sessname = self.create_label('No session found', (0, 16, 1, 2))\n\n # Create buttons\n self.launch_btn = self.create_btn('Launch', (6, 16, 2, 2), name='LaunchBtn', func=self.update_by_frame)\n self.stop_btn = self.create_btn('Stop', (7, 16, 2, 2), name='StopBtn', func=self.stop_playback)\n self.pause_btn = self.create_btn('Pause', (8, 16, 4, 2), name='PauseBtn', func=self.pause_playback)\n self.resume_btn = self.create_btn('Resume', (7, 16, 4, 2), name='ResumeBtn', func=self.resume_playback)\n self.faster_btn = self.create_btn('Faster', (16, 16), func=self.increase_speed)\n self.slower_btn = self.create_btn('Slower', (16, 17), func=self.decrease_speed)\n self.gotoframe_btn = self.create_btn('Go to frame', (15, 20), func=self.change_frame, name='GotoBtn')\n\n # List widgets\n self.trials_listw = QListWidget()\n self.mainbox.layout().addWidget(self.trials_listw, 9, 18, 2, 3)\n self.trials_listw.itemDoubleClicked.connect(self.load_trial_data)\n\n self.sessions_listw = QListWidget()\n self.mainbox.layout().addWidget(self.sessions_listw, 6, 18, 2, 3)\n self.sessions_listw.itemDoubleClicked.connect(self.get_session_data)\n\n # Define window geometry\n self.setGeometry(50, 50, 3600, 2000)\n\n # Create plot items\n self.define_plot_items()\n\n def define_plot_items(self):\n self.vel_line = self.velplot.plot(pen=pg.mkPen('r', width=5))\n self.blength_line = self.velplot.plot(pen=pg.mkPen((150, 100, 220), width=3))\n self.plot_items.append(self.vel_line)\n self.plot_items.append(self.blength_line)\n\n self.head_ang_vel_line = self.angvelplot.plot(pen=pg.mkPen((100, 100, 25), width=3))\n self.body_ang_vel_line = self.angvelplot.plot(pen=pg.mkPen((150, 50, 75), width=3))\n self.head_body_angle_diff_line = self.angvelplot.plot(pen=pg.mkPen((150, 200, 150), width=5))\n self.plot_items.append(self.head_ang_vel_line)\n self.plot_items.append(self.body_ang_vel_line)\n self.plot_items.append(self.head_body_angle_diff_line)\n\n ####################################################################################################################\n def get_session_data(self, event):\n \"\"\" Get paths of videofiles and names of tracking data + add these names to list widget\"\"\"\n if self.sessions is None:\n return\n\n if event is None: # function not called by widget click\n [self.sessions_listw.addItem(sess) for sess in sorted(list(self.sessions.keys()))]\n session_name = sorted(list(self.sessions.keys()))[0]\n else: # Clean up trials list widget and laod data\n for i in range(self.trials_listw.count()):\n 
self.trials_listw.model().removeRow(0)\n\n session_name = event.text()\n\n session = self.sessions[session_name]\n self.session = session\n self.videos = session.Metadata.video_file_paths\n self.trials = [t for t in session.Tracking.keys() if '-' in t]\n [self.trials_listw.addItem(tr) for tr in sorted(self.trials)]\n\n name = \"\"\" \n Session ID: {},\n Experiment: {},\n Date: {},\n Mouse ID: {}.\n \"\"\".format(session.Metadata.session_id, session.Metadata.experiment,\n session.Metadata.date, session.Metadata.mouse_id)\n self.sessname.setText('Session Metadata \\n {}'.format(name))\n\n # Load images in secondary window\n self.previews.get_images(sorted(self.trials))\n\n def load_trial_data(self, trial_name):\n \"\"\" get data from trial and initialise plots \"\"\"\n # Clear up a previously running trials\n self.ready_to_plot = False\n [p.setData([], []) for p in self.plot_items]\n self.curr_frame = 0\n\n # Get data\n trial_name = trial_name.text()\n self.previews.set_img(trial_name)\n self.trname.setText('Trial: {}'.format(trial_name))\n\n videonum = int(trial_name.split('_')[1].split('-')[0])\n self.video = self.videos[videonum][0]\n self.video_grabber = cv2.VideoCapture(self.video)\n self.num_frames = int(self.video_grabber.get(cv2.CAP_PROP_FRAME_COUNT))\n self.video_fps = self.video_grabber.get(cv2.CAP_PROP_FPS)\n self.trial_start_frame = self.session.Tracking[trial_name].metadata['Start frame']\n\n self.tracking_data = self.session.Tracking[trial_name].dlc_tracking['Posture']\n self.num_frames_trial = len(self.tracking_data['body'])\n\n # Plot the first frame\n self.video_grabber.set(1, self.trial_start_frame)\n self.frame = self.prep_frame()\n self.img.setImage(self.frame)\n\n # Prep progress bar\n stim_dur = 9\n self.progress_bg = pg.QtGui.QGraphicsRectItem(0, 0, self.num_frames_trial, 1)\n self.progress_bg.setPen(pg.mkPen((180, 180, 180)))\n self.progress_bg.setBrush(pg.mkBrush((180, 180, 180)))\n self.progress_plot.addItem(self.progress_bg)\n self.stim_bg = pg.QtGui.QGraphicsRectItem(self.num_frames_trial/2, 0, stim_dur*self.video_fps, 1)\n self.stim_bg.setPen(pg.mkPen((100, 100, 200)))\n self.stim_bg.setBrush(pg.mkBrush((100, 100, 200)))\n self.progress_plot.addItem(self.stim_bg)\n self.progress_plot.setRange(xRange=[0, self.num_frames_trial], yRange=[0, 1])\n self.progress_line = self.progress_plot.plot(pen=pg.mkPen('r', width=5))\n\n def prep_frame(self):\n _, frame = self.video_grabber.read()\n frame = frame[:, :, 0]\n return np.rot90(frame, 3)\n\n ####################################################################################################################\n def plot_pose(self, framen):\n # TODO add lines to posture plot\n if framen == self.start_frame:\n self.bodyparts_plotdata = {}\n\n for bp, data in self.tracking_data.items():\n for key,parts in self.bodyparts.items():\n if bp in parts:\n if bp == 'body':\n centre = data.loc[framen].x, data.loc[framen].y\n\n col = self.colors[key]\n if framen == self.start_frame:\n self.bodyparts_plotdata[bp] = self.poseplot.plot([data.loc[framen].x],\n [data.loc[framen].y],\n pen=col, symbolBrush=col, symbolPen='w', symbol='o', symbolSize=30)\n self.plot_items.append(self.bodyparts_plotdata[bp])\n else:\n self.bodyparts_plotdata[bp].setData([data.loc[framen].x], [data.loc[framen].y])\n self.plot_items.append(self.bodyparts_plotdata[bp])\n break\n\n self.poseplot.setRange(xRange=[centre[0]-50, centre[0]+50], yRange=[centre[1]+50, centre[1]-50])\n\n def plot_tracking_data(self, framen):\n # Get vars and prep them\n x, y = 
self.tracking_data['body']['x'].values, self.tracking_data['body']['y'].values\n vel = self.tracking_data['body']['Velocity'].values\n blength = self.tracking_data['body']['Body length'].values\n blength = np.divide(blength, max(blength))\n head_ang_vel = self.tracking_data['body']['Head ang vel'].values\n body_ang_vel = self.tracking_data['body']['Body ang vel'].values\n bori = self.tracking_data['body']['Orientation'].values\n hori = self.tracking_data['body']['Head angle'].values\n hb_ori_diff = np.subtract(hori, bori)\n\n bor = bori[framen]\n while bor>360:\n bor -= 360\n hor = hori[framen]\n while hor>360:\n hor -= 360\n\n # Update plots\n xx = np.linspace(0, self.plot_wnd, self.plot_wnd)\n self.vel_line.setData(xx, vel[framen:framen+self.plot_wnd])\n self.blength_line.setData(xx, blength[framen:framen+self.plot_wnd])\n self.head_ang_vel_line.setData(xx, head_ang_vel[framen:framen+self.plot_wnd])\n self.body_ang_vel_line.setData(xx, body_ang_vel[framen:framen+self.plot_wnd])\n self.head_body_angle_diff_line.setData(xx, hb_ori_diff[framen:framen+self.plot_wnd])\n\n self.velplot.setRange(yRange=[0, max(vel)+(max(vel)/10)])\n max_ori = max(abs(head_ang_vel))+(max(abs(head_ang_vel))/10)\n self.angvelplot.setRange(yRange=[-max_ori, max_ori])\n\n # Display data\n self.tracking_vars_label.setPlainText(\"\"\"\n Tracking data\n Position: {}, {}\n Velocity: {}\n Orientation [body]: {}\n Orientation [head]: {}\n Ang. Vel. [body]: {}\n Ang. vel. [head]: {}\n\n \"\"\".format(round(x[framen]), round(y[framen]), round(vel[framen], 2), round(bor, 2),\n round(hor, 2), round(body_ang_vel[framen], 2), round(head_ang_vel[framen], 2)))\n\n def update_by_frame(self, event, start_frame=1200):\n def get_plotting_fps(self):\n self.current_frame.setText('Frame: {}'.format(f))\n\n now = time.time()\n dt = (now - self.lastupdate)\n if dt <= 0:\n dt = 0.000000000001\n fps2 = 1.0 / dt\n self.lastupdate = now\n self.fps = self.fps * 0.9 + fps2 * 0.1\n tx = 'Plotting Frame Rate: {} FPS'.format(round(self.fps, 0))\n self.framerate_label.setText(tx)\n\n if event is not None:\n self.ready_to_plot = True\n\n if not self.ready_to_plot:\n return\n\n # Clean up plots\n self.cleanup_plots()\n\n # Set up start time\n self.start_frame = start_frame\n f = start_frame\n self.curr_frame = f\n self.video_grabber.set(1, self.trial_start_frame+start_frame)\n\n # Keep looping unless something goes wrong\n while True:\n get_plotting_fps(self)\n\n # Plot\n frame = self.prep_frame()\n self.img.setImage(frame)\n self.plot_pose(f)\n self.plot_tracking_data(f)\n self.progress_line.setData([f, f], [0, 1])\n\n f += 1\n pg.QtGui.QApplication.processEvents()\n\n if not self.ready_to_plot:\n self.curr_frame = f\n break\n\n if self.wait_ms:\n time.sleep(self.wait_ms/1000)\n\n def cleanup_plots(self):\n [p.setData([], []) for p in self.plot_items]\n\n\n ####################################################################################################################\n def pause_playback(self, event):\n if not self.is_paused:\n self.ready_to_plot = False\n self.is_paused = True\n else:\n self.is_paused = False\n self.resume_playback(None)\n\n def resume_playback(self, event):\n self.ready_to_plot = True\n self.update_by_frame(None, start_frame=self.curr_frame)\n\n def decrease_speed(self, event):\n self.wait_ms += 50\n\n def increase_speed(self, event):\n if self.wait_ms > 0:\n self.wait_ms -= 50\n\n def change_frame(self, event):\n self.ready_to_plot = False\n try:\n target_frame = int(self.goto_frame_edit.text())\n self.ready_to_plot = True\n 
self.update_by_frame(None, target_frame)\n except:\n return\n\n def stop_playback(self, event):\n self.ready_to_plot = False\n self.cleanup_plots()\n\n\nclass ImgsViwer(QtGui.QMainWindow):\n def __init__(self, parent=None):\n super(ImgsViwer, self).__init__(parent)\n\n self.define_layout()\n self.show()\n\n self.images_flds = 'D:\\\\Dropbox (UCL - SWC)\\\\Dropbox (UCL - SWC)\\\\Rotation_vte\\\\data\\\\z_TrialImages'\n self.images = {}\n self.curr_img = None\n\n def define_layout(self):\n # Main window color\n self.setStyleSheet(\"\"\"\n QPushButton {\n color: #ffffff;\n font-size: 18pt;\n background-color: #565656;\n border: 2px solid #8f8f91;\n border-radius: 6px;\n min-width: 250px;\n min-height: 60px;\n }\n QLabel {\n color: #ffffff;\n\n font-size: 16pt;\n\n min-width: 80px;\n min-height: 40px;\n } \"\"\")\n\n self.setAutoFillBackground(True)\n p = self.palette()\n p.setColor(self.backgroundRole(), QColor(40, 40, 40, 255))\n self.setPalette(p)\n\n self.mainbox = QtGui.QWidget()\n self.mainbox.showFullScreen()\n self.setCentralWidget(self.mainbox)\n grid = QtGui.QGridLayout()\n grid.setSpacing(10)\n self.mainbox.setLayout(grid)\n\n self.canvas = pg.GraphicsLayoutWidget()\n self.mainbox.layout().addWidget(self.canvas, 0, 0, 6, 6)\n\n self.view = self.canvas.addViewBox()\n self.view.setAspectLocked(False)\n self.img = pg.ImageItem(border='w')\n self.view.addItem(self.img)\n\n self.curr_img_label = App.create_label(self, 'Current Image', (7, 1))\n self.prev_btn = App.create_btn(self, 'Prev', (8, 1), func=self.prev_img)\n self.next_btn = App.create_btn(self, 'Next', (8, 3), func=self.next_img)\n\n self.setGeometry(3835, 40, 1450, 1400)\n\n def get_images(self, trials):\n if len(self.images):\n self.discard_images()\n\n for tr_name in trials:\n self.images[tr_name] = os.path.join(self.images_flds, tr_name + '.png')\n\n # self.set_img(trials[0])\n\n def discard_images(self):\n self.images = {}\n\n def set_img(self, trial):\n self.curr_img_label.setText(trial)\n img = np.rot90(misc.imread(self.images[trial]), 3)\n self.img.setImage(img)\n self.curr_img = list(self.images.keys()).index(trial)\n\n def next_img(self, event):\n self.curr_img += 1\n if self.curr_img < len(list(self.images.keys())):\n trial = list(self.images.keys())[self.curr_img]\n self.set_img(trial)\n\n def prev_img(self, event):\n self.curr_img -= 1\n if self.curr_img > 0:\n trial = list(self.images.keys())[self.curr_img]\n self.set_img(trial)\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n thisapp = App(None)\n thisapp.show()\n app.exec_()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "Debug/Visualise_tracking.py", "file_name": "Visualise_tracking.py", "file_ext": "py", "file_size_in_byte": 24179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pyqtgraph.Qt.QtGui.QMainWindow", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 15, "usage_type": "name"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QApplication", "line_number": 50, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 50, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore.Qt", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 56, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": 
"call"}, {"api_name": "pyqtgraph.Qt.QtCore.Qt", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 61, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtCore.Qt", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 63, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtCore.Qt", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 65, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtCore.Qt", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 69, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtCore.Qt", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtCore", "line_number": 73, "usage_type": "name"}, {"api_name": "termcolor.colored", "line_number": 77, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 78, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 79, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 80, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 81, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 82, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 83, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QLabel", "line_number": 163, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 163, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QWidget", "line_number": 193, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 193, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QGridLayout", "line_number": 196, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 196, "usage_type": "name"}, {"api_name": "pyqtgraph.GraphicsLayoutWidget", "line_number": 201, "usage_type": "call"}, {"api_name": "pyqtgraph.GraphicsLayoutWidget", "line_number": 204, "usage_type": "call"}, {"api_name": "pyqtgraph.ImageItem", "line_number": 212, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QLineEdit", "line_number": 232, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 232, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QPlainTextEdit", "line_number": 235, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 235, "usage_type": "name"}, {"api_name": "pyqtgraph.mkPen", "line_number": 276, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 277, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 281, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 282, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 283, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 335, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 336, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 337, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtGui.QGraphicsRectItem", "line_number": 350, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 350, "usage_type": "attribute"}, {"api_name": "pyqtgraph.mkPen", "line_number": 351, "usage_type": "call"}, {"api_name": "pyqtgraph.mkBrush", "line_number": 352, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui.QGraphicsRectItem", "line_number": 354, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 354, 
"usage_type": "attribute"}, {"api_name": "pyqtgraph.mkPen", "line_number": 355, "usage_type": "call"}, {"api_name": "pyqtgraph.mkBrush", "line_number": 356, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 364, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 411, "usage_type": "call"}, {"api_name": "time.time", "line_number": 439, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui.QApplication.processEvents", "line_number": 476, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 476, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 483, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QMainWindow", "line_number": 523, "usage_type": "attribute"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 523, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QWidget", "line_number": 560, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 560, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QGridLayout", "line_number": 563, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 563, "usage_type": "name"}, {"api_name": "pyqtgraph.GraphicsLayoutWidget", "line_number": 567, "usage_type": "call"}, {"api_name": "pyqtgraph.ImageItem", "line_number": 572, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path", "line_number": 586, "usage_type": "attribute"}, {"api_name": "numpy.rot90", "line_number": 595, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 595, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 595, "usage_type": "name"}, {"api_name": "pyqtgraph.Qt.QtGui.QApplication", "line_number": 613, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui", "line_number": 613, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 613, "usage_type": "attribute"}]} +{"seq_id": "524583076", "text": "import os\nfrom flask import (\n Blueprint,\n request,\n make_response,\n send_from_directory,\n current_app as app,\n)\nfrom marshmallow import ValidationError\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom app.users.schemas import (\n RegisterSchema,\n UserSchema,\n LoginSchema,\n ChangePwdSchema,\n ChangeBioSchema,\n)\nfrom app.users.services import (\n register_user,\n login_user,\n logout_user,\n get_user_by_id,\n change_password,\n change_profile_img,\n change_users_bio,\n)\nfrom app.users.decorators import login_required\nfrom app.exceptions import OperationNotPermitted\nfrom app.translations.utils import t\n\nusers = Blueprint(\"users\", __name__)\n\n\n@users.route(\"/register\", methods=[\"POST\"])\ndef register():\n try:\n data = RegisterSchema().load(request.form)\n avatar = request.files.get(\"profile_img\")\n if avatar and not os.path.splitext(avatar.filename)[1] in [\n \".jpg\",\n \".jpeg\",\n \".png\",\n ]:\n raise ValidationError({\"profile_img\": [t(\"img_format_error\")]})\n jwt, user = register_user(data, avatar)\n return make_response({\"token\": jwt, \"user\": UserSchema().dump(user)}), 201\n except ValidationError as err:\n return make_response(err.messages), 400\n except SQLAlchemyError as err:\n e = str(err)\n print(e)\n return make_response(e), 500\n\n\n@users.route(\"/login\", methods=[\"POST\"])\ndef login():\n try:\n data 
= LoginSchema().load(request.json)\n jwt, user = login_user(data)\n return {\"token\": jwt, \"user\": UserSchema().dump(user)}\n except ValidationError as err:\n return make_response(err.messages), 400\n except SQLAlchemyError as err:\n e = str(err)\n print(e)\n return make_response(e), 500\n\n\n@users.route(\"/logout\", methods=[\"POST\"])\n@login_required\ndef logout():\n logout_user(request.token)\n return make_response(), 200\n\n\n@users.route(\"//data\", methods=[\"GET\"])\ndef get_user_data(user_id):\n try:\n user = get_user_by_id(user_id)\n res = UserSchema().dump(user)\n return res\n except ValidationError as err:\n return make_response(err.messages), 400\n except SQLAlchemyError as err:\n e = str(err)\n print(e)\n return make_response(e), 500\n\n\n@users.route(\"/avatar/\")\ndef get_podcast_image(filename):\n avatars_dir = os.path.abspath(os.path.join(app.root_path, \"static/avatars\"))\n return send_from_directory(avatars_dir, filename)\n\n\n@users.route(\"/change_pwd\", methods=[\"PATCH\"])\n@login_required\ndef change_pwd():\n try:\n data = ChangePwdSchema().dump(request.json)\n change_password(request.user, data[\"new_pwd\"], data[\"old_pwd\"])\n return make_response()\n except ValidationError as err:\n return make_response(err.messages), 400\n except OperationNotPermitted as err:\n return make_response({\"error\": err.message}), 400\n except SQLAlchemyError as err:\n e = str(err)\n print(e)\n return make_response(e), 500\n\n\n@users.route(\"/change_profile_pic\", methods=[\"PATCH\"])\n@login_required\ndef change_avatar():\n try:\n new_img = request.files.get(\"new_profile_pic\")\n if not new_img:\n raise OperationNotPermitted(t(\"no_img_error\"))\n _, ext = os.path.splitext(new_img.filename)\n if ext not in [\".jpg\", \".jpeg\", \".png\"]:\n raise OperationNotPermitted(t(\"img_format_error\"))\n\n change_profile_img(request.user, new_img)\n\n return make_response()\n except ValidationError as err:\n return make_response(err.messages), 400\n except OperationNotPermitted as err:\n return make_response({\"error\": err.message}), 400\n except SQLAlchemyError as err:\n e = str(err)\n print(e)\n return make_response(e), 500\n\n\n@users.route(\"/edit_bio\", methods=[\"PATCH\"])\n@login_required\ndef change_bio():\n try:\n bio = ChangeBioSchema().dump(request.json)[\"bio\"]\n change_users_bio(request.user, bio)\n return make_response({\"bio\": bio}), 200\n except ValidationError as err:\n return make_response(err.messages), 400\n except OperationNotPermitted as err:\n return make_response({\"error\": err.message}), 400\n except SQLAlchemyError as err:\n e = str(err)\n print(e)\n return make_response(e), 500\n", "sub_path": "app/users/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 4374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Blueprint", "line_number": 31, "usage_type": "call"}, {"api_name": "app.users.schemas.RegisterSchema", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.files.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, 
"usage_type": "attribute"}, {"api_name": "marshmallow.ValidationError", "line_number": 44, "usage_type": "call"}, {"api_name": "app.translations.utils.t", "line_number": 44, "usage_type": "call"}, {"api_name": "app.users.services.register_user", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 46, "usage_type": "call"}, {"api_name": "app.users.schemas.UserSchema", "line_number": 46, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 52, "usage_type": "call"}, {"api_name": "app.users.schemas.LoginSchema", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "app.users.services.login_user", "line_number": 59, "usage_type": "call"}, {"api_name": "app.users.schemas.UserSchema", "line_number": 60, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 66, "usage_type": "call"}, {"api_name": "app.users.services.logout_user", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.token", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 73, "usage_type": "call"}, {"api_name": "app.users.decorators.login_required", "line_number": 70, "usage_type": "name"}, {"api_name": "app.users.services.get_user_by_id", "line_number": 79, "usage_type": "call"}, {"api_name": "app.users.schemas.UserSchema", "line_number": 80, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 83, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.current_app.root_path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 93, "usage_type": "call"}, {"api_name": "app.users.schemas.ChangePwdSchema", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "app.users.services.change_password", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.user", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 102, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", 
"line_number": 103, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 104, "usage_type": "call"}, {"api_name": "app.exceptions.OperationNotPermitted", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 106, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 110, "usage_type": "call"}, {"api_name": "app.users.decorators.login_required", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.request.files.get", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "app.exceptions.OperationNotPermitted", "line_number": 119, "usage_type": "call"}, {"api_name": "app.translations.utils.t", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "app.exceptions.OperationNotPermitted", "line_number": 122, "usage_type": "call"}, {"api_name": "app.translations.utils.t", "line_number": 122, "usage_type": "call"}, {"api_name": "app.users.services.change_profile_img", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.request.user", "line_number": 124, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 126, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 127, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 128, "usage_type": "call"}, {"api_name": "app.exceptions.OperationNotPermitted", "line_number": 129, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 130, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 134, "usage_type": "call"}, {"api_name": "app.users.decorators.login_required", "line_number": 114, "usage_type": "name"}, {"api_name": "app.users.schemas.ChangeBioSchema", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "app.users.services.change_users_bio", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.request.user", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 143, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 145, "usage_type": "call"}, {"api_name": "app.exceptions.OperationNotPermitted", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 147, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 148, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 151, "usage_type": "call"}, {"api_name": "app.users.decorators.login_required", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "53662829", "text": "from requests.exceptions import SSLError\nimport requests\nimport time\nimport 
os\n\n\nclass Vk:\n\tdef __init__(self, token, sleep_time=0.5, is_offline=False, **config):\n\t\tconfig['access_token'] = get_token(token)\n\t\tself.sleep_time = sleep_time\n\t\tself.is_offline = is_offline\n\t\tself.config = config\n\t\tif is_offline:\n\t\t\t# Dummy method for testing\n\t\t\tself.send_request = lambda *a, **k: {\"items\":[], \"count\":0}\n\n\tdef __call__(self, method, **kwargs):\n\t\tkwargs.update(self.config)\n\t\turl = f\"https://api.vk.com/method/{method}\"\n\t\ttime.sleep(self.sleep_time)\n\t\treturn self.send_request(method, **kwargs)\n\n\tdef send_request(self, method, **kwargs):\n\t\ttry:\n\t\t\tresponse = requests.post(method, kwargs)\n\t\texcept SSLError as e:\n\t\t\traise NO_INTERNET from e\n\t\telse:\n\t\t\treturn get_response_dict(response)\n\n\ndef get_token(token):\n\ttoken = str(token) # To prevent os.path.exists errors\n\tif os.path.exists(token):\n\t\twith open(token) as f:\n\t\t\treturn f.read().strip()\n\telse:\n\t\treturn token.strip()\n\n\ndef get_response_dict(response):\n\tjson_dict = response.json()\n\tif \"response\" in json_dict:\n\t\treturn json_dict['response']\n\telif \"error\" in json_dict:\n\t\traise ResponseError(json_dict['error'])\n\telse:\n\t\treturn json_dict\n\n\nclass LongpollServer:\n\tdef __init__(self, vk_session, version=2, wait_time=30):\n\t\tself.vk = vk_session\n\t\tself.version = version\n\t\tself.wait_time = wait_time\n\t\tself.server, self.key, self.ts = self.get_server_info()\n\t\tself.kwargs = self.make_config()\n\t\tself.url = \"https://\" + self.server\n\n\tdef __call__(self):\n\t\treturn self.send_longpoll_request()\n\n\tdef get_server_info(self):\n\t\tresponse = self.vk(\"messages.getLongPollServer\")\n\t\treturn response['server'], response['key'], response['ts']\n\n\tdef make_config(self):\n\t\treturn {\"act\":\"a_check\", \"key\":self.key, \"ts\":self.ts,\n\t\t\t\t\"wait\":self.wait_time, \"version\":self.version}\n\n\tdef send_longpoll_request(self):\n\t\tresponse = requests.post(self.url, self.kwargs)\n\t\tresponse = get_response_dict(response)\n\t\tself.kwargs['ts'] = response['ts']\n\t\treturn response\n\n\nclass VkError(Exception):\n\tpass\n\n\nclass ResponseError(VkError):\n\tdef __init__(self, error):\n\t\tparams = self.extract_request_params(error)\n\t\tmessage = \"{}\\nParameters: {}\".format(error['error_msg'], params)\n\t\tsuper().__init__(message)\n\t\n\t@staticmethod\n\tdef extract_request_params(error):\n\t\treturn {p['key']:p['value'] for p in error['request_params']}\n\n\n\nNO_INTERNET = VkError(\"No internet connection\")\n", "sub_path": "lib/vk.py", "file_name": "vk.py", "file_ext": "py", "file_size_in_byte": 2402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.exceptions.SSLError", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "515699984", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nimport mock\nfrom oslo_config import cfg\n\nfrom senlin.common import consts\nfrom senlin.common import messaging\nfrom senlin.engine import health_manager\nfrom senlin.objects import cluster as obj_cluster\nfrom senlin.objects import health_registry as hr\nfrom senlin.rpc import client as rpc_client\nfrom senlin.tests.unit.common import base\n\n\n@mock.patch('oslo_messaging.NotificationFilter')\nclass TestNotificationEndpoint(base.SenlinTestCase):\n\n def test_init(self, mock_filter):\n x_filter = mock_filter.return_value\n obj = health_manager.NotificationEndpoint('PROJECT', 'CLUSTER')\n\n mock_filter.assert_called_once_with(\n publisher_id='^compute.*',\n event_type='^compute\\.instance\\..*',\n context={'project_id': '^PROJECT$'})\n self.assertEqual(x_filter, obj.filter_rule)\n\n\n@mock.patch('senlin.engine.health_manager.NotificationEndpoint')\n@mock.patch('oslo_messaging.Target')\n@mock.patch('oslo_messaging.get_notification_transport')\n@mock.patch('oslo_messaging.get_notification_listener')\nclass TestListenerProc(base.SenlinTestCase):\n\n def test_listener_proc(self, mock_listener, mock_transport, mock_target,\n mock_endpoint):\n x_listener = mock.Mock()\n mock_listener.return_value = x_listener\n x_transport = mock.Mock()\n mock_transport.return_value = x_transport\n x_target = mock.Mock()\n mock_target.return_value = x_target\n x_endpoint = mock.Mock()\n mock_endpoint.return_value = x_endpoint\n\n res = health_manager.ListenerProc('EXCHANGE', 'PROJECT_ID',\n 'CLUSTER_ID')\n\n self.assertIsNone(res)\n mock_transport.assert_called_once_with(cfg.CONF)\n mock_target.assert_called_once_with(topic=\"notifications\",\n exchange='EXCHANGE')\n mock_endpoint.assert_called_once_with('PROJECT_ID', 'CLUSTER_ID')\n mock_listener.assert_called_once_with(\n x_transport, [x_target], [x_endpoint], pool=\"senlin-listeners\")\n x_listener.start.assert_called_once_with()\n x_listener.wait.assert_called_once_with()\n\n\nclass TestHealthManager(base.SenlinTestCase):\n\n def setUp(self):\n super(TestHealthManager, self).setUp()\n\n mock_eng = mock.Mock()\n mock_eng.engine_id = 'ENGINE_ID'\n topic = consts.ENGINE_HEALTH_MGR_TOPIC\n version = consts.RPC_API_VERSION\n self.hm = health_manager.HealthManager(mock_eng, topic, version)\n\n def test_init(self):\n self.assertEqual('ENGINE_ID', self.hm.engine_id)\n self.assertIsNotNone(self.hm.TG)\n self.assertIsNotNone(self.hm.rpc_client)\n self.assertEqual(consts.ENGINE_HEALTH_MGR_TOPIC, self.hm.topic)\n self.assertEqual(consts.RPC_API_VERSION, self.hm.version)\n self.assertEqual(0, len(self.hm.rt['registries']))\n\n @mock.patch.object(hr.HealthRegistry, 'claim')\n def test__load_runtime_registry(self, mock_claim):\n mock_claim.return_value = [\n mock.Mock(cluster_id='CID1',\n check_type=consts.NODE_STATUS_POLLING,\n interval=12,\n params={'k1': 'v1'}),\n mock.Mock(cluster_id='CID2',\n check_type=consts.NODE_STATUS_POLLING,\n interval=34,\n params={'k2': 'v2'}),\n mock.Mock(cluster_id='CID3',\n check_type='UNKNOWN_CHECK_TYPE',\n interval=56,\n params={'k3': 'v3'}),\n ]\n\n timer1 = mock.Mock()\n timer2 = mock.Mock()\n mock_add_timer 
= self.patchobject(self.hm.TG, 'add_timer',\n side_effect=[timer1, timer2])\n\n # do it\n self.hm._load_runtime_registry()\n\n # assertions\n mock_claim.assert_called_once_with(self.hm.ctx, self.hm.engine_id)\n mock_calls = [\n mock.call(12, self.hm._poll_cluster, None, 'CID1'),\n mock.call(34, self.hm._poll_cluster, None, 'CID2')\n ]\n mock_add_timer.assert_has_calls(mock_calls)\n self.assertEqual(2, len(self.hm.registries))\n self.assertEqual(\n {\n 'cluster_id': 'CID1',\n 'check_type': consts.NODE_STATUS_POLLING,\n 'interval': 12,\n 'params': {'k1': 'v1'},\n 'timer': timer1\n },\n self.hm.registries[0])\n self.assertEqual(\n {\n 'cluster_id': 'CID2',\n 'check_type': consts.NODE_STATUS_POLLING,\n 'interval': 34,\n 'params': {'k2': 'v2'},\n 'timer': timer2\n },\n self.hm.registries[1])\n\n @mock.patch.object(rpc_client.EngineClient, 'cluster_check')\n def test__poll_cluster(self, mock_check):\n self.hm._poll_cluster('CLUSTER_ID')\n mock_check.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID')\n\n @mock.patch.object(obj_cluster.Cluster, 'get')\n def test__add_listener(self, mock_get):\n x_listener = mock.Mock()\n mock_add_thread = self.patchobject(self.hm.TG, 'add_thread',\n return_value=x_listener)\n x_cluster = mock.Mock(project='PROJECT_ID')\n mock_get.return_value = x_cluster\n\n # do it\n res = self.hm._add_listener('CLUSTER_ID')\n\n # assertions\n self.assertEqual(x_listener, res)\n mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID')\n mock_add_thread.assert_called_once_with(health_manager.ListenerProc,\n 'nova', 'PROJECT_ID',\n 'CLUSTER_ID')\n\n @mock.patch.object(obj_cluster.Cluster, 'get')\n def test__add_listener_cluster_not_found(self, mock_get):\n mock_get.return_value = None\n mock_add_thread = self.patchobject(self.hm.TG, 'add_thread')\n\n # do it\n res = self.hm._add_listener('CLUSTER_ID')\n\n # assertions\n self.assertIsNone(res)\n mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID')\n self.assertEqual(0, mock_add_thread.call_count)\n\n def test__start_check_for_polling(self):\n x_timer = mock.Mock()\n mock_add_timer = self.patchobject(self.hm.TG, 'add_timer',\n return_value=x_timer)\n\n entry = {\n 'cluster_id': 'CCID',\n 'interval': 12,\n 'check_type': consts.NODE_STATUS_POLLING,\n }\n res = self.hm._start_check(entry)\n\n expected = copy.deepcopy(entry)\n expected['timer'] = x_timer\n self.assertEqual(expected, res)\n mock_add_timer.assert_called_once_with(12, self.hm._poll_cluster, None,\n 'CCID')\n\n def test__start_check_for_listening(self):\n x_listener = mock.Mock()\n mock_add_listener = self.patchobject(self.hm, '_add_listener',\n return_value=x_listener)\n\n entry = {\n 'cluster_id': 'CCID',\n 'check_type': consts.VM_LIFECYCLE_EVENTS,\n }\n res = self.hm._start_check(entry)\n\n expected = copy.deepcopy(entry)\n expected['listener'] = x_listener\n self.assertEqual(expected, res)\n mock_add_listener.assert_called_once_with('CCID')\n\n def test__start_check_for_listening_failed(self):\n mock_add_listener = self.patchobject(self.hm, '_add_listener',\n return_value=None)\n\n entry = {\n 'cluster_id': 'CCID',\n 'check_type': consts.VM_LIFECYCLE_EVENTS,\n }\n res = self.hm._start_check(entry)\n\n self.assertIsNone(res)\n mock_add_listener.assert_called_once_with('CCID')\n\n def test__start_check_other_types(self):\n entry = {\n 'cluster_id': 'CCID',\n 'check_type': 'BOGUS TYPE',\n }\n res = self.hm._start_check(entry)\n\n self.assertIsNone(res)\n\n @mock.patch.object(hr.HealthRegistry, 'create')\n def test_register_cluster(self, mock_reg_create):\n ctx = 
mock.Mock()\n timer = mock.Mock()\n mock_add_tm = self.patchobject(self.hm.TG, 'add_timer',\n return_value=timer)\n mock_poll = self.patchobject(self.hm, '_poll_cluster',\n return_value=mock.Mock())\n x_reg = mock.Mock(cluster_id='CLUSTER_ID',\n check_type=consts.NODE_STATUS_POLLING,\n interval=50, params={})\n mock_reg_create.return_value = x_reg\n\n self.hm.register_cluster(ctx,\n cluster_id='CLUSTER_ID',\n check_type=consts.NODE_STATUS_POLLING,\n interval=50)\n\n mock_reg_create.assert_called_once_with(\n ctx, 'CLUSTER_ID', consts.NODE_STATUS_POLLING, 50, {}, 'ENGINE_ID')\n mock_add_tm.assert_called_with(50, mock_poll, None, 'CLUSTER_ID')\n self.assertEqual(1, len(self.hm.registries))\n\n @mock.patch.object(hr.HealthRegistry, 'delete')\n def test_unregister_cluster(self, mock_reg_delete):\n ctx = mock.Mock()\n timer = mock.Mock()\n registry = {\n 'cluster_id': 'CLUSTER_ID',\n 'check_type': 'NODE_STATUS_POLLING',\n 'interval': 50,\n 'params': {},\n 'timer': timer\n }\n self.hm.rt['registries'] = [registry]\n mock_tm_done = self.patchobject(self.hm.TG, 'timer_done',\n return_value=mock.Mock())\n self.hm.unregister_cluster(ctx, cluster_id='CLUSTER_ID')\n mock_tm_done.assert_called_with(timer)\n self.assertEqual(0, len(self.hm.registries))\n mock_reg_delete.assert_called_once_with(ctx, 'CLUSTER_ID')\n\n @mock.patch('oslo_messaging.Target')\n def test_start(self, mock_target):\n self.hm.TG = mock.Mock()\n target = mock.Mock()\n mock_target.return_value = target\n x_rpc_server = mock.Mock()\n mock_get_rpc = self.patchobject(messaging, 'get_rpc_server',\n return_value=x_rpc_server)\n x_timer = mock.Mock()\n mock_add_timer = self.patchobject(self.hm.TG, 'add_timer',\n return_value=x_timer)\n mock_load = self.patchobject(self.hm, '_load_runtime_registry')\n\n # do it\n self.hm.start()\n\n # assert\n mock_target.assert_called_once_with(server='ENGINE_ID',\n topic='engine-health-mgr',\n version=consts.RPC_API_VERSION)\n mock_get_rpc.assert_called_once_with(target, self.hm)\n x_rpc_server.start.assert_called_once_with()\n mock_add_timer.assert_called_once_with(cfg.CONF.periodic_interval,\n self.hm._dummy_task)\n mock_load.assert_called_once_with()\n", "sub_path": "senlin/tests/unit/engine/test_health_manager.py", "file_name": "test_health_manager.py", "file_ext": "py", "file_size_in_byte": 11704, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "senlin.tests.unit.common.base.SenlinTestCase", "line_number": 28, "usage_type": "attribute"}, {"api_name": "senlin.tests.unit.common.base", "line_number": 28, "usage_type": "name"}, {"api_name": "senlin.engine.health_manager.NotificationEndpoint", "line_number": 32, "usage_type": "call"}, {"api_name": "senlin.engine.health_manager", "line_number": 32, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 27, "usage_type": "call"}, {"api_name": "senlin.tests.unit.common.base.SenlinTestCase", "line_number": 45, "usage_type": "attribute"}, {"api_name": "senlin.tests.unit.common.base", "line_number": 45, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 49, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 51, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 53, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 55, "usage_type": "call"}, {"api_name": "senlin.engine.health_manager.ListenerProc", "line_number": 58, "usage_type": "call"}, {"api_name": "senlin.engine.health_manager", "line_number": 58, "usage_type": "name"}, 
{"api_name": "oslo_config.cfg.CONF", "line_number": 62, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 62, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 41, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 42, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 43, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 44, "usage_type": "call"}, {"api_name": "senlin.tests.unit.common.base.SenlinTestCase", "line_number": 72, "usage_type": "attribute"}, {"api_name": "senlin.tests.unit.common.base", "line_number": 72, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 77, "usage_type": "call"}, {"api_name": "senlin.common.consts.ENGINE_HEALTH_MGR_TOPIC", "line_number": 79, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 79, "usage_type": "name"}, {"api_name": "senlin.common.consts.RPC_API_VERSION", "line_number": 80, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 80, "usage_type": "name"}, {"api_name": "senlin.engine.health_manager.HealthManager", "line_number": 81, "usage_type": "call"}, {"api_name": "senlin.engine.health_manager", "line_number": 81, "usage_type": "name"}, {"api_name": "senlin.common.consts.ENGINE_HEALTH_MGR_TOPIC", "line_number": 87, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 87, "usage_type": "name"}, {"api_name": "senlin.common.consts.RPC_API_VERSION", "line_number": 88, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 88, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 94, "usage_type": "call"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 95, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 95, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 98, "usage_type": "call"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 99, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 99, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 102, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 108, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 109, "usage_type": "call"}, {"api_name": "mock.call", "line_number": 119, "usage_type": "call"}, {"api_name": "mock.call", "line_number": 120, "usage_type": "call"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 127, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 127, "usage_type": "name"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 136, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 136, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 91, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 91, "usage_type": "attribute"}, {"api_name": "senlin.objects.health_registry.HealthRegistry", "line_number": 91, "usage_type": "attribute"}, {"api_name": "senlin.objects.health_registry", "line_number": 91, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 143, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 143, "usage_type": "attribute"}, {"api_name": "senlin.rpc.client.EngineClient", "line_number": 143, "usage_type": "attribute"}, {"api_name": "senlin.rpc.client", "line_number": 143, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 150, 
"usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 153, "usage_type": "call"}, {"api_name": "senlin.engine.health_manager.ListenerProc", "line_number": 162, "usage_type": "attribute"}, {"api_name": "senlin.engine.health_manager", "line_number": 162, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 148, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 148, "usage_type": "attribute"}, {"api_name": "senlin.objects.cluster.Cluster", "line_number": 148, "usage_type": "attribute"}, {"api_name": "senlin.objects.cluster", "line_number": 148, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 166, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 166, "usage_type": "attribute"}, {"api_name": "senlin.objects.cluster.Cluster", "line_number": 166, "usage_type": "attribute"}, {"api_name": "senlin.objects.cluster", "line_number": 166, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 180, "usage_type": "call"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 187, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 187, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 191, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 198, "usage_type": "call"}, {"api_name": "senlin.common.consts.VM_LIFECYCLE_EVENTS", "line_number": 204, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 204, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 208, "usage_type": "call"}, {"api_name": "senlin.common.consts.VM_LIFECYCLE_EVENTS", "line_number": 219, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 219, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 237, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 238, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 242, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 243, "usage_type": "call"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 244, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 244, "usage_type": "name"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 250, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 250, "usage_type": "name"}, {"api_name": "senlin.common.consts.NODE_STATUS_POLLING", "line_number": 254, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 254, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 235, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 235, "usage_type": "attribute"}, {"api_name": "senlin.objects.health_registry.HealthRegistry", "line_number": 235, "usage_type": "attribute"}, {"api_name": "senlin.objects.health_registry", "line_number": 235, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 260, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 261, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 271, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 258, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 258, "usage_type": "attribute"}, {"api_name": "senlin.objects.health_registry.HealthRegistry", "line_number": 258, "usage_type": "attribute"}, {"api_name": "senlin.objects.health_registry", "line_number": 258, "usage_type": "name"}, {"api_name": "mock.Mock", 
"line_number": 279, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 280, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 282, "usage_type": "call"}, {"api_name": "senlin.common.messaging", "line_number": 283, "usage_type": "argument"}, {"api_name": "mock.Mock", "line_number": 285, "usage_type": "call"}, {"api_name": "senlin.common.consts.RPC_API_VERSION", "line_number": 296, "usage_type": "attribute"}, {"api_name": "senlin.common.consts", "line_number": 296, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 299, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 299, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 277, "usage_type": "call"}]} +{"seq_id": "340130332", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, redirect\nfrom flask_flatpages import FlatPages\nfrom flask_frozen import Freezer\nfrom prepare import prepare\n\n# -----------------------------------------------------------------------------\n\napp = Flask(__name__)\napp.config.from_pyfile('settings.py')\npages = FlatPages(app)\nfreezer = Freezer(app)\nfamily_book, parent_map = prepare()\n\n# -----------------------------------------------------------------------------\n\ndef get_spec_posts(_type, lang, num = 5):\n posts = [page for page in pages if page.path.startswith(lang) \\\n and 'date' in page.meta \\\n and 'type' in page.meta and page.meta['type'] == _type]\n sorted_posts = sorted(posts, reverse=True,\n key=lambda page: page.meta['date'])[:num]\n return sorted_posts\n\n@app.route('/')\ndef home():\n return redirect('/cn/')\n\n@app.route('//')\ndef home_lang(lang):\n return render_template('index.html', lang=lang,\n event_pages = get_spec_posts('event', lang),\n practice_pages = get_spec_posts('practice', lang),\n law_pages = get_spec_posts('law', lang))\n\n@app.route('//')\ndef page(path):\n lang = path.split('/')[0]\n page = pages.get_or_404(path)\n family = None\n try:\n parent_idx = parent_map[lang]['/'+page.path]\n family = family_book[lang][parent_idx]\n except KeyError:\n if 'type' in page.meta:\n list_path = '%s/list-%s' % (lang, page.meta['type'])\n list_page = pages.get(list_path)\n if list_page:\n family = [list_page.meta['title'], '/'+list_page.path]\n if family is None:\n family = [page.meta['title'], '/'+page.path]\n try:\n head_image = page.meta['head_image']\n except KeyError:\n head_image = None\n try:\n list_type = page.meta['list']\n posts = get_spec_posts(list_type, lang, 1000)\n except KeyError:\n posts = None\n return render_template('page.html', page=page,\n family=family, head_image=head_image, posts=posts, lang=lang)\n\n@app.route('/sitemap/')\ndef site_map():\n urls = ['/cn', '/jp']\n return render_template('sitemap.html', pages=pages, urls=urls)\n\n# -----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n app.run()\n\n# -----------------------------------------------------------------------------", "sub_path": "sites/denchi.cn/web/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_flatpages.FlatPages", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_frozen.Freezer", "line_number": 13, "usage_type": "call"}, {"api_name": "prepare.prepare", "line_number": 14, "usage_type": 
"call"}, {"api_name": "flask.redirect", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "436871438", "text": "import time\r\nimport pandas as pd\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom datetime import datetime\r\nimport shutil\r\nimport os\r\n\r\nprint(datetime.today().strftime(\"%Y%m%d\"))\r\n\r\n# Today's date\r\ndate_label = time.strftime('%Y.%m.%d')\r\ntry:\r\n #local test Directory\r\n work_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n Download_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\temp_DL_file\\\\'\r\n final_Inv_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n CAN_final_Inv_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n driver_path = 'C:\\\\Users\\\\User\\\\Anaconda3\\\\chrome\\\\chromedriver.exe'\r\n os.chdir(work_dir)\r\nexcept:\r\n # 0047Directory\r\n work_dir = 'C:\\\\Users\\\\raymond.hung\\\\Documents\\\\Automate_Script\\\\DLpack_Wayfair\\\\'\r\n Download_dir = 'N:\\\\E Commerce\\\\Public Share\\\\Dot Com - Wayfair\\\\WF Catalog\\\\temp_DL_file\\\\'\r\n final_Inv_dir = 'N:\\\\E Commerce\\\\Public Share\\\\Dot Com - Wayfair\\\\WF Catalog\\\\WF Catalog\\\\'\r\n CAN_final_Inv_dir = 'N:\\\\E Commerce\\\\Public Share\\\\Dot Com - Wayfair\\\\WF Catalog\\\\WF CAN Catalog\\\\'\r\n driver_path = 'C:\\\\Users\\\\raymond.hung\\\\chrome\\\\chromedriver.exe'\r\n os.chdir(work_dir)\r\n\r\n# Account and Password\r\nlogin_info = pd.read_csv(work_dir+ 'Account & Password.csv',index_col=0)\r\nusername = login_info.loc['Account', 'CONTENT']\r\npassword = login_info.loc['Password', 'CONTENT']\r\n\r\n# Chrome driver setting\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument('--start-maximized')\r\nprefs = {'profile.default_content_settings.popups': '0', 'download.default_directory' : Download_dir}\r\noptions.add_experimental_option('prefs', prefs)\r\ndriver = webdriver.Chrome(driver_path,chrome_options=options)\r\n\r\n# Wayfair supplier website\r\nWF_Extrant = 'https://partners.wayfair.com'\r\ndriver.get(WF_Extrant)\r\n\r\nLoadingChecker = (By.XPATH, '//*[@id=\"login\"]/button')\r\nWebDriverWait(driver, 120).until(EC.presence_of_element_located(LoadingChecker))\r\n\r\n# Input username and password and login\r\ndriver.find_element_by_id('js-username').send_keys(username)\r\ndriver.find_element_by_id('password_field').send_keys(password)\r\ndriver.find_element_by_xpath('//*[@id=\"login\"]/button').click()\r\ntime.sleep(30)\r\n\r\n# Skip Wayfair system info.\r\ntry:\r\n iframe = driver.find_element_by_css_selector('body > div.appcues > appcues-container > iframe')\r\n driver.switch_to_frame(iframe)\r\n\r\n wfe_modal = 'body > appcues > div.appcues-skip > a'\r\n LoadingChecker = (By.CSS_SELECTOR, wfe_modal)\r\n WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n driver.find_element_by_css_selector(wfe_modal).click()\r\nexcept:\r\n pass\r\ndriver.switch_to_default_content()\r\n\r\ncss_body = 'body'\r\nl = ['US', 'CAN']\r\nfor country in l:\r\n # Click select box to choose US or 
CAN\r\n    if driver.find_element_by_css_selector(css_body).get_attribute('class') == ' extranet ':\r\n        LOC = {'US':'Topline Furniture Warehouse Corp.', 'CAN':'CAN_Topline Furniture Warehouse Corp.'}\r\n        css_common='body > div.wrapper > div:nth-child(1) > header > div > div > div.PH-Header > div > div.ex-Grid-item.ex-Grid-item--flex.u-flexShrink.ex-Grid-item--column.u-justifyEnd > div > div.PH-Header-information > div'\r\n        LoadingChecker = (By.CSS_SELECTOR, css_common+' > span.ex-Box.ex-Block.ex-Block--display-flex.ex-Block--isFlex.ex-Block--flexWrap-wrap.ex-Block--alignItems-center.ex-Block--display-flex.ex-Box--ml-small')\r\n        WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n        driver.find_element_by_css_selector(css_common+' > span.ex-Box.ex-Block.ex-Block--display-flex.ex-Block--isFlex.ex-Block--flexWrap-wrap.ex-Block--alignItems-center.ex-Block--display-flex.ex-Box--ml-small').click()\r\n        for i in range(1, 3):\r\n            try:\r\n                box_text = driver.find_element_by_css_selector(css_common+'> span.PH-HeaderDropdown-value > ul > li:nth-child('+str(i)+') > button').text\r\n                if box_text == LOC[country]:\r\n                    driver.find_element_by_css_selector(css_common+'> span.PH-HeaderDropdown-value > ul > li:nth-child('+str(i)+') > button').click()\r\n                    time.sleep(30)\r\n                    break\r\n            except:\r\n                print(\"switch LOC failed\")\r\n                driver.refresh()\r\n                time.sleep(30)\r\n\r\n    elif driver.find_element_by_css_selector(css_body).get_attribute('class') == ' extranet body_new_layout':\r\n        LOC = {'US':'Topline', 'CAN':'CAN_Topline Furniture'}\r\n        css_select_box = 'body > div.wrapper.wrapper_new_layout > div > div > div.BaseBox-ofhg3j-0.dfeQgM > div.BaseBox-ofhg3j-0.bSmOIL > div > div > div.BaseBox-ofhg3j-0.gknKYY > div:nth-child(1) > div > button > div.BaseBox-ofhg3j-0.dBiCLz'\r\n        LoadingChecker = (By.CSS_SELECTOR, css_select_box)\r\n        WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n        driver.find_element_by_css_selector(css_select_box).click()\r\n        for i in range(1, 3):\r\n            try:\r\n                css_box_text = 'body > div.wrapper.wrapper_new_layout > div > div > div.BaseBox-ofhg3j-0.dfeQgM > div.BaseBox-ofhg3j-0.bSmOIL > div > div > div.BaseBox-ofhg3j-0.gknKYY > div:nth-child(1) > div.react-tiny-popover-container > div > ul > li:nth-child('+str(i)+') > button > div'\r\n                box_text = driver.find_element_by_css_selector(css_box_text).text\r\n                if box_text == LOC[country]:\r\n                    driver.find_element_by_css_selector(css_box_text).click()\r\n                    time.sleep(30)\r\n                    break\r\n            except:\r\n                print(\"switch LOC failed\")\r\n                driver.refresh()\r\n                time.sleep(30)\r\n    \r\n    for i in range(3):\r\n        # Turn to Catalog page\r\n        for j in range(3):\r\n            Catalog_Download = 'https://partners.wayfair.com//v/catalog/catalog_management/index'\r\n            driver.get(Catalog_Download)\r\n            time.sleep(30)\r\n            \r\n            # Skip Wayfair system info.\r\n            try:\r\n                iframe = driver.find_element_by_css_selector('body > div.appcues > appcues-container > iframe')\r\n                driver.switch_to_frame(iframe)\r\n\r\n                wfe_modal = 'body > appcues > div.appcues-skip > a'\r\n                LoadingChecker = (By.CSS_SELECTOR, wfe_modal)\r\n                WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n                driver.find_element_by_css_selector(wfe_modal).click()\r\n            except:\r\n                pass\r\n            driver.switch_to_default_content()\r\n\r\n            ProductManagementDashboard = '#global-layout-wrapper-content > div > div > div > header > div.BaseBox-sc-16uwbyc-0.hGhZcv > div > h1 > div > div.ex-Grid-item.ex-Grid-item--flex.ex-Grid-item--row'\r\n            if
driver.find_element_by_css_selector(ProductManagementDashboard).text == \"Product Management Dashboard\":\r\n                break\r\n            else:\r\n                print(\"can't turn to catalog page\")\r\n\r\n        # Click dropdown menus and download excel file\r\n        for j in range(3):\r\n            try:\r\n                LoadingChecker = (By.CSS_SELECTOR, '#global-layout-wrapper-content > div > div > div > main > div > div.ex-Grid-item.u-size3of12.ex-Grid-item--row > div > div > div > div:nth-child(6) > button')\r\n                WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n                driver.find_element_by_css_selector('#global-layout-wrapper-content > div > div > div > main > div > div.ex-Grid-item.u-size3of12.ex-Grid-item--row > div > div > div > div:nth-child(6) > button').click()\r\n                time.sleep(5)\r\n                break\r\n            except:\r\n                print(\"can't open dropdown menu and download\") \r\n                driver.refresh()\r\n                time.sleep(60)\r\n        \r\n        # Turn to download management center page\r\n        for j in range(3):\r\n            download_page = 'https://partners.wayfair.com/v/supplier/download_center/management/app'\r\n            driver.get(download_page)\r\n            time.sleep(30)\r\n            \r\n            #Skip Wayfair system info.\r\n            try:\r\n                iframe = driver.find_element_by_css_selector('body > div.appcues > appcues-container > iframe')\r\n                driver.switch_to_frame(iframe)\r\n                \r\n                wfe_modal = 'body > appcues > div.appcues-skip > a'\r\n                LoadingChecker = (By.CSS_SELECTOR, wfe_modal)\r\n                WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n                driver.find_element_by_css_selector(wfe_modal).click()\r\n            except:\r\n                pass\r\n            driver.switch_to_default_content()\r\n\r\n            # Confirm Download Center Status\r\n            if driver.find_element_by_css_selector(css_body).get_attribute('class') == ' extranet ':\r\n                DownloadManagementCenter = 'body > div.wrapper > div.body.wfe_content_wrap.js-wfe-content-wrap > div > div > div > div > div > div > h1'\r\n                if driver.find_element_by_css_selector(DownloadManagementCenter).text == \"Download Management Center\":\r\n                    break\r\n                else:\r\n                    print(\"can't turn to download management center page\")\r\n            elif driver.find_element_by_css_selector(css_body).get_attribute('class') == ' extranet body_new_layout':\r\n                DownloadManagementCenter = '#app > div > div > div > h1'\r\n                if driver.find_element_by_css_selector(DownloadManagementCenter).text == \"Download Management Center\":\r\n                    break\r\n                else:\r\n                    print(\"can't turn to download management center page\")\r\n        \r\n        #Skip Wayfair system info.\r\n        try:\r\n            iframe = driver.find_element_by_css_selector('body > div.appcues > appcues-container > iframe')\r\n            driver.switch_to_frame(iframe)\r\n            \r\n            wfe_modal = 'body > appcues > div.appcues-skip > a'\r\n            LoadingChecker = (By.CSS_SELECTOR, wfe_modal)\r\n            WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n            driver.find_element_by_css_selector(wfe_modal).click()\r\n        except:\r\n            pass\r\n        driver.switch_to_default_content()\r\n\r\n        # Switch Window Handle\r\n        window_after = driver.window_handles[-1]\r\n        driver.switch_to_window(window_after)\r\n        \r\n        # Sorting by Created Date and download file\r\n        for j in range(3):\r\n            # Sorting by Created Date\r\n            try:\r\n#                LoadingChecker = (By.CSS_SELECTOR, '.js-autogen-column:nth-child(4) .sorting')\r\n#                WebDriverWait(driver, 60).until(EC.presence_of_element_located(LoadingChecker))\r\n#                driver.find_element_by_css_selector('.js-autogen-column:nth-child(4) .sorting').click()\r\n#                time.sleep(20)\r\n                \r\n                # Confirm Download Button status\r\n                if driver.find_element_by_css_selector('tbody .table_row
.table_data_cell:nth-child(5)').text == 'Complete' and \\\r\n                driver.find_element_by_css_selector('tbody .table_row .table_data_cell:nth-child(2)').text == 'Catalog Export':\r\n                    # Confirm Sorting Date status\r\n                    time1 = driver.find_element_by_css_selector('tbody > tr:nth-child(1) > td:nth-child(4)').text\r\n                    time2 = driver.find_element_by_css_selector('tbody > tr:nth-child(2) > td:nth-child(4)').text\r\n                    if datetime(int(time1[6:10]), int(time1[0:2]), int(time1[3:5]), int(time1[11:13]), int(time1[14:16]), int(time1[17:19])) > \\\r\n                    datetime(int(time2[6:10]), int(time2[0:2]), int(time2[3:5]), int(time2[11:13]), int(time2[14:16]), int(time2[17:19])):\r\n                        driver.find_elements_by_css_selector('.js-document-download')[0].click()\r\n                        time.sleep(30)\r\n                        break\r\n                    else:\r\n                        print(\"sorting by created date not ready\")\r\n                        driver.refresh()\r\n                        time.sleep(30)\r\n                else:\r\n                    print(\"download button not ready\")\r\n                    driver.refresh()\r\n                    time.sleep(60)\r\n            except:\r\n                print(\"can't sort by created date\")\r\n                driver.refresh()\r\n                time.sleep(30)\r\n        \r\n        #Skip Wayfair system info.\r\n        try:\r\n            iframe = driver.find_element_by_css_selector('body > div.appcues > appcues-container > iframe')\r\n            driver.switch_to_frame(iframe)\r\n            \r\n            wfe_modal = 'body > appcues > div.appcues-skip > a'\r\n            LoadingChecker = (By.CSS_SELECTOR, wfe_modal)\r\n            WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n            driver.find_element_by_css_selector(wfe_modal).click()\r\n        except:\r\n            pass\r\n        driver.switch_to_default_content()\r\n        \r\n        # Check download action\r\n        try:\r\n            ori_file = [Inv_name for Inv_name in os.listdir(Download_dir) if '.csv' in Inv_name][0]\r\n            if country == \"US\" and \"Catalog_Export_Topline\" in ori_file:\r\n                print(\"US file download OK\")\r\n                break\r\n            elif country == \"CAN\" and \"Catalog_Export_41910\" in ori_file:\r\n                print(\"CAN file download OK\")\r\n                break\r\n            else:\r\n                os.remove(Download_dir+ori_file)\r\n                print(\"file not correct\")\r\n                driver.refresh()\r\n                time.sleep(30)\r\n        except:\r\n            print(\"file download failed\")\r\n            driver.refresh()\r\n            time.sleep(30)\r\n    \r\n    #Skip Wayfair system info.\r\n    try:\r\n        iframe = driver.find_element_by_css_selector('body > div.appcues > appcues-container > iframe')\r\n        driver.switch_to_frame(iframe)\r\n        \r\n        wfe_modal = 'body > appcues > div.appcues-skip > a'\r\n        LoadingChecker = (By.CSS_SELECTOR, wfe_modal)\r\n        WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n        driver.find_element_by_css_selector(wfe_modal).click()\r\n    except:\r\n        pass\r\n    driver.switch_to_default_content()\r\n    \r\n    # Set File name\r\n    US_total_name = date_label + ' 15379_full_catalog_export.csv'#US\r\n    CAN_total_name = date_label + ' 41910_full_catalog_export.csv'#CAN \r\n    \r\n    if country == \"US\":\r\n        shutil.move(Download_dir + ori_file, final_Inv_dir + US_total_name)\r\n        print(\"US catalog export file is ready\")\r\n    elif country == \"CAN\":\r\n        shutil.move(Download_dir + ori_file, CAN_final_Inv_dir + CAN_total_name)\r\n        print(\"CAN catalog export file is ready\")\r\n\r\ndriver.quit()\r\n", "sub_path": "DL_WF_catalog.py", "file_name": "DL_WF_catalog.py", "file_ext": "py", "file_size_in_byte": 15211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "datetime.datetime.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 14, "usage_type": "call"},
{"api_name": "os.chdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 42, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 42, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 48, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 49, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 63, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 64, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 64, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 64, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 77, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 78, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 85, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 90, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 95, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 96, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 96, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 96, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 116, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 124, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 124, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 125, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 125, "usage_type": "call"}, {"api_name": 
"selenium.webdriver.support.expected_conditions", "line_number": 125, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 140, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 140, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 141, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 141, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 141, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 143, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 154, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 162, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 162, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 163, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 163, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 163, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 189, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 189, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 190, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 190, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 190, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 216, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 218, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 223, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 227, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 231, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 239, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 239, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 240, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 240, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 240, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 248, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 256, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 259, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 263, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 271, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 271, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 272, "usage_type": "call"}, {"api_name": 
"selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 272, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 272, "usage_type": "name"}, {"api_name": "shutil.move", "line_number": 283, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 286, "usage_type": "call"}]} +{"seq_id": "535173655", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom nose.tools import assert_equal, assert_not_equal, assert_raises, assert_true\n\n\ndef abstract_parse_test():\n from logic.parsers.parser_abstract import abstract\n \n user = {'tzone':'EST'}\n timestamp = 1472262973\n url = \"http://www.wired.com/2016/08/white-house-proposes-new-immigration-rule-entrepreneurs/\"\n c = abstract(url, user, timestamp)\n \n assert_equal(c['frmt'], 'article')\n assert_equal(c['dow'], 5)\n \n assert_equal(c['size'], 4843)", "sub_path": "logic/tests/parse_test.py", "file_name": "parse_test.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logic.parsers.parser_abstract.abstract", "line_number": 12, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 14, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 15, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "221706977", "text": "# _*_ coding:utf-8 -*-\n# 开发人员:&杜乾坤\n# 开发工具:&pycharm\nimport json, requests\nfrom lxml import etree\n\nclass KuDog(object):\n def __init__(self):\n self.base_url = 'https://www.kugou.com/yy/singer/index/%s-%s-1.html'\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n self.parse()\n\n # ---------------通过url获取该页面的内容,返回xpath对象\n def get_xpath(self, url, headers):\n try:\n response = requests.get(url, headers=headers)\n return etree.HTML(response.text)\n except Exception:\n print(url, '该页面没有相应!')\n return ''\n\n # --------------------通过歌手详情页获取歌手简介\n def parse_info(self, url):\n html = self.get_xpath(url, self.headers)\n info = html.xpath('//div[@class=\"intro\"]/p/text()')\n return info[0]\n\n # --------------------------写入方法\n def write_json(self, value):\n with open('kugou.json', 'a+', encoding='utf-8') as file:\n file.write('\\n')\n json.dump(value,file,ensure_ascii=False) #添加ensure_ascii=False让json文件可以写入中文\n\n # -----------------------------用ASCII码值来变换abcd...\n def parse(self):\n for j in range(97, 124):\n # 小写字母为97-122,当等于123的时候我们按歌手名单的其他算,路由为null\n if j < 123:\n p = chr(j)\n else:\n p = \"null\"\n for i in range(1, 6):\n response = requests.get(self.base_url % (i, p), headers=self.headers)\n # print(response.text)\n html = etree.HTML(response.text)\n # 由于数据分两个url,所以需要加起来数据列表\n name_list1 = html.xpath('//ul[@id=\"list_head\"]/li/strong/a/text()')\n sing_list1 = html.xpath('//ul[@id=\"list_head\"]/li/strong/a/@href')\n name_list2 = html.xpath('//div[@id=\"list1\"]/ul/li/a/text()')\n sing_list2 = html.xpath('//div[@id=\"list1\"]/ul/li/a/@href')\n singer_name_list = name_list1 + name_list2\n singer_sing_list = sing_list1 + sing_list2\n # print(singer_name_list,singer_sing_list)\n for i, name in enumerate(singer_name_list):\n item = {}\n item['名字'] = name\n item['歌单'] = singer_sing_list[i]\n # item['歌手信息']=parse_info(singer_sing_list[i])#被封了\n print(item)\n self.write_json(item)\n\nmusic = KuDog()\n", 
"sub_path": "爬虫编程/案例13_爬取酷狗音乐的歌手和歌单.py", "file_name": "案例13_爬取酷狗音乐的歌手和歌单.py", "file_ext": "py", "file_size_in_byte": 2762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 19, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 47, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "525206841", "text": "import pandas as pd\r\nimport numpy as np\r\nimport datetime\r\nimport sqlalchemy\r\nfrom geopy import distance\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy.sql import select\r\nfrom BOM_Weather_Data.api.database import connectToDatabase\r\nfrom BOM_Weather_Data.api.database_models import Station\r\nfrom BOM_Weather_Data.api.database_models import Rainfall\r\nfrom BOM_Weather_Data.api.database_models import Solar\r\nfrom BOM_Weather_Data.api.database_models import Temperature\r\nfrom BOM_Weather_Data.api.database_models import Location\r\n\r\n\r\ndef getData(session, site, type, before_date=None, after_date=None):\r\n return\r\n\r\n\r\ndef getSitesNear(session, Location, range=25, current=False):\r\n \"\"\"Get all sites within `range` km from (`Lat`, `Lon`).\r\n\r\n Generates list of Sites geographically placed within the desired region.\r\n e.g.::\r\n # Find all sites near Brisbane\r\n results = getSitesNear(session, -27.4698, 153.0251)\r\n\r\n `session`: sqlAlchemy Session\r\n\r\n `Lat`: Latitude, (-90, 90)\r\n\r\n `Lon`: Longitutde, (-180, 180)\r\n\r\n `range`: The range or radius from (`Lat`, `Lon`) \\\r\n to search for Sites in kilometers\r\n\r\n `current`: Filter for only current stations \\\r\n (Stations which have not ceased operation).\r\n default: current=True\r\n \"\"\"\r\n results = StationQueryResult(session)\r\n s = select([Station.Site, Station.Lat, Station.Lon])\r\n for row in session.execute(s):\r\n if Location.withinRange(Location(row[1], row[2]), range):\r\n results.append(row[0])\r\n return results\r\n\r\n\r\ndef getCurrentSites(session, asOf=None):\r\n \"\"\"Gets all current Stations (Stations which have not ceased operation).\r\n\r\n `session`: sqlAlchemy Session\r\n\r\n `asOf`: datetime.datetime of when to check if the Site was current\r\n default: None, use today's date\r\n \"\"\"\r\n results = StationQueryResult(session)\r\n # TODO: Change to get current date and not this default\r\n # TODO: Accept the usage of asOf parameter\r\n s = select([Station.Site]).where(Station.End_date == \"2019-07-01\")\r\n for row in session.execute(s):\r\n results.append(row[0])\r\n return results\r\n\r\n\r\ndef isSiteCurrent(session, asOf=None):\r\n \"\"\"Checks if site/s are current (Stations which have not ceased operation).\r\n\r\n Output attempts to follow a similar format to the datatype of `site`\r\n\r\n `session`: sqlAlchemy Session\r\n\r\n `asOf`: datetime.datetime of when to check if the Site was current\r\n default: None, use today's date\r\n\r\n `site`: input can be type \"integer\", \"dataframe\", \"dict\", \"list\"\r\n integer: output=boolean\r\n dataframe: output appends a column `column_Title`\r\n dict: updates dict such that site[siteID] = isCurrent\r\n list: outputs list of tuples [ (siteID, 
isCurrent), ...]\r\n \"\"\"\r\n\r\n def isDateEqual(date1, date2):\r\n \"\"\" Checks if two dates are equal. Accepts String or datetime.date\r\n \"\"\"\r\n if isinstance(date1, datetime.date):\r\n date1 = date1.strftime(\"%Y-%m-01\")\r\n if isinstance(date2, datetime.date):\r\n date2 = date2.strftime(\"%Y-%m-01\")\r\n return date1 == date2\r\n\r\n # Get current date to test if current\r\n now = datetime.datetime.now()\r\n curr_date = now.strftime(\"%Y-%m-01\")\r\n s = select([Station.End_date, Station.Site])\r\n if isinstance(site, int) or isinstance(site, str):\r\n s = s.where(Station.Site == site)\r\n elif isinstance(site, list):\r\n results = []\r\n s = s.where(Station.Site.in_(site))\r\n elif isinstance(site, dict):\r\n results = site\r\n s = s.where(Station.Site.in_(list(site.keys())))\r\n elif isinstance(site, pd.DataFrame):\r\n results = site\r\n results[column_Title] = False\r\n else:\r\n print(\"Could not figure out type\")\r\n return\r\n\r\n rows = session.execute(s)\r\n\r\n for row in rows:\r\n\r\n if isinstance(site, int) or isinstance(site, str):\r\n return isDateEqual(row[0], curr_date)\r\n\r\n equalDate = isDateEqual(row[0], curr_date)\r\n\r\n if filter and equalDate:\r\n if isinstance(site, list):\r\n results.append((row[1], equalDate))\r\n elif isinstance(site, dict):\r\n results[row[1]] = equalDate\r\n elif isinstance(site, pd.DataFrame):\r\n results.loc[results['Site'] == row[1], column_Title] = equalDate\r\n elif filter is False:\r\n if isinstance(site, list):\r\n results.append((row[1], equalDate))\r\n elif isinstance(site, dict):\r\n results[row[1]] = equalDate\r\n elif isinstance(site, pd.DataFrame):\r\n results.loc[results['Site'] == row[1], column_Title] = equalDate\r\n\r\n return results\r\n\r\n\r\ndef daysSinceUpdate(session, site, obsType, column_Title=\"DaysSinceUpdate\"):\r\n \"\"\"Checks how how many days since Station had its last record.\r\n\r\n Output attempts to follow a similar format to the datatype of `site`\r\n\r\n `session`: sqlAlchemy Session\r\n\r\n `obsType`: Type of observation. 
Can be 'Temperature', 'Solar', 'Rainfall'\r\n\r\n `site`: input can be type \"integer\", \"dataframe\", \"dict\", \"list\"\r\n integer: output boolean\r\n dataframe: output appends a column `column_Title`\r\n dict: updates dict such that site[siteID] = isCurrent\r\n list: outputs list of tuples [ (siteID, isCurrent), ...]\r\n \"\"\"\r\n\r\n return\r\n", "sub_path": "api/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 5450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sqlalchemy.sql.select", "line_number": 42, "usage_type": "call"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site", "line_number": 42, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station", "line_number": 42, "usage_type": "name"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Lat", "line_number": 42, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Lon", "line_number": 42, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Location.withinRange", "line_number": 44, "usage_type": "call"}, {"api_name": "BOM_Weather_Data.api.database_models.Location", "line_number": 44, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.select", "line_number": 60, "usage_type": "call"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site", "line_number": 60, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station", "line_number": 60, "usage_type": "name"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.End_date", "line_number": 60, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 86, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 88, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.select", "line_number": 95, "usage_type": "call"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.End_date", "line_number": 95, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station", "line_number": 95, "usage_type": "name"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site", "line_number": 95, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site", "line_number": 97, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station", "line_number": 97, "usage_type": "name"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site.in_", "line_number": 100, "usage_type": "call"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site", "line_number": 100, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station", "line_number": 100, "usage_type": "name"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site.in_", "line_number": 103, "usage_type": "call"}, {"api_name": "BOM_Weather_Data.api.database_models.Station.Site", "line_number": 103, "usage_type": "attribute"}, {"api_name": "BOM_Weather_Data.api.database_models.Station", "line_number": 103, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 132, "usage_type": 
"attribute"}]} +{"seq_id": "622759674", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 28 17:55:07 2020\n\n@author: Pankaj Mishra\n\nCode adapted from - https://github.com/moskomule/senet.pytorch/blob/master/senet/se_module.py\n\"\"\"\n\nfrom torch import nn\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=2):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n# print(f' bacth{b} channel {c}')\n y = self.avg_pool(x).squeeze(2).squeeze(2)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)", "sub_path": "senet.py", "file_name": "senet.py", "file_ext": "py", "file_size_in_byte": 828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "192100723", "text": "from voigt_notation import voigt_one_to_two, voigt_two_to_one, \\\n four_tensor_one_to_two, four_tensor_two_to_one,\\\n four_tensor_matrix_to_list,\\\n four_tensor_list_to_matrix\nimport numpy as np\nfrom sympy import Eijk\nimport itertools\nfrom integrate import zero_theta_integrate\n\ndef initialize_disl_config(config, config_style, latt_const, element_struct, frame_new, lattice_const, disl_center, \n repeat_para=None, ):\n '''\n pre-processing according to settings of dislocaiton configuration\n output: position of disloction (Angstrom),\n e'1, e'2, e'3 components of Burgers vector (Angstrom)\n '''\n disl_line_direction = config[\"disl_line_direction\"]\n if element_struct == 'fcc' or element_struct == 'bcc':\n burgers = [i * latt_const for i in config[\"burgers\"]]\n disl_center = [disl_center[0] * config[\"cell_x\"] * latt_const,\n disl_center[1] * config[\"cell_y\"] * latt_const]\n if element_struct == 'hcp': \n burgers = np.multiply(config[\"burgers\"], lattice_const)\n disl_center = [repeat_para.sum(axis=0)[0]*disl_center[0], repeat_para.sum(axis=0)[1]*disl_center[1]]\n b_vector = []\n for i in range(3):\n b = np.dot(burgers , np.transpose(frame_new)[i])\n b_vector.append(b)\n return disl_center, b_vector\n\n\ndef stress_field (new_elastic_constant, atom_coor, disl_center, S, B, s_theta, q_theta, b_vector):\n num_atom = atom_coor.shape[0]\n x_coor = atom_coor[:, 0] - disl_center[0]\n y_coor = atom_coor[:, 1] - disl_center[1]\n theta = np.arctan2(y_coor, 
x_coor).reshape((num_atom,1))\n R = np.sqrt(x_coor**2+y_coor**2).reshape((num_atom,1))\n iec = new_elastic_constant\n sigma = np.zeros([num_atom,9])\n for i,j,k,l,s in itertools.product(range(3),repeat=5):\n ij = four_tensor_matrix_to_list[(i, j)]\n ks = four_tensor_matrix_to_list[(k, s)]\n ij__ = voigt_two_to_one[(i, j)]\n __kl = voigt_two_to_one[(k, l)]\n C_ijkl = four_tensor_two_to_one[(ij__, __kl)]\n if l == 0:\n sigma[:,ij] = sigma[:,ij] + 1/(2*np.pi*R[:,0])*iec[C_ijkl]*b_vector[s]*(-np.cos(theta[:,0])*S[:,ks])\n for r in range(3):\n kr = four_tensor_matrix_to_list[(k, r)]\n rs = four_tensor_matrix_to_list[(r, s)]\n sigma[:,ij] = sigma[:,ij] + 1/(2*np.pi*R[:,0])*iec[C_ijkl]*b_vector[s]*\\\n (-np.sin(theta[:,0])*(s_theta[:,kr]*S[:,rs]+q_theta[:,kr]*B[:,rs]))\n \n if l == 1:\n sigma[:,ij] = sigma[:,ij] + 1/(2*np.pi*R[:,0])*iec[C_ijkl]*b_vector[s]*(-np.sin(theta[:,0])*S[:,ks])\n for r in range(3):\n kr = four_tensor_matrix_to_list[(k, r)]\n rs = four_tensor_matrix_to_list[(r, s)]\n sigma[:,ij] = sigma[:,ij] + 1/(2*np.pi*R[:,0])*iec[C_ijkl]*b_vector[s]*\\\n (np.cos(theta[:,0])*(s_theta[:,kr]*S[:,rs]+q_theta[:,kr]*B[:,rs]))\n return sigma\n\n\ndef displacement_field(atom_coor, disl_center, S, B, S_theta, Q_theta, b_vector):\n atom_num = atom_coor.shape[0]\n x_coor = atom_coor[:, 0] - disl_center[0]\n y_coor = atom_coor[:, 1] - disl_center[1]\n r = np.sqrt(x_coor**2+y_coor**2).reshape((atom_num,1))\n u = np.zeros([atom_num,3])\n for k, j in itertools.product(range(3), repeat = 2):\n kj = four_tensor_matrix_to_list[(k, j)]\n u[:,k] = u[:,k] + 1/(2*np.pi) * (-S[0,kj]*np.log(r[:,0]))*b_vector[j]\n for i in range(3):\n ki = four_tensor_matrix_to_list[(k, i)]\n ij = four_tensor_matrix_to_list[(i, j)]\n u[:,k] = u[:,k] + 1/(2*np.pi) * (S_theta[:,ki] * S[0,ij] + Q_theta[:,ki] * B[0,ij])*b_vector[j]\n\n return u\n\ndef partial_init(disl_center, partial_split_dis, b_vector, dislocation_type):\n if dislocation_type == 'screw_ca_pyrII':\n b_vectors = [np.array(b_vector)/2, np.array(b_vector)/2]\n disl_centers = [[disl_center[0]+partial_split_dis/2, disl_center[1]], \n [disl_center[0]-partial_split_dis/2, disl_center[1]]]\n return b_vectors, disl_centers\n \n\n\n# define a function for multiprocessing to calculate displacement for different dislocation line postions\ndef displacement_fields(i, s, q, S, B, atom_coor_ref, disl_center, S_list, Q_list, b_vector):\n # initialize b for pair dislocations\n if i%2 == 1:\n b_vector = -np.array(b_vector)\n\n # main func\n S_theta, s_theta = zero_theta_integrate(s, atom_coor_ref, disl_center, S_list)\n Q_theta, q_theta = zero_theta_integrate(q, atom_coor_ref, disl_center, Q_list)\n u_displacement_field = displacement_field(atom_coor_ref, disl_center, S, B, S_theta, Q_theta, b_vector)\n return u_displacement_field\n\n", "sub_path": "src/dislocation_boy/disl_aniso/aniso_disl_theory.py", "file_name": "aniso_disl_theory.py", "file_ext": "py", "file_size_in_byte": 4759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.multiply", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 40, "usage_type": 
"call"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 41, "usage_type": "name"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 42, "usage_type": "name"}, {"api_name": "voigt_notation.voigt_two_to_one", "line_number": 43, "usage_type": "name"}, {"api_name": "voigt_notation.voigt_two_to_one", "line_number": 44, "usage_type": "name"}, {"api_name": "voigt_notation.four_tensor_two_to_one", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 47, "usage_type": "call"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 49, "usage_type": "name"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 55, "usage_type": "call"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 57, "usage_type": "name"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 70, "usage_type": "call"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 72, "usage_type": "call"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 74, "usage_type": "name"}, {"api_name": "voigt_notation.four_tensor_matrix_to_list", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "integrate.zero_theta_integrate", "line_number": 96, "usage_type": "call"}, {"api_name": "integrate.zero_theta_integrate", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "553783350", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.12-x86_64/egg/hello52/run.py\n# Compiled at: 2018-12-04 11:56:25\n# Size of source mod 2**32: 1076 bytes\n__author__ = 'youliangzhang'\nimport click, six, hello52\n\ndef read_config(ctx, param, value):\n if not value:\n return {}\n import json\n\n def underline_dict(d):\n if not isinstance(d, dict):\n return d\n return dict(((k.replace('-', '_'), underline_dict(v)) for k, v in six.iteritems(d)))\n\n config = underline_dict(json.load(value))\n ctx.default_map = config\n return config\n\n\n@click.group(invoke_without_command=True)\n@click.option('-c', '--config', callback=read_config, type=(click.File('r')), help='a json file with default values for subcommands. 
{\"webui\": {\"port\":5001}}')\n@click.option('--debug', envvar='DEBUG', default=False, is_flag=True, help='debug mode')\n@click.version_option(version=(hello52.version.__version__), prog_name=(hello52.version.script_name))\n@click.pass_context\ndef cli(ctx, **kwargs):\n print('------------------------------')\n print(ctx)\n print('------------------------------')\n print(kwargs)\n print(dir(hello52))\n\n\ndef main():\n cli()\n\n\nif __name__ == '__main__':\n main()", "sub_path": "pycfiles/hello52-0.0.5-py3.7/run.cpython-37.py", "file_name": "run.cpython-37.py", "file_ext": "py", "file_size_in_byte": 1347, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "six.iteritems", "line_number": 19, "usage_type": "call"}, {"api_name": "json.load", "line_number": 21, "usage_type": "call"}, {"api_name": "click.group", "line_number": 26, "usage_type": "call"}, {"api_name": "click.option", "line_number": 27, "usage_type": "call"}, {"api_name": "click.File", "line_number": 27, "usage_type": "call"}, {"api_name": "click.option", "line_number": 28, "usage_type": "call"}, {"api_name": "click.version_option", "line_number": 29, "usage_type": "call"}, {"api_name": "hello52.version", "line_number": 29, "usage_type": "attribute"}, {"api_name": "click.pass_context", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "38120519", "text": "from typing import Dict, Any, List, Optional, Union\nimport torch.nn as nn\nfrom sciwing.infer.seq_label_inference.BaseSeqLabelInference import (\n BaseSeqLabelInference,\n)\nfrom sciwing.datasets.seq_labeling.base_seq_labeling import BaseSeqLabelingDataset\nfrom sciwing.metrics.token_cls_accuracy import TokenClassificationAccuracy\nfrom torch.utils.data import DataLoader\nfrom sciwing.utils.tensor_utils import move_to_device\nimport torch\nimport pandas as pd\nfrom sciwing.utils.vis_seq_tags import VisTagging\nimport wasabi\nfrom deprecated import deprecated\n\n\nclass ParscitInference(BaseSeqLabelInference):\n def __init__(\n self,\n model: nn.Module,\n model_filepath: str,\n dataset: BaseSeqLabelingDataset,\n device: Optional[Union[str, torch.device]] = torch.device(\"cpu\"),\n ):\n\n super(ParscitInference, self).__init__(\n model=model, model_filepath=model_filepath, dataset=dataset, device=device\n )\n\n self.msg_printer = wasabi.Printer()\n self.labelname2idx_mapping = self.dataset.get_classname2idx()\n self.idx2labelname_mapping = {\n idx: label_name for label_name, idx in self.labelname2idx_mapping.items()\n }\n self.metrics_calculator = TokenClassificationAccuracy(\n idx2labelname_mapping=self.idx2labelname_mapping\n )\n self.output_analytics = None\n self.output_df = None\n self.batch_size = 32\n self.load_model()\n\n num_categories = self.dataset.get_num_classes()\n categories = [self.idx2labelname_mapping[idx] for idx in range(num_categories)]\n self.seq_tagging_visualizer = VisTagging(tags=categories)\n\n def run_inference(self) -> Dict[str, Any]:\n loader = DataLoader(\n dataset=self.dataset, batch_size=self.batch_size, shuffle=False\n )\n output_analytics = {}\n sentences = [] # all the sentences that is seen till now\n predicted_tag_indices = []\n predicted_tag_names = [] # all the tags that are predicted for the sentences\n true_tag_indices = []\n true_tag_names = []\n\n for iter_dict in loader:\n iter_dict = move_to_device(iter_dict, cuda_device=self.device)\n model_output_dict = self.model_forward_on_iter_dict(iter_dict=iter_dict)\n self.metric_calc_on_iter_dict(\n 
iter_dict=iter_dict, model_output_dict=model_output_dict\n )\n batch_sentences = self.iter_dict_to_sentences(iter_dict=iter_dict)\n\n predicted_tags, predicted_tag_strings = self.model_output_dict_to_prediction_indices_names(\n model_output_dict=model_output_dict\n )\n true_tags, true_labels_strings = self.iter_dict_to_true_indices_names(\n iter_dict=iter_dict\n )\n\n sentences.extend(batch_sentences)\n\n predicted_tag_indices.extend(predicted_tags)\n predicted_tag_names.extend(predicted_tag_strings)\n true_tag_indices.extend(true_tags)\n true_tag_names.extend(true_labels_strings)\n\n output_analytics[\"true_tag_indices\"] = true_tag_indices\n output_analytics[\"predicted_tag_indices\"] = predicted_tag_indices\n output_analytics[\"true_tag_names\"] = true_tag_names\n output_analytics[\"predicted_tag_names\"] = predicted_tag_names\n output_analytics[\"sentences\"] = sentences\n return output_analytics\n\n def print_confusion_matrix(self) -> None:\n \"\"\" Print confusion matrix for the test datasets\n \"\"\"\n self.metrics_calculator.print_confusion_metrics(\n true_tag_indices=self.output_df[\"true_tag_indices\"].tolist(),\n predicted_tag_indices=self.output_df[\"predicted_tag_indices\"].tolist(),\n )\n\n def get_misclassified_sentences(\n self, first_class: int, second_class: int\n ) -> List[str]:\n \"\"\"This returns the true label misclassified as\n pred label idx\n\n Parameters\n ----------\n first_class : int\n The label index of the true class name\n second_class : int\n The label index of the predicted class name\n\n\n Returns\n -------\n List[str]\n A list of strings where the true class is classified as pred class.\n\n \"\"\"\n\n # get rows where true tag has first_class\n true_tag_indices = self.output_df.true_tag_indices.tolist()\n pred_tag_indices = self.output_df.predicted_tag_indices.tolist()\n\n indices = []\n\n for idx, (true_tag_index, pred_tag_index) in enumerate(\n zip(true_tag_indices, pred_tag_indices)\n ):\n true_tags_pred_tags = zip(true_tag_index, pred_tag_index)\n for true_tag, pred_tag in true_tags_pred_tags:\n if true_tag == first_class and pred_tag == second_class:\n indices.append(idx)\n break\n\n sentences = []\n\n for idx in indices:\n sentence = self.output_analytics[\"sentences\"][idx].split()\n true_labels = self.output_analytics[\"true_tag_names\"][idx].split()\n pred_labels = self.output_analytics[\"predicted_tag_names\"][idx].split()\n len_sentence = len(sentence)\n true_labels = true_labels[:len_sentence]\n pred_labels = pred_labels[:len_sentence]\n stylized_string_true = self.seq_tagging_visualizer.visualize_tokens(\n sentence, true_labels\n )\n stylized_string_predicted = self.seq_tagging_visualizer.visualize_tokens(\n sentence, pred_labels\n )\n\n sentence = (\n f\"GOLD LABELS \\n{'*' * 80} \\n{stylized_string_true} \\n\\n\"\n f\"PREDICTED LABELS \\n{'*' * 80} \\n{stylized_string_predicted}\\n\\n\"\n )\n sentences.append(sentence)\n\n return sentences\n\n @deprecated(reason=\"Generate report for paper will be removed in version 0.2\")\n def generate_report_for_paper(self):\n \"\"\" Generates just the fmeasures to be reported on paper\n \"\"\"\n paper_report, row_names = self.metrics_calculator.report_metrics(\n report_type=\"paper\"\n )\n return paper_report, row_names\n\n def model_forward_on_iter_dict(self, iter_dict: Dict[str, Any]):\n with torch.no_grad():\n model_output_dict = self.model(\n iter_dict, is_training=False, is_validation=False, is_test=True\n )\n return model_output_dict\n\n def metric_calc_on_iter_dict(\n self, iter_dict: Dict[str, 
Any], model_output_dict: Dict[str, Any]\n ):\n self.metrics_calculator.calc_metric(\n iter_dict=iter_dict, model_forward_dict=model_output_dict\n )\n\n def model_output_dict_to_prediction_indices_names(\n self, model_output_dict: Dict[str, Any]\n ) -> (List[int], List[str]):\n predicted_tags = model_output_dict[\"predicted_tags\"] # List[List[str]]\n predicted_tag_strings = []\n for predicted_tag in predicted_tags:\n pred_tag_string = self.dataset.get_class_names_from_indices(predicted_tag)\n pred_tag_string = \" \".join(pred_tag_string)\n predicted_tag_strings.append(pred_tag_string)\n return predicted_tags, predicted_tag_strings\n\n def iter_dict_to_sentences(self, iter_dict: Dict[str, Any]):\n tokens = iter_dict[\"tokens\"]\n tokens_list = tokens.tolist()\n batch_sentences = list(\n map(self.dataset.word_vocab.get_disp_sentence_from_indices, tokens_list)\n )\n return batch_sentences\n\n def iter_dict_to_true_indices_names(self, iter_dict: Dict[str, Any]):\n labels = iter_dict[\"label\"]\n labels_list = labels.tolist()\n true_labels_strings = []\n for tags in labels_list:\n true_tag_names = self.dataset.get_class_names_from_indices(tags)\n true_tag_names = \" \".join(true_tag_names)\n true_labels_strings.append(true_tag_names)\n\n true_labels_strings = list(true_labels_strings)\n return labels_list, true_labels_strings\n\n def infer_single_sentence(self, line: str) -> str:\n \"\"\" Return the tagged string for a single sentence\n\n Parameters\n ----------\n line : str\n A single sentence to be inferred\n\n Returns\n -------\n str\n Returns the tagged string for the line\n\n \"\"\"\n len_words = len(line.split())\n iter_dict = self.dataset.get_iter_dict(line)\n iter_dict = move_to_device(iter_dict, cuda_device=self.device)\n iter_dict[\"tokens\"] = iter_dict[\"tokens\"].unsqueeze(0)\n iter_dict[\"char_tokens\"] = iter_dict[\"char_tokens\"].unsqueeze(0)\n\n model_output_dict = self.model_forward_on_iter_dict(iter_dict=iter_dict)\n _, predicted_tag_names = self.model_output_dict_to_prediction_indices_names(\n model_output_dict=model_output_dict\n )\n predicted_tag_names = predicted_tag_names[0].split()\n len_pred_tag_names = len(predicted_tag_names)\n infer_len = len_words if len_words < len_pred_tag_names else len_pred_tag_names\n predicted_tag_names = predicted_tag_names[:infer_len]\n predicted_tag_names = \" \".join(predicted_tag_names)\n return predicted_tag_names\n\n def report_metrics(self):\n print(self.metrics_calculator.report_metrics())\n\n def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)\n", "sub_path": "sciwing/infer/seq_label_inference/parscit_inference.py", "file_name": "parscit_inference.py", "file_ext": "py", "file_size_in_byte": 9553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sciwing.infer.seq_label_inference.BaseSeqLabelInference.BaseSeqLabelInference", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "sciwing.datasets.seq_labeling.base_seq_labeling.BaseSeqLabelingDataset", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wasabi.Printer", 
"line_number": 30, "usage_type": "call"}, {"api_name": "sciwing.metrics.token_cls_accuracy.TokenClassificationAccuracy", "line_number": 35, "usage_type": "call"}, {"api_name": "sciwing.utils.vis_seq_tags.VisTagging", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 48, "usage_type": "call"}, {"api_name": "sciwing.utils.tensor_utils.move_to_device", "line_number": 59, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "deprecated.deprecated", "line_number": 155, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 164, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 165, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 180, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 189, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 189, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 197, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 197, "usage_type": "name"}, {"api_name": "sciwing.utils.tensor_utils.move_to_device", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 245, "usage_type": "call"}]} +{"seq_id": "648929512", "text": "\nimport os, json, inspect\nimport mimetypes\nfrom html2text import html2text\nfrom RestrictedPython import compile_restricted, safe_globals\nimport frappe\nimport frappe.utils\nimport frappe.utils.data\nfrom frappe.website.utils import (get_shade, get_toc, get_next_link)\nfrom frappe.modules import scrub\nfrom frappe.www.printview import get_visible_columns\nimport frappe.exceptions\n\nclass ServerScriptNotEnabled(frappe.PermissionError): pass\n\ndef safe_exec(script, _globals=None, _locals=None):\n\t# script reports must be enabled via site_config.json\n\tif not frappe.conf.server_script_enabled:\n\t\tfrappe.msgprint('Please Enable Server Scripts')\n\t\traise ServerScriptNotEnabled\n\n\t# build globals\n\texec_globals = get_safe_globals()\n\tif _globals:\n\t\texec_globals.update(_globals)\n\n\t# execute script compiled by RestrictedPython\n\texec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used\n\ndef get_safe_globals():\n\tdatautils = {}\n\tif frappe.db:\n\t\tdate_format = frappe.db.get_default(\"date_format\") or \"yyyy-mm-dd\"\n\telse:\n\t\tdate_format = 'yyyy-mm-dd'\n\n\tadd_module_properties(frappe.utils.data, datautils, lambda obj: hasattr(obj, \"__call__\"))\n\n\tif \"_\" in getattr(frappe.local, 'form_dict', {}):\n\t\tdel frappe.local.form_dict[\"_\"]\n\n\tuser = getattr(frappe.local, \"session\", None) and frappe.local.session.user or \"Guest\"\n\n\tout = frappe._dict(\n\t\t# make available limited methods of frappe\n\t\tjson = json,\n\t\tdict = dict,\n\t\tfrappe = frappe._dict(\n\t\t\t_ = frappe._,\n\t\t\t_dict = frappe._dict,\n\t\t\tflags = frappe.flags,\n\n\t\t\tformat = frappe.format_value,\n\t\t\tformat_value = frappe.format_value,\n\t\t\tdate_format = date_format,\n\t\t\tformat_date 
= frappe.utils.data.global_date_format,\n\t\t\tform_dict = getattr(frappe.local, 'form_dict', {}),\n\n\t\t\tget_meta = frappe.get_meta,\n\t\t\tget_doc = frappe.get_doc,\n\t\t\tget_cached_doc = frappe.get_cached_doc,\n\t\t\tget_list = frappe.get_list,\n\t\t\tget_all = frappe.get_all,\n\t\t\tget_system_settings = frappe.get_system_settings,\n\n\t\t\tutils = datautils,\n\t\t\tget_url = frappe.utils.get_url,\n\t\t\trender_template = frappe.render_template,\n\t\t\tmsgprint = frappe.msgprint,\n\n\t\t\tuser = user,\n\t\t\tget_fullname = frappe.utils.get_fullname,\n\t\t\tget_gravatar = frappe.utils.get_gravatar_url,\n\t\t\tfull_name = frappe.local.session.data.full_name if getattr(frappe.local, \"session\", None) else \"Guest\",\n\t\t\trequest = getattr(frappe.local, 'request', {}),\n\t\t\tsession = frappe._dict(\n\t\t\t\tuser = user,\n\t\t\t\tcsrf_token = frappe.local.session.data.csrf_token if getattr(frappe.local, \"session\", None) else ''\n\t\t\t),\n\t\t\tsocketio_port = frappe.conf.socketio_port,\n\t\t\tget_hooks = frappe.get_hooks,\n\t\t),\n\t\tstyle = frappe._dict(\n\t\t\tborder_color = '#d1d8dd'\n\t\t),\n\t\tget_toc = get_toc,\n\t\tget_next_link = get_next_link,\n\t\t_ = frappe._,\n\t\tget_shade = get_shade,\n\t\tscrub = scrub,\n\t\tguess_mimetype = mimetypes.guess_type,\n\t\thtml2text = html2text,\n\t\tdev_server = 1 if os.environ.get('DEV_SERVER', False) else 0\n\t)\n\n\tadd_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))\n\n\tif not frappe.flags.in_setup_help:\n\t\tout.get_visible_columns = get_visible_columns\n\t\tout.frappe.date_format = date_format\n\t\tout.frappe.db = frappe._dict(\n\t\t\tget_list = frappe.get_list,\n\t\t\tget_all = frappe.get_all,\n\t\t\tget_value = frappe.db.get_value,\n\t\t\tget_single_value = frappe.db.get_single_value,\n\t\t\tget_default = frappe.db.get_default,\n\t\t\tescape = frappe.db.escape,\n\t\t)\n\n\tif frappe.response:\n\t\tout.frappe.response = frappe.response\n\n\tout.update(safe_globals)\n\n\t# default writer allows write access\n\tout._write_ = _write\n\tout._getitem_ = _getitem\n\n\treturn out\n\ndef _getitem(obj, key):\n\t# guard function for RestrictedPython\n\t# allow any key to be accessed as long as it does not start with underscore\n\tif isinstance(key, str) and key.startswith('_'):\n\t\traise SyntaxError('Key starts with _')\n\treturn obj[key]\n\ndef _write(obj):\n\t# guard function for RestrictedPython\n\t# allow writing to any object\n\treturn obj\n\ndef add_module_properties(module, data, filter_method):\n\tfor key, obj in module.__dict__.items():\n\t\tif key.startswith(\"_\"):\n\t\t\t# ignore\n\t\t\tcontinue\n\n\t\tif filter_method(obj):\n\t\t\t# only allow functions\n\t\t\tdata[key] = obj\n", "sub_path": "frappe/utils/safe_exec.py", "file_name": "safe_exec.py", "file_ext": "py", "file_size_in_byte": 4148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "frappe.PermissionError", "line_number": 14, "usage_type": "attribute"}, {"api_name": "frappe.conf", "line_number": 18, "usage_type": "attribute"}, {"api_name": "frappe.msgprint", "line_number": 19, "usage_type": "call"}, {"api_name": "RestrictedPython.compile_restricted", "line_number": 28, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 32, "usage_type": "attribute"}, {"api_name": "frappe.db.get_default", "line_number": 33, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"frappe.utils", "line_number": 37, "usage_type": "attribute"}, {"api_name": "frappe.local", "line_number": 39, "usage_type": "attribute"}, {"api_name": "frappe.local", "line_number": 40, "usage_type": "attribute"}, {"api_name": "frappe.local", "line_number": 42, "usage_type": "attribute"}, {"api_name": "frappe._dict", "line_number": 44, "usage_type": "call"}, {"api_name": "frappe._dict", "line_number": 48, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 49, "usage_type": "attribute"}, {"api_name": "frappe._dict", "line_number": 50, "usage_type": "attribute"}, {"api_name": "frappe.flags", "line_number": 51, "usage_type": "attribute"}, {"api_name": "frappe.format_value", "line_number": 53, "usage_type": "attribute"}, {"api_name": "frappe.format_value", "line_number": 54, "usage_type": "attribute"}, {"api_name": "frappe.utils", "line_number": 56, "usage_type": "attribute"}, {"api_name": "frappe.local", "line_number": 57, "usage_type": "attribute"}, {"api_name": "frappe.get_meta", "line_number": 59, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 60, "usage_type": "attribute"}, {"api_name": "frappe.get_cached_doc", "line_number": 61, "usage_type": "attribute"}, {"api_name": "frappe.get_list", "line_number": 62, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 63, "usage_type": "attribute"}, {"api_name": "frappe.get_system_settings", "line_number": 64, "usage_type": "attribute"}, {"api_name": "frappe.utils", "line_number": 67, "usage_type": "attribute"}, {"api_name": "frappe.render_template", "line_number": 68, "usage_type": "attribute"}, {"api_name": "frappe.msgprint", "line_number": 69, "usage_type": "attribute"}, {"api_name": "frappe.utils", "line_number": 72, "usage_type": "attribute"}, {"api_name": "frappe.utils", "line_number": 73, "usage_type": "attribute"}, {"api_name": "frappe.local", "line_number": 74, "usage_type": "attribute"}, {"api_name": "frappe.local", "line_number": 75, "usage_type": "attribute"}, {"api_name": "frappe._dict", "line_number": 76, "usage_type": "call"}, {"api_name": "frappe.local", "line_number": 78, "usage_type": "attribute"}, {"api_name": "frappe.conf", "line_number": 80, "usage_type": "attribute"}, {"api_name": "frappe.get_hooks", "line_number": 81, "usage_type": "attribute"}, {"api_name": "frappe._dict", "line_number": 83, "usage_type": "call"}, {"api_name": "frappe.website.utils.get_toc", "line_number": 86, "usage_type": "name"}, {"api_name": "frappe.website.utils.get_next_link", "line_number": 87, "usage_type": "name"}, {"api_name": "frappe._", "line_number": 88, "usage_type": "attribute"}, {"api_name": "frappe.website.utils.get_shade", "line_number": 89, "usage_type": "name"}, {"api_name": "frappe.modules.scrub", "line_number": 90, "usage_type": "name"}, {"api_name": "mimetypes.guess_type", "line_number": 91, "usage_type": "attribute"}, {"api_name": "html2text.html2text", "line_number": 92, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 93, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 93, "usage_type": "attribute"}, {"api_name": "frappe.exceptions", "line_number": 96, "usage_type": "attribute"}, {"api_name": "inspect.isclass", "line_number": 96, "usage_type": "call"}, {"api_name": "frappe.flags", "line_number": 98, "usage_type": "attribute"}, {"api_name": "frappe.www.printview.get_visible_columns", "line_number": 99, "usage_type": "name"}, {"api_name": "frappe._dict", "line_number": 101, "usage_type": "call"}, {"api_name": "frappe.get_list", 
"line_number": 102, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 103, "usage_type": "attribute"}, {"api_name": "frappe.db", "line_number": 104, "usage_type": "attribute"}, {"api_name": "frappe.db", "line_number": 105, "usage_type": "attribute"}, {"api_name": "frappe.db", "line_number": 106, "usage_type": "attribute"}, {"api_name": "frappe.db", "line_number": 107, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 110, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 111, "usage_type": "attribute"}, {"api_name": "RestrictedPython.safe_globals", "line_number": 113, "usage_type": "argument"}]} +{"seq_id": "428014543", "text": "import time\nfrom selenium import webdriver\n\nclass UseBrowser:\n\n driver=None\n\n def __init__(self,browser_name):\n if browser_name=='Chrome':\n self.driver=webdriver.Chrome('../../chromedriver.exe')\n self.driver.maximize_window()\n self.driver.implicitly_wait(10)\n UseBrowser.driver=self.driver\n else:\n # 打开火狐浏览器,注意这里传输的是谷歌浏览器,因为本机并无火狐浏览器驱动\n self.driver=webdriver.firefox('../chromedriver.exe')\n\n @classmethod\n def quit(cls):\n cls.driver.quit()\n\n\n# if __name__=='__main__':\n# ub=UseBrowser()\n# time.sleep(3)\n# UseBrowser.quit()", "sub_path": "quote_auto/basic/usebrowser.py", "file_name": "usebrowser.py", "file_ext": "py", "file_size_in_byte": 703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.firefox", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "43800277", "text": "\"\"\"Test of the object-store container module.\"\"\"\nimport os\nimport subprocess\nimport sys\n\nfrom disk_objectstore import Container\n\nTHIS_FILE_DIR = os.path.dirname(os.path.realpath(__file__))\nCONCURRENT_DIR = os.path.join(THIS_FILE_DIR, 'concurrent_tests')\n\nNUM_WORKERS = 4\n\n\ndef test_concurrency(temp_dir):\n \"\"\"Test to run concurrently many workers creating objects, and at the same time one packer.\n\n This is needed to see that indeed these operations can happen at the same time.\n Moreover, this is needed to perform a full coverage of the code, since some code will\n be reached only during concurrent access to the object store (reading data while\n packing).\n \"\"\"\n packer_script = os.path.join(CONCURRENT_DIR, 'periodic_packer.py')\n worker_script = os.path.join(CONCURRENT_DIR, 'periodic_worker.py')\n\n # Create folder with the container and initialise it\n container_dir = os.path.join(temp_dir, 'container')\n Container(container_dir).init_container()\n\n # Create folder where each worker will write the MD5 of the objects it created,\n # so that others will read them.\n shared_dir = os.path.join(temp_dir, 'shared')\n os.mkdir(shared_dir)\n\n # Start the packer\n packer_proc = subprocess.Popen([sys.executable, packer_script, '-p', container_dir, '-r', '5', '-w', '0.83'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n # Start the workers\n worker_procs = []\n for worker_id in range(NUM_WORKERS):\n options = ['-r', '4', '-w', '0', '-p', container_dir, '-s', shared_dir]\n if worker_id % 2:\n # One every two will read in bulk, the other within a loop\n options += ['-b']\n worker_procs.append(\n subprocess.Popen([sys.executable, worker_script] + 
options, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n )\n\n packer_out, packer_err = packer_proc.communicate()\n worker_outs = []\n worker_errs = []\n for worker_proc in worker_procs:\n worker_out, worker_err = worker_proc.communicate()\n worker_outs.append(worker_out)\n worker_errs.append(worker_err)\n\n error_messages = []\n\n if packer_proc.returncode:\n error_messages.append('PACKER process failed with error code {}!'.format(packer_proc.returncode))\n error_messages.append('PACKER output:')\n error_messages.append(packer_out.decode('utf8'))\n error_messages.append('-' * 78)\n error_messages.append('PACKER error:')\n error_messages.append(packer_err.decode('utf8'))\n error_messages.append('=' * 78)\n\n for idx, (worker_proc, worker_out, worker_err) in enumerate(zip(worker_procs, worker_outs, worker_errs)):\n if worker_proc.returncode:\n error_messages.append('WORKER process #{} failed with error code {}!'.format(idx, worker_proc.returncode))\n error_messages.append('WORKER {} output:'.format(idx))\n error_messages.append(worker_out.decode('utf8'))\n error_messages.append('-' * 78)\n error_messages.append('WORKER {} error:'.format(idx))\n error_messages.append(worker_err.decode('utf8'))\n error_messages.append('=' * 78)\n\n error_string = 'At least one of the concurrent processes failed!\\nMessages:\\n' + '\\n'.join(error_messages)\n assert len(error_messages) == 0, error_string\n", "sub_path": "tests/test_concurrency.py", "file_name": "test_concurrency.py", "file_ext": "py", "file_size_in_byte": 3386, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "disk_objectstore.Container", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 32, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 35, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 47, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "433829588", "text": "from django.shortcuts import render\nfrom .models import *\nfrom .forms import *\nfrom django.views.generic import FormView,TemplateView,CreateView,ListView,UpdateView,DetailView\nfrom datatableview.views import XEditableDatatableView, 
DatatableView\nfrom datatableview import helpers, Datatable, columns\nimport csv\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView,View\nfrom django import template\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .helpers import *\nfrom django.http import HttpResponseRedirect\nfrom django.core import serializers\nimport json\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom datetime import datetime, timedelta, timezone\n\n\nclass AjaxableResponseMixin:\n \"\"\"\n Mixin to add AJAX support to a form. Must be used with an object-based FormView (e.g. CreateView)\n \"\"\"\n def form_invalid(self, form):\n response = super().form_invalid(form)\n if self.request.is_ajax():\n print(\"FORM ERRORS ARE:\",str(form.errors))\n return JsonResponse(form.errors, status=400)\n else:\n return response\n\n def form_valid(self, form):\n # We make sure to call the parent's form_valid() method because\n # it might do some processing (in the case of CreateView, it will\n # call form.save() for example).\n response = super().form_valid(form)\n if self.request.is_ajax():\n print(\"AjaxableResponseMixin REQUEST WAS AJAX**\")\n data = {\n 'pk': self.object.pk,\n }\n return JsonResponse(data)\n else:\n return response\n\n#Any TEP Staff protected views will inherit this class\n#TEP Staff need to be logged in with a django admin account\nclass TEPStaffRequired(LoginRequiredMixin):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_superuser: #TEPStaff must ne logged in as superuser\n print(\"IM HERE\")\n return render(request, template_name=\"tallyhq/permission_denied.html\")\n return super().dispatch(request, *args, **kwargs)\n\nclass TeacherLogin(FormView):\n template_name = 'tallyhq/teacherLogin.html'\n #template_name = 'tallyhq/generic_form.html'\n model = Teacher\n form_class = TeacherLoginForm\n exclude = ['phone','address']\n #success_url = '/tallyhq/teacherwaiver/?teacher'\n\n def post(self,request,*args,**kwargs):\n print (\"in post\")\n print (request.POST)\n form = self.get_form()\n first_name = request.POST.get('first_name').strip()\n last_name = request.POST.get('last_name').strip()\n email = request.POST.get('email').strip()\n school = request.POST.get('school')\n print (first_name,last_name,email,school)\n\n if school=='':\n print(\"no school selected\")\n return self.form_invalid(form)\n \n school_obj=School.objects.get(pk=int(school))\n school_active=school_obj.active\n print('school_obj',school_obj,school_active)\n if school_active==False:\n print(\"Inactive school!\")\n return self.form_invalid(form) \n\n num_records = Teacher.objects.filter(first_name=first_name,last_name=last_name,email=email,school=school).count()\n if num_records != 1 :\n print(\"NO SUCH RECORD EXISTS\")\n return self.form_invalid(form) \n\n else:\n print(\"I HAVE FOUND YOUR RECORD\")\n teacher = list(Teacher.objects.filter(first_name=first_name,last_name=last_name,email=email,school=school).values_list('pk', flat=True))[0]\n request.teacher_pk = teacher\n return super().form_valid(form)\n\n\n def get_success_url(self):\n a = self.request.teacher_pk\n url = '/tallyhq/teacherwaiver/'+str(a)\n return url\n\nclass TeacherWaiver(TemplateView):\n template_name = 'tallyhq/teacherSignPDF.html'\n def get_context_data(self, **kwargs):\n #Get the last object uploaded to the waiver model\n context = super(TeacherWaiver, self).get_context_data(**kwargs)\n try:\n last_waiver = Waiver.objects.latest('id')\n 
last_waiver_url = last_waiver.file.url\n print(\"last_waiver_url\",last_waiver_url)\n context['last_waiver_url']=last_waiver_url\n except:\n print(\"Failed to get the last waiver object, does it exist?\")\n pass\n return context\n\nclass TeacherShoppingReminders(TemplateView):\n template_name = 'tallyhq/teacherShoppingReminders.html'\n\nclass TeacherCheckout(CreateView):\n template_name = 'tallyhq/teacherCheckout.html'\n model = OrderItem\n form_class = OrderItemForm\n\n def get_context_data(self, **kwargs):\n formset = inlineformset_factory(Order,OrderItem, form=OrderItemForm, extra=len(count),max_num=1)\n data=super(TeacherCheckout,self).get_context_data(**kwargs)\n if self.request.POST:\n data['titles'] = formset(self.request.POST)\n else:\n data['titles'] = formset()\n data['stuff'] = Item.objects.all().filter(active=True).order_by('rank')\n data['password'] = ValidationPassword.objects.all().values_list('digest',flat=True).filter().first()\n return data\n\n def post(self, request, teacher_id, *args, **kwargs):\n t = Teacher.objects.all().filter(pk=teacher_id).first()\n o = Order(teacher=t)\n o.save()\n print(request.POST)\n for item in request.POST:\n if 'password' not in item:\n if 'csrfmiddlewaretoken' not in item:\n i = Item.objects.all().filter(active=True, name=item).first()\n units_taken = request.POST.get(item)\n print(\"units_taken\",units_taken)\n if units_taken!='' and str(units_taken)!='0':\n oi = OrderItem(order=o,units_taken=units_taken,item=i)\n oi.save()\n return redirect('/tallyhq/teacherordersuccess/')\n\nclass TeacherOrderSuccess(TemplateView):\n template_name = 'tallyhq/teacherOrderSuccess.html'\n\n\nclass ManageOrders(TEPStaffRequired,DatatableView):\n model=Order\n template_name = 'tallyhq/manageorders.html'\n class datatable_class(Datatable):\n action = columns.TextColumn(\"Details\",sources=None,processor='make_action_column')\n formatted_datetime = columns.TextColumn(\"Checkout Date\",sources='checkout_time',processor='format_datetime')\n def format_datetime(self,instance,**kwargs):\n loc = instance.checkout_time - timedelta(hours=5, minutes=0) #format & localize the time manually\n return loc.strftime(\"%b %d %Y, %I:%M %p\")\n\n def make_action_column(self,instance,**kwargs):\n the_pk = instance.pk\n my_list = \"\"\"\"\"\" % (the_pk)\n return my_list\n class Meta:\n structure_template = 'tallyhq/bootstrap_structure.html'\n footer=False\n columns=['id','formatted_datetime','teacher','downloaded']\n exclude=['password_hash','checkout_time']\n ordering=['-pk']\n def get_datatable_kwargs(self):\n kwargs = super(ManageOrders, self).get_datatable_kwargs()\n kwargs['url'] = '/tallyhq/manageorders/'\n return kwargs\n\n\nclass Dashboard(TEPStaffRequired,UpdateView):\n template_name = 'tallyhq/dashboard.html'\n model = ValidationPassword\n form_class = ValidationPasswordForm\n success_url = '/tallyhq/dashboard/'\n\n def get_object(self):\n try:\n return ValidationPassword.objects.get(pk=1)\n except:\n return None\n\n def get_context_data(self, **kwargs):\n context = super(Dashboard,self).get_context_data(**kwargs)\n try:\n last_waiver = Waiver.objects.latest('id')\n last_waiver_url = last_waiver.file.url\n except:\n last_waiver = None\n last_waiver_url = None \n recent_orders = Order.objects.order_by('-pk')\n context['waiver_info'] = last_waiver\n context['last_waiver_url'] = last_waiver_url\n context['datatable'] = ManageOrders().get_datatable() #Get the datatable from ManageOrders. 
Embed it in the dashboard.html\n try:\n time_diff_hours = ((datetime.now(timezone.utc)-self.get_object().uploaded_date).total_seconds())/3600.\n except:\n time_diff_hours = 0\n if time_diff_hours < 1:\n context['time_diff']='< 1 hour'\n else:\n context['time_diff']=str(int(time_diff_hours))+ ' hours'\n print (context)\n return context\n\n def form_valid(self, form):\n post = super().form_valid(form)\n record = ValidationPassword.objects.get(pk=1)\n record.uploaded_date = datetime.now()\n record.save()\n print(record)\n return (post)\n\nclass OrderView(TEPStaffRequired,TemplateView):\n template_name = 'tallyhq/orderview.html'\n\n\n\nclass ExportData(TEPStaffRequired,TemplateView):\n template_name = 'tallyhq/exportdata.html'\n def post(self,request,*args,**kwargs):\n\n if 'exportsincelast' in request.POST:\n return ExportOrderSinceLast(request, 'Orders_since_last_export',OrderItem,['order','item','units_taken'])\n if 'customexport' in request.POST:\n \n try:\n\n startdate = datetime.strptime(request.POST.get('startdate'), '%m/%d/%Y').date()\n enddate = datetime.strptime(request.POST.get('enddate'), '%m/%d/%Y').date()\n except Exception as e:\n startdate = datetime.strptime(request.POST.get('startdate'), '%Y-%m-%d').date()\n enddate = datetime.strptime(request.POST.get('enddate'), '%Y-%m-%d').date()\n print(str(e))\n return ExportOrder(request, 'Orders_between',OrderItem,['order','item','units_taken'],startdate,enddate)\n\nclass ImportData(TEPStaffRequired,TemplateView):\n template_name = 'tallyhq/importdata.html'\n\nclass DeleteData(TEPStaffRequired,TemplateView):\n template_name = 'tallyhq/deletedata.html'\n\n\nclass ManageTeachers(TEPStaffRequired,DatatableView):\n model = Teacher\n template_name = 'tallyhq/manageteachers.html'\n\n def post(self,request,*args,**kwargs):\n print(\"im posting\")\n context={}\n context['datatable']=self.get_datatable()\n context['error'] =''\n the_file = self.request.FILES['file']\n print(\"fn\",str(the_file))\n if str(the_file)!=\"Teacher Upload.csv\":\n context['error']=\"The file name must be called Teacher Upload.csv! You gave me \" + str(the_file) \n else:\n data_list = the_file.read().decode('utf-8').splitlines()\n for elem in data_list:\n r = elem.split(',')\n first,last,email,school = r[0].strip(),r[1].strip(),r[2].strip(),r[-1].strip()\n #lookup the school pk in the school model\n try:\n the_school = School.objects.get(name=school)\n except Exception as e:\n context['error'] +='
'+ \"The record: \" + elem+ \" contains a school that does not exist in TallyHQ, please add the school \" + str(school) + \" and then try again.\"\n \n new_row = Teacher(first_name=first,last_name=last,email=email,school=the_school)\n try:\n new_row.save()\n except Exception as e:\n context['error']+='
'+str(e)\n \n print(context)\n return render(self.request,template_name = self.template_name,context=context)\n\n class datatable_class(Datatable):\n school = columns.TextColumn(\"School\",sources=\"school__name\") #since school is foreign key in the teacher model, make it searchable by using source = school__name\n action = columns.TextColumn(\"Actions\",sources=None,processor='make_action_column')\n def make_action_column(self,instance,**kwargs):\n the_pk = instance.pk\n my_trashcan = \"\"\"\"\"\" % (the_pk)\n my_pencil = \"\"\"\"\"\" % (the_pk)\n button_pair = \"\"\"
%s
%s
\"\"\" % (my_pencil,my_trashcan)\n return button_pair\n class Meta:\n structure_template = 'tallyhq/bootstrap_structure.html'\n footer=False\n columns= [\n 'first_name',\n 'last_name',\n 'email',\n 'school'\n ]\n\n\nclass ManageSchools(TEPStaffRequired,DatatableView):\n model = School\n template_name = 'tallyhq/manageschools.html'\n\n def post(self,request,*args,**kwargs):\n print(\"im posting\")\n context={}\n context['datatable']=self.get_datatable()\n context['error']=''\n the_file = self.request.FILES['file']\n print(\"fn\",str(the_file))\n if str(the_file)!=\"schools.csv\":\n context['error']=\"The file name must be called schools.csv! You gave me \" + str(the_file) \n else:\n data_list = the_file.read().decode('utf-8').splitlines()\n for elem in data_list:\n new_row = School(name= elem.strip(),active=True)\n try:\n new_row.save()\n except Exception as e:\n context['error']+='
'+str(e)\n pass \n print(context)\n return render(self.request,template_name = self.template_name,context=context)\n\n class datatable_class(Datatable):\n action = columns.TextColumn(\"Actions\",sources=None,processor='make_action_column')\n def make_action_column(self,instance,**kwargs):\n the_pk = instance.pk\n my_trashcan = \"\"\"\"\"\" % (the_pk)\n my_pencil = \"\"\"\"\"\" % (the_pk)\n button_pair = \"\"\"
%s
%s
\"\"\" % (my_pencil,my_trashcan)\n return button_pair\n class Meta:\n structure_template = 'tallyhq/bootstrap_structure.html'\n footer=False\n labels={\n 'name':'School'\n }\n columns= [\n 'name',\n 'active',\n 'action'\n ]\n\n\nclass ManageItems(TEPStaffRequired,DatatableView):\n model=Item\n template_name = 'tallyhq/manageitems.html'\n class datatable_class(Datatable):\n action = columns.TextColumn(\"Actions\",sources=None,processor='make_action_column')\n def make_action_column(self,instance,**kwargs):\n the_pk = instance.pk\n my_trashcan = \"\"\"\"\"\" % (the_pk)\n my_pencil = \"\"\"\"\"\" % (the_pk)\n button_pair = \"\"\"
%s
%s
\"\"\" % (my_pencil,my_trashcan)\n return button_pair\n class Meta:\n structure_template = 'tallyhq/bootstrap_structure.html'\n footer=False\n ordering=['rank']\n exclude=['id']\n\n\nclass TestView(TEPStaffRequired,DatatableView):\n model = Teacher\n template_name = 'tallyhq/datatable_example_1.html'\n\n class datatable_class(Datatable):\n class Meta:\n structure_template = 'tallyhq/bootstrap_structure.html'\n footer=True\n\nclass TestView2(TEPStaffRequired,XEditableDatatableView):\n model = Teacher\n template_name = 'tallyhq/datatable_example_2.html'\n\n class datatable_class(Datatable):\n class Meta:\n structure_template = 'tallyhq/bootstrap_structure.html'\n footer=True\n columns= [\n 'id',\n 'first_name',\n 'last_name',\n ]\n processors={\n 'first_name':helpers.make_xeditable,\n 'last_name':helpers.make_xeditable,\n\n }\n\n#Any common logic in the basic CRUD views should go here in CommonView\n#This helper class allows me to customize the crud views a little bit by creating a form_header attribute & point them all to the same template \n#All my basic CRUD views will use this generic_form.html template or a generic_form_for_modal.html template \nclass CommonView(View):\n template_name = \"tallyhq/generic_form_for_modal_ajax.html\" #Use this one when you want all modal forms\n #template_name = \"tallyhq/generic_form.html\" #Use this one when you want all normal (non-modal) forms\n def get_context_data(self,*args,**kwargs):\n context = super().get_context_data(**kwargs)\n if hasattr(self,'form_header'):\n context['form_header'] = self.form_header\n else:\n context['form_header'] = \"Please fill out the form below\" #default value if the user doesnt specify a form_header\n return context\n\n##########################################\n############ BASIC CRUD VIEWS ############\n##########################################\n\nclass TeacherCreate(AjaxableResponseMixin,TEPStaffRequired,CommonView,CreateView):\n model = Teacher\n success_url = \"/tallyhq/manageteachers/\"\n fields = ['first_name','last_name','email','school']\n form_header = \"Create a new teacher by filling out the form below\"\n\nclass SchoolCreate(AjaxableResponseMixin,TEPStaffRequired,CommonView,CreateView):\n model = School\n success_url = \"/tallyhq/manageschools/\"\n fields = ['name','active']\n form_header = \"Create a new school by filling out the form below\"\n\n\nclass ItemCreate(AjaxableResponseMixin,TEPStaffRequired,CommonView,CreateView):\n model = Item\n success_url = \"/tallyhq/manageitems/\"\n fields = ['name','unit_label_name','max_units','qty_per_unit','rank','active']\n form_header = \"Create a new item by filling out the form below\"\n\nclass TeacherUpdate(AjaxableResponseMixin,TEPStaffRequired,CommonView,UpdateView):\n model = Teacher\n success_url = \"/tallyhq/manageteachers/\"\n fields = ['first_name','last_name','email','school']\n form_header = \"Update the teacher information in the form below\"\n\nclass SchoolUpdate(AjaxableResponseMixin,TEPStaffRequired,CommonView,UpdateView):\n model = School\n success_url = \"/tallyhq/manageschools/\"\n fields = ['name','active']\n form_header = \"Update the school information in the form below\"\n\nclass ItemUpdate(AjaxableResponseMixin,TEPStaffRequired,CommonView,UpdateView):\n model = Item\n success_url = \"/tallyhq/manageitems/\"\n fields = ['name','unit_label_name','max_units','qty_per_unit','rank','active']\n form_header = \"Update the item information in the form below\"\n\nclass TeacherDelete(AjaxableResponseMixin,TEPStaffRequired,CommonView,DeleteView):\n 
model = Teacher\n success_url = \"/tallyhq/manageteachers/\"\n form_header = \"Are you sure you want to delete this teacher?\"\n\nclass SchoolDelete(AjaxableResponseMixin,TEPStaffRequired,CommonView,DeleteView):\n model = School\n success_url = \"/tallyhq/manageschools/\"\n form_header = \"Are you sure you want to delete this school?\"\n\nclass ItemDelete(AjaxableResponseMixin,TEPStaffRequired,CommonView,DeleteView):\n model = Item\n success_url = \"/tallyhq/manageitems/\"\n form_header = \"Are you sure you want to delete this item?\"\n\nclass TeacherPurge(AjaxableResponseMixin, TEPStaffRequired,CommonView,DeleteView):\n model = Teacher\n success_url = \"/tallyhq/manageteachers/\"\n form_header = \"Are you sure you want to delete all the teachers?\"\n\n def get_object(self, queryset=None):\n obj=Teacher.objects.all()\n return obj\n\nclass SchoolPurge(AjaxableResponseMixin, TEPStaffRequired,CommonView,DeleteView):\n model = School\n success_url = \"/tallyhq/manageschools/\"\n form_header = \"Are you sure you want to delete all the schools?\"\n\n def get_object(self, queryset=None):\n obj=School.objects.all()\n return obj\n\nclass ItemPurge(AjaxableResponseMixin,TEPStaffRequired,CommonView,DeleteView):\n model = Item\n success_url = \"/tallyhq/manageitems/\"\n form_header = \"Are you sure you want to delete all the items?\"\n\n def get_object(self, queryset=None):\n obj=Item.objects.all()\n return obj\n\ndef OrderDetailView(request, pk):\n print(pk)\n obj = OrderItem.objects.filter(order__pk=pk).values('units_taken', 'item__name','order__pk','item__rank')\n lst = list(obj)\n sorted_list = sorted(lst, key = lambda i: i['item__rank'])\n data=json.dumps(sorted_list, cls=DjangoJSONEncoder)\n print(\"data\",data)\n return HttpResponse(data, content_type='applicaiton/json')\n\nclass WaiverUpload(TEPStaffRequired,CommonView,CreateView):\n success_url = \"/tallyhq/dashboard/\"\n model = Waiver\n fields = ['file']\n\n \ndef custom_404_handler(request, exception=None):\n return render(request,template_name='tallyhq/404.html', status=404)\n\n\n\nclass stevecheckout(CreateView):\n model = OrderItem\n form_class = OrderItemForm\n template_name = \"tallyhq/generic_form_many.html\"\n\n def get_context_data(self, **kwargs):\n context = super(stevecheckout, self).get_context_data(**kwargs)\n all_items = Item.objects.all()\n cnt = all_items.count()\n some_order = Order.objects.first()\n #LETS SET SOME INITIAL DATA FOR EACH FORM, ITS A LIST OF DICTS\n my_init =[ {'units_taken': '23','item':elem,'order':some_order} for elem in all_items ]\n print(\"my_init\",my_init)\n OrderItemFormFactory = modelformset_factory(OrderItem, fields=(\"order\", \"item\",\"units_taken\"),extra=cnt)\n context['formset'] = OrderItemFormFactory(queryset=OrderItem.objects.none(),initial=my_init) \n return context\n\n def post(self, request, *args, **kwargs):\n print(\"here a\")\n all_items = Item.objects.all()\n cnt = all_items.count()\n OrderItemFormFactory = modelformset_factory(OrderItem, fields=(\"order\", \"item\",\"units_taken\"),extra=cnt)\n formset = OrderItemFormFactory(request.POST)\n if formset.is_valid():\n print(\"here b\")\n return self.form_valid(formset)\n\n def form_valid(self, formset):\n for form in formset:\n print(\"form\",form)\n #form.save()\n #formset.save()\n return HttpResponseRedirect('/tallyhq/manageteachers')\n\n def form_invalid(self, formset):\n return self.render_to_response(self.get_context_data(formset=formset))\n", "sub_path": "backend/tep/tallyhq/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 23297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.http.JsonResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.views.generic.FormView", "line_number": 55, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 101, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 116, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 119, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 151, "usage_type": "name"}, {"api_name": "datatableview.views.DatatableView", "line_number": 155, "usage_type": "name"}, {"api_name": "datatableview.Datatable", "line_number": 158, "usage_type": "name"}, {"api_name": "datatableview.columns.TextColumn", "line_number": 159, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 159, "usage_type": "name"}, {"api_name": "datatableview.columns.TextColumn", "line_number": 160, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 160, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 162, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 172, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 181, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 206, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 206, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 206, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 219, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 219, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 224, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 229, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 239, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 239, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 240, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 240, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 242, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 243, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 243, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 247, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 250, "usage_type": "name"}, {"api_name": "datatableview.views.DatatableView", "line_number": 254, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 285, "usage_type": "call"}, {"api_name": "datatableview.Datatable", "line_number": 287, "usage_type": "name"}, {"api_name": "datatableview.columns.TextColumn", 
"line_number": 288, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 288, "usage_type": "name"}, {"api_name": "datatableview.columns.TextColumn", "line_number": 289, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 289, "usage_type": "name"}, {"api_name": "datatableview.columns", "line_number": 299, "usage_type": "name"}, {"api_name": "datatableview.views.DatatableView", "line_number": 307, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 330, "usage_type": "call"}, {"api_name": "datatableview.Datatable", "line_number": 332, "usage_type": "name"}, {"api_name": "datatableview.columns.TextColumn", "line_number": 333, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 333, "usage_type": "name"}, {"api_name": "datatableview.columns", "line_number": 346, "usage_type": "name"}, {"api_name": "datatableview.views.DatatableView", "line_number": 353, "usage_type": "name"}, {"api_name": "datatableview.Datatable", "line_number": 356, "usage_type": "name"}, {"api_name": "datatableview.columns.TextColumn", "line_number": 357, "usage_type": "call"}, {"api_name": "datatableview.columns", "line_number": 357, "usage_type": "name"}, {"api_name": "datatableview.views.DatatableView", "line_number": 371, "usage_type": "name"}, {"api_name": "datatableview.Datatable", "line_number": 375, "usage_type": "name"}, {"api_name": "datatableview.views.XEditableDatatableView", "line_number": 380, "usage_type": "name"}, {"api_name": "datatableview.Datatable", "line_number": 384, "usage_type": "name"}, {"api_name": "datatableview.columns", "line_number": 388, "usage_type": "name"}, {"api_name": "datatableview.helpers.make_xeditable", "line_number": 394, "usage_type": "attribute"}, {"api_name": "datatableview.helpers", "line_number": 394, "usage_type": "name"}, {"api_name": "datatableview.helpers.make_xeditable", "line_number": 395, "usage_type": "attribute"}, {"api_name": "datatableview.helpers", "line_number": 395, "usage_type": "name"}, {"api_name": "django.views.generic.edit.View", "line_number": 402, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 417, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 423, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 430, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 436, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 442, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 448, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 454, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 459, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 464, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 469, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 478, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 487, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 501, "usage_type": "call"}, {"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 501, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 503, "usage_type": "call"}, 
{"api_name": "django.views.generic.edit.CreateView", "line_number": 505, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 512, "usage_type": "call"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 516, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 548, "usage_type": "call"}]} +{"seq_id": "245002118", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Load data into MySQL table \n\nfrom __future__ import print_function\nimport dbi as dev\nfrom sqlalchemy import func\nfrom sqlalchemy.sql import label\nimport move_files\nimport sys\nimport os\nimport time\nimport shutil\nimport socket\nimport subprocess\nimport smtplib\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email import Encoders\n\n### Script to load paperdistiller with files from the paperfeed table\n### Checks /data4 for space, moves entire days of data, then loads into paperdistiller\n\n### Author: Immanuel Washington\n### Date: 11-23-14\n\ndef set_feed(source, output_host, output_dir, moved_to_distill=True):\n dbi = dev.DataBaseInterface()\n FEED = dbi.get_entry('feed', source)\n dbi.set_entry(FEED, 'host', output_host)\n dbi.set_entry(FEED, 'path', output_dir)\n dbi.set_entry(FEED, 'moved_to_distill', moved_to_distill)\n return None\n\ndef move_feed_files(input_host, input_paths, output_host, output_dir):\n #different from move_files, adds to feed\n named_host = socket.gethostname()\n destination = ''.join((output_host, ':', output_dir))\n if named_host == input_host:\n for source in input_paths:\n move_files.rsync_copy(source, destination)\n set_feed(source, output_host, output_dir)\n shutil.rmtree(source)\n else:\n ssh = dev.login_ssh(output_host)\n for source in input_paths:\n rsync_copy_command = '''rsync -ac {source} {destination}'''.format(source=source, destination=destination)\n rsync_del_command = '''rm -r {source}'''.format(source=source)\n ssh.exec_command(rsync_copy_command)\n set_feed(source, output_host, output_dir)\n ssh.exec_command(rsync_del_command)\n ssh.close()\n\n print('Completed transfer')\n return None\n\ndef count_days():\n dbi = dev.DataBaseInterface()\n s = dbi.Session()\n table = getattr(dev, 'Feed')\n count_FEEDs = s.query(getattr(table, 'julian_day'), label('count', func.count(getattr(table, 'julian_day'))))\\\n .group_by(getattr(table, 'julian_day')).all()\n all_FEEDs = s.query(table).all()\n good_days = tuple(getattr(FEED, 'julian_day') for FEED in count_FEEDs if getattr(FEED, 'count') == 288 or getattr(FEED, 'count') == 72)\n to_move = tuple(getattr(FEED, 'full_path') for FEED in all_FEEDs if getattr(FEED, 'julian_day') in good_days)\n s.close()\n\n for full_path in to_move:\n FEED = dbi.get_entry('feed', source)\n dbi.set_entry(FEED, 'ready_to_move', True)\n\n return None\n\ndef find_data():\n dbi = dev.DataBaseInterface()\n s = dbi.Session()\n table = getattr(dev, 'Feed')\n FEEDs = s.query(table).filter(getattr(table, 'moved_to_distill') == False).filter(getattr(table, 'ready_to_move') == True).all()\n s.close()\n\n #only move one day at a time\n feed_host = FEEDs[0].host\n feed_day = FEEDs[0].julian_day\n feed_paths = tuple(os.path.join(getattr(FEED, 'path'), getattr(FEED, 'filename'))\n for FEED in FEEDs if getattr(FEED, 'julian_day') == feed_day)\n feed_filenames = tuple(getattr(FEED, 'filename') for FEED in FEEDs if getattr(FEED, 'julian_day') == feed_day)\n\n return feed_paths, feed_host, feed_filenames\n\ndef email_paperfeed(files):\n server = 
smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n\n #Next, log in to the server\n server.login('paperfeed.paper@gmail.com', 'papercomesfrom1tree')\n\n header = 'From: PAPERFeed \\nSubject: FILES ARE BEING MOVED\\n'\n msgs = header\n #Send the mail\n for filename in files:\n msgs = ''.join(msgs, '\\n', filename, ' is being moved.\\n')\n\n server.sendmail('paperfeed.paper@gmail.com', 'immwa@sas.upenn.edu', msgs)\n server.sendmail('paperfeed.paper@gmail.com', 'jaguirre@sas.upenn.edu', msgs)\n server.sendmail('paperfeed.paper@gmail.com', 'saul.aryeh.kohn@gmail.com', msgs)\n server.sendmail('paperfeed.paper@gmail.com', 'jacobsda@sas.upenn.edu', msgs)\n\n server.quit()\n\n return None\n\ndef feed_bridge():\n #Minimum amount of space to move a day ~3.1TiB\n required_space = 1112373311360\n output_dir = '/data4/paper/feed/' #CHANGE WHEN KNOW WHERE DATA USUALLY IS STORED\n\n #Move if there is enough free space\n if move_files.enough_space(required_space, output_dir):\n #check how many days are in each\n count_days()\n #FIND DATA\n input_paths, input_host, input_filenames = find_data()\n #pick directory to output to\n output_host = 'folio'\n #MOVE DATA AND UPDATE PAPERFEED TABLE THAT FILES HAVE BEEN MOVED, AND THEIR NEW PATHS\n move_feed_files(input_host, input_paths, output_host, output_dir)\n #EMAIL PEOPLE THAT DATA IS BEING MOVED AND LOADED\n email_paperfeed(input_paths)\n #ADD_OBSERVATIONS.PY ON LIST OF DATA IN NEW LOCATION\n out_dir = os.path.join(output_dir, 'zen.*.uv')\n add_obs = 'python /usr/global/paper/CanopyVirtualEnvs/PAPER_Distiller/bin/add_observations.py {out_dir}'.format(out_dir=out_dir)\n #shell = True because wildcards can't be done without it\n subprocess.call(add_obs, shell=True)\n else:\n table = 'Feed'\n move_files.email_space(table)\n time.sleep(21600)\n\n return None\n\nif __name__ == '__main__':\n feed_bridge()\n", "sub_path": "paper/dev/feed_bridge.py", "file_name": "feed_bridge.py", "file_ext": "py", "file_size_in_byte": 5389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "dbi.DataBaseInterface", "line_number": 28, "usage_type": "call"}, {"api_name": "dbi.get_entry", "line_number": 29, "usage_type": "call"}, {"api_name": "dbi.set_entry", "line_number": 30, "usage_type": "call"}, {"api_name": "dbi.set_entry", "line_number": 31, "usage_type": "call"}, {"api_name": "dbi.set_entry", "line_number": 32, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 37, "usage_type": "call"}, {"api_name": "move_files.rsync_copy", "line_number": 41, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 43, "usage_type": "call"}, {"api_name": "dbi.login_ssh", "line_number": 45, "usage_type": "call"}, {"api_name": "dbi.DataBaseInterface", "line_number": 58, "usage_type": "call"}, {"api_name": "dbi.Session", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.label", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.func.count", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 61, "usage_type": "name"}, {"api_name": "dbi.get_entry", "line_number": 69, "usage_type": "call"}, {"api_name": "dbi.set_entry", "line_number": 70, "usage_type": "call"}, {"api_name": "dbi.DataBaseInterface", "line_number": 75, "usage_type": "call"}, {"api_name": "dbi.Session", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 84, "usage_type": "attribute"}, {"api_name": "smtplib.SMTP", "line_number": 91, "usage_type": "call"}, {"api_name": "move_files.enough_space", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 134, "usage_type": "call"}, {"api_name": "move_files.email_space", "line_number": 137, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "408233463", "text": "#!/usr/bin/env python\n\nimport sys\nfrom os.path import join, dirname\n\nsys.path.append(join(dirname(__file__), 'src'))\nfrom ez_setup import use_setuptools\nuse_setuptools()\nfrom setuptools import setup\n\nVERSION = \"\"\"\n1.0.0\n\"\"\".strip()\n\nDESCRIPTION = \"\"\"\nZoomba\n\"\"\".strip()\n\nCLASSIFIERS = \"\"\"\nDevelopment Status :: 5 - Production/Stable\nOperating System :: OS Independent\nProgramming Language :: Python\nTopic :: Software Development :: Communication\n\"\"\".strip().splitlines()\n\nsetup(name = 'zoomba',\n version = VERSION,\n description = 'Robot Framework mini-framework.',\n long_description = DESCRIPTION,\n url = 'https://github.com/Accruent/zoomba',\n license = 'apache',\n keywords = 'Robot Framework',\n platforms = 'any',\n install_requires= [\n \"robotframework==3.0\",\n \"robotframework-requests==0.4.5\",\n \"robotframework-selenium2library==1.7.4\",\n \"robotframework-extendedselenium2library==0.9.1\",\n \"robotframework-debuglibrary==0.8\",\n \"robotframework-databaselibrary==0.8.1\",\n \"robotframework-sudslibrary==0.8\",\n \"requests==2.11.1\",\n \"selenium==2.53.6\"\n ],\n classifiers = CLASSIFIERS,\n zip_safe = True,\n package_dir = {'' : 'src'},\n packages = ['Zoomba']\n )", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "ez_setup.use_setuptools", "line_number": 8, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "246196603", "text": "'''\nver3.1\nbug:1、每次自动生成2个数? 
(修复)\n 2、判断失败有问题,在全屏幕满了的时候,往不能相加的方向移动也误判为lose\n 3、移动判断当向某边滑动而不变化时,不应该生成一个数字\n 4、同3,判断不变化时会出现无法继续运行的bug,往相反方向也不生成数字\n\n添加功能:\n 1、计分\n 2、补全图片\n'''\n\nimport random\nimport sys\nimport time\n\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\nscreen = pygame.display.set_mode((400,500))\nscore = 0\n\n#生成初始图\ndef firstMap():\n a = [0, 0, 0, 0]\n b = [0, 0, 0, 0]\n c = [0, 0, 0, 0]\n d = [0, 0, 0, 0]\n allMap = [a,b,c,d]\n i = random.randint(0,3)\n allMap[i][i] = random.randrange(2,5,2)\n i = random.randint(0, 3)\n allMap[i][i] = random.randrange(2, 5, 2)\n return allMap\n\n#生成一个分数的图爿\ndef countScore(gg):\n font2 = pygame.font.SysFont('arial', 80)\n sur = font2.render(str(gg), True, (0, 0, 0,), (255, 255, 255))\n pygame.image.save(sur, 'cangku/sur.png')\n\n#数字匹配显示\ndef sucaiShow(num,j,i):\n s_map = num\n if s_map[j][i] == 2:\n sucai = pygame.image.load(\"cangku/2.png\")\n\n if s_map[j][i] == 4:\n sucai = pygame.image.load(\"cangku/4.png\")\n\n if s_map[j][i] == 8:\n sucai = pygame.image.load(\"cangku/8.png\")\n\n if s_map[j][i] == 16:\n sucai = pygame.image.load(\"cangku/16.png\")\n\n if s_map[j][i] == 32:\n sucai = pygame.image.load(\"cangku/32.png\")\n\n if s_map[j][i] == 64:\n sucai = pygame.image.load(\"cangku/64.png\")\n\n if s_map[j][i] == 128:\n sucai = pygame.image.load(\"cangku/128.png\")\n\n if s_map[j][i] == 256:\n sucai = pygame.image.load(\"cangku/256.png\")\n\n if s_map[j][i] == 512:\n sucai = pygame.image.load(\"cangku/512.png\")\n\n if s_map[j][i] == 1024:\n sucai = pygame.image.load(\"cangku/1024.png\")\n\n if s_map[j][i] == 2048:\n sucai = pygame.image.load(\"cangku/2048.png\")\n\n if score != 0:\n showscore = pygame.image.load(\"cangku/sur.png\")\n screen.blit(showscore,(0,0))\n\n screen.blit(sucai, (i * 100, j * 100 + 100))\n\n#数字图片显示在图上\ndef showMap(num):\n s_map = num\n startupScreen()\n for j in range(4):\n for i in range(4):\n if s_map[j][i] != 0:\n sucaiShow(s_map,j,i)\n\n#随机弹出一个2或4\ndef ariseOne(num):\n if (0 in num[0]) or (0 in num[1]) or (0 in num[2]) or (0 in num[3]):\n while True:\n j = random.randint(0,3)\n i = random.randint(0,3)\n if num[j][i] == 0:\n num[j][i] = random.randrange(2, 5, 2)\n break\n else:\n continue\n\n return num\n\n#矩阵旋转\ndef rotation_Clockwise(num):\n b = []\n\n for j in range(4):\n t = []\n for i in range(4):\n t.append(num[3-i][j])\n b.append(t)\n\n return b\n\ndef rotation_Anticlockwise(num):\n b = []\n\n for i in range(4):\n t = []\n for j in range(4):\n t.append(num[j][3-i])\n b.append(t)\n\n return b\n\n#把数组中的零先提取,在把数字移到一边\ndef byebyeZero_Right(num):\n b = num\n for j in range(4):\n for i in range(4):\n if b[j][i] == 0:\n middle = b[j]\n middle.pop(i)\n middle.insert(0, 0)\n b[j] = middle\n\n\n\n b = addNum_Right(b)\n b = ariseOne(b)\n\n return b\n\ndef byebyeZero_Left(num):\n b = num\n for j in range(4):\n for i in range(4):\n if b[j][3 - i] == 0:\n middle = b[j]\n middle.pop(3 - i)\n middle.insert(3, 0)\n b[j] = middle\n\n b = addNum_Left(b)\n b = ableMove(b, num)\n\n return b\n\ndef byebyeZero_Up(num):\n b = num\n zhuanzhi = rotation_Clockwise(b)\n zhuanzhi = byebyeZero_Right(zhuanzhi)\n\n zhuanzhi = rotation_Anticlockwise(zhuanzhi)\n\n return zhuanzhi\n\ndef byebyeZero_Down(num):\n b = num\n zhuanzhi = rotation_Clockwise(num)\n zhuanzhi = byebyeZero_Left(zhuanzhi)\n\n zhuanzhi = rotation_Anticlockwise(zhuanzhi)\n\n return zhuanzhi\n\n#实现相同数字翻倍\ndef addNum_Right(moveMap):\n k = moveMap\n global score\n for j in range(4):\n for i in range(4):\n if k[j][3-i] == k[j][3-(i+1)]:\n k[j][3-i] = k[j][3-i]*2\n score = score + k[j][3 
- i]\n middle = k[j]\n middle.pop(3-(i+1))\n middle.insert(0,0)\n k[j] = middle\n print(score)\n countScore(score)\n return k\n\ndef addNum_Left(moveMap):\n k = moveMap\n global score\n for j in range(4):\n for i in range(3):\n if k[j][i] == k[j][i+1]:\n k[j][i] = k[j][i+1]*2\n score = score + k[j][i]\n middle = k[j]\n middle.pop(i+1)\n middle.insert(3,0)\n k[j] = middle\n\n print(score)\n countScore(score)\n return k\n\n#是否随机弹出一个数字判断\ndef ableMove(display,olddisplay):\n if display == olddisplay:\n res = olddisplay\n else:\n display = ariseOne(display)\n res = display\n\n return res\n\n#界面初始化\ndef startupScreen():\n\n\n pygame.display.set_caption('2048 --test0')\n background_color = 0,0,200\n white = 238, 228, 218\n screen.fill(white)\n line_color = 0,0,0\n line_with = 8\n\n #画格子\n for i in range(1,4):\n pygame.draw.line(screen, line_color,(i*100,100),(i*100,500))\n\n for i in range(1,5):\n pygame.draw.line(screen, line_color,(0,i*100),(400,i*100))\n\n#操作判断\ndef buttonpandun(display):\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n\n elif event.type == KEYDOWN:\n\n if event.key == K_w or event.key == K_UP:\n display = byebyeZero_Up(display)\n\n elif event.key == K_s or event.key == K_DOWN:\n display = byebyeZero_Down(display)\n\n elif event.key == K_a or event.key == K_LEFT:\n display = byebyeZero_Left(display)\n\n elif event.key == K_d or event.key == K_RIGHT:\n display = byebyeZero_Right(display)\n\n return display\n\n#输判断\ndef losergg(num):\n pass\n\n#主\ndef main():\n startupScreen()\n\n #游戏结果显示\n firstallMap =firstMap()\n for i in firstallMap:\n print(i)\n\n display = firstallMap\n\n #屏幕显示结果\n showMap(display)\n while True:\n olddisplay = display\n display = buttonpandun(display)\n #display = ableMove(display,olddisplay)\n pygame.display.update()\n showMap(display)\n time.sleep(0.01)\n\nif __name__ == \"__main__\":\n main()\n\n\n", "sub_path": "2048ver3.1.py", "file_name": "2048ver3.1.py", "file_ext": "py", "file_size_in_byte": 6765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pygame.init", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.image.save", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 56, "usage_type": "attribute"}, 
{"api_name": "pygame.image.load", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 80, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 98, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 99, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 230, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 239, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 242, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 246, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 246, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 248, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 287, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 287, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 289, "usage_type": "call"}]} +{"seq_id": "434967122", "text": "#! /usr/bin/env python3.4\n\n# This project is my second approach to download all my ratings, this is based on https://github.com/mmihaljevic/flixter from mmihaljevic. 
\n#I have expanded on it and converted into python3\n#\n\n# First, Check Py version\n\nimport sys\nimport urllib.request\n\nif sys.version_info < (3, 0):\n print(\"must use python 3.0 or greater\")\n sys.exit()\nelse:\n\tpass\n\n\n# Second connect to flixster\n\n## 2.1 Trick flixster so we avoid 403 error \n# I know this function is deprecated haven't found other option though\n\nclass UrlOpener(urllib.request.FancyURLopener):\n\n\tversion = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'\n\n## 2.2 Get user input\n\n\nclass Rating(object):\n\n\tdef __init__(self, user_ID, num_ratings):\n\t\t\"\"\" defines url based on given user id and number of ratings \"\"\"\t\t\n\t\tif user_ID is None:\n\t\t\traise KeyError('user id cannot be empty')\t\n\t\tif num_ratings is None:\n\t\t\traise KeyError('number of ratings must be set')\n\t\t\n\t\tself.opener=UrlOpener()\n\t\tself.url = \"http://www.flixster.com/api/users/{}/movies/ratings?scoreTypes=numeric&page=1&limit={}\".format(user_ID, num_ratings)\n\t\tself.data = self.opener.open(self.url)\n\t\tself.content = self.data.read().decode(\"utf-8\")\n\t\tprint('Downloading from',self.url)\n\t\n\tdef read(self, filename):\n\t\tif filename:\n\t\t\twith open(filename, \"w\") as textfile:\n\t\t\t\tfor line in self.content:\n\t\t\t\t\ttextfile.write(line.rstrip())\n\t\t\t\t\t\n\t\t\n\n\n# user_ID=input('What is your user_ID?')\n# num_ratings=input('How many ratings?')\n\n\n# 2.3 Open url with the fancy opener\n\n\n\nmy_file=open('./exclude/user.txt','r') \nuser_ID=my_file.readline().replace('\\n','')\nnum_ratings=my_file.readline().replace('\\n','')\nmy_file.close()\n\nif user_ID and num_ratings:\n\t# print('You are downloading {} movies from user {}'.format(num_ratings,user_ID))\n\tpass\t\nelse:\n\t# print('Remember you have to setup a txt file called user.txt in a folder /exclude with the user id in the first line and the number of movies in the second line')\n\tpass\n\nBase=Rating(user_ID,num_ratings)\n\nimport json\nimport io\nimport csv\n\n# Parse results\n\n# class Remove_Blankline(object):\n# \tdef __init__(self,filename):\n# \t\tfilename=self.filename\n# \t\tfor line in open(filename):\n# \t\t\tline = line.rstrip()\n# \t\t\tif line != '':\n# \t\t\tprint(line)\n\nclass Result(object):\n def __init__(self):\n \"\"\" \"\"\"\n def get_raw_data(self, filename):\n \"\"\" returns raw data - movies list after parsing json \"\"\"\n f = io.open(filename, 'r', encoding='latin-1') \n data = json.loads(f.readline())\n f.close()\n return data\n\nfilename='./exclude/a.txt'\ncsv_file='./exclude/a.csv'\nBase.read(filename)\n# Remove_Blankline(filename)\n\nNew=Result()\nmovielist=New.get_raw_data(filename)\n\nclass Csv_parser(object):\n\tdef __init__(self):\n\t\t\"\"\"\"\"\"\n\tdef parse(self, filename):\n\t\tmy_csv=open(filename, 'w')\n\t\twriter=csv.writer(my_csv,quotechar='\\\"', quoting=csv.QUOTE_MINIMAL)\n\t\tmovielist2=[]\n\t\tfor i in range(len(movielist)):\n\t\t\tmovielist2.append(movielist[i]['movie']['title'])\n\t\tprint(movielist2)\n\t\tfor i in range(len(movielist)):\n\t\t\twriter.writerow(movielist[i]['movie']['title'])\n\nCsv_parser().parse(csv_file)\n\n# keys=a[0]['movie'].keys()\n# for key in keys:\n# \tprint(key,':',a[0]['movie'][key])\n\n\n", "sub_path": "flixter-dwnld.py", "file_name": "flixter-dwnld.py", "file_ext": "py", "file_size_in_byte": 3258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.version_info", "line_number": 
12, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 24, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 24, "usage_type": "name"}, {"api_name": "io.open", "line_number": 96, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 114, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 114, "usage_type": "attribute"}]} +{"seq_id": "139483073", "text": "import numpy as np\nimport pytest\nimport torch\n\nfrom raylab import envs\nfrom raylab.envs.wrappers import RandomIrrelevant\n\n\n@pytest.fixture(scope=\"module\")\ndef size():\n return 4\n\n\n@pytest.fixture(scope=\"module\")\ndef loc():\n return 0.0\n\n\n@pytest.fixture(scope=\"module\")\ndef scale():\n return 1.0\n\n\n@pytest.fixture\ndef wrapped(env, size, loc, scale):\n return RandomIrrelevant(env, size, loc, scale)\n\n\ndef test_observation_space(wrapped, size):\n base = wrapped.env.observation_space\n wrap = wrapped.observation_space\n\n assert wrap.dtype == base.dtype\n assert wrap.shape == (base.shape[0] + size,)\n\n\ndef test_seed(wrapped):\n base = wrapped.env\n seeds = base.seed() or []\n\n assert hasattr(wrapped, \"np_random\")\n if hasattr(base, \"np_random\"):\n assert wrapped.np_random is not base.np_random\n\n assert len(wrapped.seed()) == len(seeds) + 1\n\n\ndef test_reset(wrapped, size):\n base = wrapped.env.observation_space\n\n obs = wrapped.reset()\n assert obs in wrapped.observation_space\n assert obs.shape == (base.shape[0] + size,)\n\n\ndef test_step(wrapped, size):\n wrapped.reset()\n base = wrapped.env.observation_space\n\n action = wrapped.action_space.sample()\n obs, rew, done, info = wrapped.step(action)\n assert obs in wrapped.observation_space\n assert np.isscalar(rew)\n assert isinstance(done, bool)\n assert isinstance(info, dict)\n assert obs.shape == (base.shape[0] + size,)\n\n\n@pytest.fixture\ndef reward_fn(env_name, size, env_config):\n base = envs.get_reward_fn(env_name, env_config)\n wrapped = RandomIrrelevant.wrap_env_function(base, size)\n return wrapped\n\n\ndef test_wrapped_reward_fn(wrapped, reward_fn):\n done = True\n for _ in range(10):\n if done:\n obs = wrapped.reset()\n done = False\n\n action = wrapped.action_space.sample()\n new_obs, rew, done, _ = wrapped.step(action)\n\n rew_ = reward_fn(*map(torch.from_numpy, (obs, action, new_obs))).item()\n assert np.allclose(rew, rew_, atol=1e-5)\n\n obs = new_obs\n", "sub_path": "tests/raylab/envs/wrappers/test_random_irrelevant.py", "file_name": "test_random_irrelevant.py", "file_ext": "py", "file_size_in_byte": 2056, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pytest.fixture", "line_number": 9, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "call"}, {"api_name": "raylab.envs.wrappers.RandomIrrelevant", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.isscalar", "line_number": 63, "usage_type": "call"}, {"api_name": "raylab.envs.get_reward_fn", "line_number": 71, "usage_type": "call"}, {"api_name": "raylab.envs", "line_number": 71, "usage_type": "name"}, {"api_name": "raylab.envs.wrappers.RandomIrrelevant.wrap_env_function", "line_number": 72, 
"usage_type": "call"}, {"api_name": "raylab.envs.wrappers.RandomIrrelevant", "line_number": 72, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "214729034", "text": "#!/usr/bin/env python\n\n\"\"\"\nPlot official reco and true ehad histograms with official weights.\nAlso plot KRR reco ehad overlaid.\n\"\"\"\n\nfrom __future__ import print_function\nprint(__doc__)\n\nfrom ROOT import *\nfrom sklearn.externals import joblib\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\n#~ # helper function for calculating statistic moments of distributions\n#~ def integrator(f,data,freq):\n #~ diffs = np.roll(data,-1)-data\n #~ return (f(data[:-1])*freq[:-1]*diffs[:-1]).sum()\n\n#~ # function for returning weighted moments\n#~ def weighted_moments(data,freq):\n #~ freq_norm = freq/integrator(lambda x:1,data,freq)\n \n #~ exp_x = integrator(lambda x:x,data,freq_norm)\n #~ exp_x2 = integrator(lambda x:x**2,data,freq_norm)\n #~ exp_x4 = integrator(lambda x:x**4,data,freq_norm)\n \n #~ mean = exp_x\n #~ rms = integrator(lambda x: ((x-exp_x)/std)**2,data,freq_norm)\n #~ kurt = integrator(lambda x: ((x-exp_x)/std)**4,data,freq_norm)\n #~ skew = integrator(lambda x: ((x-exp_x)/std)**3,data,freq_norm)\n \n #~ return mean, rms, skew, kurt\n\ndef weighted_moments(data,freq):\n totw = freq.sum()\n mean = (data*freq).sum()/totw\n std = np.sqrt((freq*(data-mean)**2).sum()/totw)\n skew = (freq*((data-mean)/std)**3).sum()/totw\n kurt = (freq*((data-mean)/std)**4).sum()/totw - 3\n \n return mean, std, skew, kurt\n\n\n# parse command line arguments\nparser = argparse.ArgumentParser(description='Train a hadronic energy KRR with specified parameters and weighted sample.')\nparser.add_argument('-n','--number_of_files',type=int,default='10')\nargs = parser.parse_args()\n\n# specified parameters\nnfiles = args.number_of_files\n\ny = np.empty(1)\ny_off = np.empty(1)\ny_krr = np.empty(1)\nwei = np.empty(1)\nfor i in range(nfiles):\n y = np.append(y, joblib.load('partial_output/5d/sample_weighted_hadronic_true_a0.01gNonestep100offset{}.pkl'.format(i)))\n y_off = np.append(y_off, joblib.load('partial_output/5d/sample_weighted_hadronic_official_a0.01gNonestep100offset{}.pkl'.format(i)))\n y_krr = np.append(y_krr, joblib.load('partial_output/5d/sample_weighted_hadronic_krr_a0.01gNonestep100offset{}.pkl'.format(i)))\n wei = np.append(wei, joblib.load('partial_output/5d/sample_weighted_hadronic_weight_a0.01gNonestep100offset{}.pkl'.format(i)))\n\nos.system('mkdir -p plots/5d')\n# plot true ehad with reco overlaid\nfigehad = plt.figure(1)\nn, bins, patches = plt.hist(y, bins=np.linspace(-1,10,550), histtype='stepfilled', weights=wei, color='yellow', label='true')\nplt.hist(y_off, bins=bins, histtype='step', color='red', linewidth=2, weights=wei, label='prod4')\nplt.hist(y_krr, bins=bins, histtype='step', color='blue', linewidth=2, weights=wei, label='shallow learning')\nplt.xlim(0,2)\nplt.legend()\nplt.title('hadronic energy spectra')\nplt.xlabel('hadronic energy (GeV)')\nfigehad.savefig('plots/5d/ehad_spectra.pdf')\n\n# plot resolution\nfigres = plt.figure(2)\nres_off = (y_off-y)/y\nres_krr = (y_krr-y)/y\nn, bins, patches = plt.hist(res_off, bins=np.linspace(-2,2,200), linewidth=2, histtype='step', weights=wei, color='red', label='prod4')\nplt.hist(res_krr, bins=bins, 
histtype='step', color='blue', linewidth=2, weights=wei, label='shallow learning')\nplt.xlim(-1,1)\nplt.legend()\nplt.title('hadronic energy resolution')\nplt.xlabel('(reco-true)/true')\nplt.axvline(x=0., color='green', linestyle='--')\nfigres.savefig('plots/5d/ehad_resolution.pdf')\n\n# plot side by side\nfigsbs = plt.figure(figsize=(12,5))\nplt.subplot(121)\nn, bins, patches = plt.hist(y, bins=np.linspace(-1,10,550), histtype='stepfilled', weights=wei, color='yellow', label='true')\nplt.hist(y_off, bins=bins, histtype='step', color='red', linewidth=2, weights=wei, label='prod4')\nplt.hist(y_krr, bins=bins, histtype='step', color='blue', linewidth=2, weights=wei, label='shallow learning')\nplt.xlim(0,2)\nplt.legend()\nplt.title('hadronic energy spectra')\nplt.xlabel('hadronic energy (GeV)')\nplt.subplot(122)\nn, bins, patches = plt.hist(res_off, bins=np.linspace(-2,2,200), linewidth=2, histtype='step', weights=wei, color='red', label='prod4')  # re-capture bins here so both resolution histograms share this binning\nplt.hist(res_krr, bins=bins, histtype='step', color='blue', linewidth=2, weights=wei, label='shallow learning')\nplt.xlim(-1,1)\nplt.legend()\nplt.title('hadronic energy resolution')\nplt.xlabel('(reco-true)/true')\nplt.axvline(x=0., color='green', linestyle='--')\nfigsbs.savefig('plots/5d/ehad_spec_res_side_by_side.pdf')\nfigsbs.savefig('plots/5d/ehad_spec_res_side_by_side.png')\n#~ plt.show()\n\n# print out the statistic moments of resolution histograms\n#~ m1, m2, m3, m4 = weighted_moments(res_off, wei)\n#~ print(m1, m2, m3, m4)\n#~ m1, m2, m3, m4 = weighted_moments(res_krr, wei)\n#~ print(m1, m2, m3, m4)\n\n# use ROOT to double check...\nnoff, bins, patches = plt.hist(res_off, bins=np.linspace(-1,1,100), linewidth=2, histtype='step', weights=wei, color='red', label='prod4')\nnkrr, bins, patches = plt.hist(res_krr, bins=bins, histtype='step', color='blue', linewidth=2, weights=wei, label='shallow learning')\nh = TH1F('h','',len(bins)-1,bins[0],bins[-1])  # len(bins) edges define len(bins)-1 bins\nfor i in range(1,len(noff)+1):\n h.SetBinContent(i, noff[i-1])\nprint(h.GetMean(), h.GetRMS(), h.GetSkewness(), h.GetKurtosis(), h.Integral())\n#~ h.Draw()\nfor i in range(1,len(nkrr)+1):\n h.SetBinContent(i, nkrr[i-1])\nprint(h.GetMean(), h.GetRMS(), h.GetSkewness(), h.GetKurtosis(), h.Integral())\n#~ raw_input('press enter')\n", "sub_path": "energy_estimation/nd/fhc/krr/xsec_flux_weights/plot_reco_true_ehad_from_partial_5d.py", "file_name": "plot_reco_true_ehad_from_partial_5d.py", "file_ext": "py", "file_size_in_byte": 5286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.sqrt", "line_number": 42, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 64, "usage_type": "call"}, {"api_name": 
"sklearn.externals.joblib.load", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 65, "usage_type": "name"}, {"api_name": "os.system", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 95, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "633894624", "text": "\"\"\"Card and Hand objects to represent poker card and 5-card poker hands respectively\"\"\"\n\nfrom collections import Counter as counter\n\ncards = {\"2\" : 2, \"3\" : 3, \"4\" : 4, \"5\" : 5, \"6\" : 6, \"7\" : 7,\n \"8\" : 8, \"9\" : 9, \"0\" : 10, \"J\" : 11, \"Q\" : 12, \"K\" : 13, \"A\" : 14,}\n\nsuits = {\"D\" : \"Diamonds\", \"C\" : \"Clubs\", \"H\" : \"Hearts\", \"S\" : \"Spades\"}\n\nclass card():\n \"\"\"Card object with the card number/value and card suit\"\"\"\n\n def __init__(self, card):\n if type(card) is not str:\n raise TypeError(\"Parameter must be of type string\")\n if card[0] not in cards or card[1] not in suits or len(card) != 2:\n raise 
ValueError\n self.card = cards[card[0]]\n self.suit = suits[card[1]]\n\n def __str__(self):\n return str(self.card) + \" of \" + self.suit\n\nclass hand():\n \"\"\"Hand object with a list of five card objects\"\"\"\n \n def __init__(self, cards):\n if type(cards) is not list:\n raise TypeError(\"Parameter must be a list of 5 card objects\")\n if not all(isinstance(x, card) for x in cards) or len(cards) != 5:\n raise ValueError\n self.cards = cards\n self.ranking()\n\n def check_straight(self):\n \"\"\"Store the cards in the hand in sorted order by value\"\"\"\n return sorted([c.card for c in self.cards])\n\n def card_numbers(self):\n \"\"\"Store the cards in the hand without sorting by value\"\"\"\n return [c.card for c in self.cards]\n\n def card_suits(self):\n \"\"\"Store the cards in the hand without sorting by suits\"\"\"\n return [c.suit for c in self.cards]\n\n def ranking(self):\n \"\"\"Determine the poker hand ranking and sets the hand's ranking from 1 to 10\"\"\"\n\n sorted_card_numbers = self.check_straight() #Sorted card numbers\n card_numbers = self.card_numbers() #Card numbers\n card_suits = self.card_suits() #Card suits\n\n if len(counter(card_suits)) == 1:\n if (sorted_card_numbers == [10, 11, 12, 13, 14]):\n self.rank = 1 #Royal Flush\n self.high_card = 14\n self.kicker = [14]\n elif set(sorted_card_numbers) == set(range(sorted_card_numbers[0], sorted_card_numbers[0] + 5)):\n self.rank = 2 #Straight Flush\n self.high_card = sorted_card_numbers[4]\n self.kicker = [sorted_card_numbers[4]]\n else:\n self.rank = 5 #Flush\n self.high_card = sorted_card_numbers[4]\n self.kicker = sorted_card_numbers[:4][::-1]\n elif len(counter(card_numbers).keys()) == 2:\n if counter(card_numbers).most_common(1)[0][1] == 4:\n self.rank = 3 #4 of a kind\n self.high_card = counter(card_numbers).most_common(1)[0][0]\n self.kicker = [counter(card_numbers).most_common(2)[1][0]]\n else:\n self.rank = 4 #full house\n self.high_card = counter(card_numbers).most_common(1)[0][0]\n self.kicker = [counter(card_numbers).most_common(2)[1][0]]\n elif set(sorted_card_numbers) == set(range(sorted_card_numbers[0], sorted_card_numbers[0] + 5)):\n self.rank = 6 #Straight\n self.high_card = sorted_card_numbers[4]\n self.kicker = [sorted_card_numbers[4]]\n elif len(counter(card_numbers).keys()) == 3:\n if counter(card_numbers).most_common(1)[0][1] == 3:\n if 2 not in counter(card_numbers).values():\n kickers = counter(card_numbers).most_common(3)\n self.rank = 7 #3 of a kind\n self.high_card = counter(card_numbers).most_common(1)[0][0]\n self.kicker = [max(kickers[1][0], kickers[2][0])]\n self.kicker.append(min(kickers[1][0], kickers[2][0]))\n else:\n kickers = counter(card_numbers).most_common(3)\n self.rank = 8 #2 pair\n self.high_card1 = max(kickers[0][0], kickers[1][0])\n self.high_card2 = min(kickers[0][0], kickers[1][0])\n self.kicker = [counter(card_numbers).most_common(3)[2][0]]\n\n elif len(counter(card_numbers).keys()) == 4:\n self.rank = 9 #1 pair\n self.high_card = counter(card_numbers).most_common(1)[0][0]\n self.kicker = sorted([counter(card_numbers).most_common(4)[1][0],\n counter(card_numbers).most_common(4)[2][0],\n counter(card_numbers).most_common(4)[3][0]])\n else:\n self.rank = 10\n self.high_card = sorted_card_numbers[4]\n self.kicker = sorted_card_numbers[:4][::-1]\n", "sub_path": "card_objects.py", "file_name": "card_objects.py", "file_ext": "py", "file_size_in_byte": 4706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": 
[{"api_name": "collections.Counter", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 67, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 70, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 71, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 75, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 80, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 82, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 83, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 85, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 89, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 93, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 97, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 98, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 99, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "556309979", "text": "from django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import User\nfrom ctlweb.views.backend import send_user, remove_user\nfrom django.db.models.signals import post_save, post_delete\nfrom ctlweb.models import Components, Components_Cluster\n\n@receiver(post_save, sender=User)\ndef resend_user(sender, **kwargs):\n \"\"\"\n Sends the saved User to all registered Clusters.\n \"\"\"\n send_user(kwargs[u'instance'])\n\n@receiver(post_delete, sender=User)\ndef redelete_user(sender, **kwargs):\n \"\"\"\n Deletes the User from all registered Clusters.\n \"\"\"\n remove_user(kwargs[u'instance'])\n\n@receiver(post_save, sender=Components_Cluster)\n@receiver(post_delete, sender=Components_Cluster)\ndef components_renamed(sender, **kwargs):\n \"\"\"\n Create a new string for Components.name\n Also, if a component is deleted on a cluster, check if component is reachable.\n Exterminate it otherwise.\n \"\"\"\n try:\n comp = kwargs[u'instance'].component\n namelist = comp.components_cluster_set.\\\n values_list('name', flat=True).distinct().order_by('name')\n comp.names = ', '.join(namelist)\n comp.save()\n connections = comp.components_cluster_set.all()\n if list(connections) == []:\n comp.delete()\n except Components.DoesNotExist:\n pass #do nothing, as component has already been deleted\n\n@receiver(pre_save, sender=Components)\ndef components_active(sender, **kwargs):\n \"\"\"\n Set is_active \"False\"\n \"\"\"\n comp = kwargs[u'instance']\n if comp in Components.objects.all():\n comp2 = Components.objects.get(id=comp.id)\n if comp2.is_active :\n comp.is_active = False\n else :\n comp.is_active = False\n\n", "sub_path": "src/frontend/app/ctlweb/models/signal_handlers.py", "file_name": "signal_handlers.py", "file_ext": "py", "file_size_in_byte": 1776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ctlweb.views.backend.send_user", "line_number": 13, "usage_type": "call"}, 
{"api_name": "django.dispatch.receiver", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.contrib.auth.models.User", "line_number": 8, "usage_type": "name"}, {"api_name": "ctlweb.views.backend.remove_user", "line_number": 20, "usage_type": "call"}, {"api_name": "django.dispatch.receiver", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_delete", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.contrib.auth.models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "ctlweb.models.Components.DoesNotExist", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ctlweb.models.Components", "line_number": 39, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 22, "usage_type": "argument"}, {"api_name": "ctlweb.models.Components_Cluster", "line_number": 22, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_delete", "line_number": 23, "usage_type": "argument"}, {"api_name": "ctlweb.models.Components_Cluster", "line_number": 23, "usage_type": "name"}, {"api_name": "ctlweb.models.Components.objects.all", "line_number": 48, "usage_type": "call"}, {"api_name": "ctlweb.models.Components.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ctlweb.models.Components", "line_number": 48, "usage_type": "name"}, {"api_name": "ctlweb.models.Components.objects.get", "line_number": 49, "usage_type": "call"}, {"api_name": "ctlweb.models.Components.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "ctlweb.models.Components", "line_number": 49, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models.signals.pre_save", "line_number": 42, "usage_type": "argument"}, {"api_name": "ctlweb.models.Components", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "77043219", "text": "# Helpers for use in solutions\n\nimport functools\nimport itertools\nimport math\nimport time\n\ndef iterate(fn, val):\n while True:\n yield val\n val = fn(val)\n\ndef nth(seq, n):\n for i, val in enumerate(seq):\n if i == n:\n return val\n\ndef fix_point(seq):\n last = None\n for val in seq:\n if val == last:\n return last\n last = val\n\nclass Timer:\n def __enter__(self):\n self.start = time.time()\n return self\n\n def __exit__(self, *args):\n self.duration = time.time() - self.start\n\ndef product(nums):\n return functools.reduce((lambda a, b: a * b), nums)\n\ndef lcm2(a, b):\n return int(a * b / math.gcd(a, b))\n\ndef lcm(nums):\n return functools.reduce(lcm2, nums)\n\ndef modular_inverse(a, m):\n m0 = m\n y = 0\n x = 1\n\n if m == 1:\n return 0\n\n while a > 1:\n # q is quotient\n if m == 0:\n return None\n q = a // m\n\n t = m\n\n # m is remainder now, process\n # same as Euclid's algo\n m = a % m\n a = t\n t = y\n\n # Update x and y\n y = x - q * y\n x = t\n\n # Make x positive\n if x < 0:\n x += m0\n\n return x\n\ndef range2d(endx, endy):\n yield from itertools.product(range(endx), range(endy))\n", "sub_path": "helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 1290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": 
"time.time", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 34, "usage_type": "call"}, {"api_name": "math.gcd", "line_number": 37, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 40, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "424286777", "text": "# -*- coding:utf-8 -*-\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmpl.rcParams[\"font.sans-serif\"] = [\"SimHei\"]\nmpl.rcParams[\"axes.unicode_minus\"] = False\n\ntime = np.arange(1,11,0.5)\nmachinePower = np.power(time,2)+0.7\n\nplt.plot(time, machinePower,\n linestyle=\"-\",\n linewidth=2,\n color=\"r\")\n\nplt.xlim(10,1)\n\nplt.xlabel(\"使用年限\")\nplt.ylabel(\"机器功率\")\n\nplt.title(\"机器损耗曲线\")\n\nplt.grid(ls=\":\",lw=1,color=\"gray\",alpha=0.5)\n\nplt.show()\n", "sub_path": "C04/4.2.3_xlim.py", "file_name": "4.2.3_xlim.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "matplotlib.rcParams", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "165294781", "text": "from __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport time\nfrom PIL import Image\nimport tensorflow as tf\nimport numpy as np\nfrom scipy import misc\nimport shutil\n\nfrom model import ICNet_BN\nfrom tools import decode_labels\n\nIMG_MEAN = np.array((61.90575142, 69.36021519, 84.3502593), dtype=np.float32)\nnum_classes = 13\n\nmodel_train30k = './model/icnet_cityscapes_train_30k.npy'\nmodel_trainval90k = './model/icnet_cityscapes_trainval_90k.npy'\nmodel_train30k_bn = './model/icnet_cityscapes_train_30k_bnnomerge.npy'\nmodel_trainval90k_bn = './model/icnet_cityscapes_trainval_90k_bnnomerge.npy'\n\nIMG_PATH = '/media/luo/Dataset/CARLA/[divide_train][ICNET_BN] [RNNTEst]/[episode19][feature_map]/Data/RGB'\nsnapshot_dir = './model/CRALA_episode19'\nSAVE_DIR = './temp/test'\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description=\"Reproduced PSPNet\")\n 
parser.add_argument(\"--img-path\", type=str, default=IMG_PATH,\n help=\"Path to the RGB image file.\",\n required=False)\n parser.add_argument(\"--model\", type=str, default='others',\n help=\"Model to use.\",\n choices=['train', 'trainval', 'train_bn', 'trainval_bn', 'others'],\n required=False)\n parser.add_argument(\"--save-dir\", type=str, default=SAVE_DIR,\n help=\"Path to save output.\")\n parser.add_argument(\"--flipped-eval\", action=\"store_true\",\n help=\"whether to evaluate with flipped img.\")\n\n return parser.parse_args()\n\n\ndef calculate_time(sess, net, feed_dict):\n start = time.time()\n sess.run(net.layers['data'], feed_dict=feed_dict)\n data_time = time.time() - start\n\n start = time.time()\n sess.run(net.layers['conv6_cls'], feed_dict=feed_dict)\n total_time = time.time() - start\n\n inference_time = total_time - data_time\n\n print('inference time: {}'.format(inference_time))\n\n\ndef save(saver, sess, logdir, step):\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n saver.save(sess, checkpoint_path, global_step=step)\n print('The checkpoint has been created.')\n\n\ndef load(saver, sess, ckpt_path):\n saver.restore(sess, ckpt_path)\n print(\"Restored model parameters from {}\".format(ckpt_path))\n\n\ndef load_img(img_path):\n if os.path.isfile(img_path):\n print('successful load img: {0}'.format(img_path))\n else:\n print('not found file: {0}'.format(img_path))\n sys.exit(0)\n\n filename = img_path.split('/')[-1]\n img = misc.imread(img_path, mode='RGB')\n print('input image shape: ', img.shape)\n\n return img, filename\n\n\ndef preprocess(img):\n # Convert RGB to BGR\n # img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)\n # img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)\n # Extract mean.\n img -= IMG_MEAN\n\n img = tf.expand_dims(img, dim=0)\n\n return img\n\n\ndef check_input(img):\n ori_h, ori_w = img.get_shape().as_list()[1:3]\n\n if ori_h % 16 != 0 or ori_w % 16 != 0:\n new_h = (int(ori_h / 16) + 1) * 16\n new_w = (int(ori_w / 16) + 1) * 16\n shape = [new_h, new_w]\n\n img = tf.image.pad_to_bounding_box(img, 0, 0, new_h, new_w)\n\n print('Image shape cannot divided by 16, padding to ({0}, {1})'.format(new_h, new_w))\n else:\n shape = [ori_h, ori_w]\n\n return img, shape\n\n\ndef Get_bilinearInterp_value_of_PointWithIndex(items,xy):\n \"\"\"\n\n :param items: 3 dim array\n :param xy: xy_index\n :return: points_value after bilinear interp\n \"\"\"\n xy_floor = np.floor(xy)\n x_floor = xy_floor[:, 0]\n y_floor = xy_floor[:, 1]\n xy_ceil = np.ceil(xy)\n x_ceil = xy_ceil[:, 0]\n y_ceil = xy_ceil[:, 1]\n # Get valid RGB index\n\n RGBindex_A = (np.int32(y_ceil), np.int32(x_floor))\n RGBindex_B = (np.int32(y_ceil), np.int32(x_ceil))\n RGBindex_C = (np.int32(y_floor), np.int32(x_ceil))\n RGBindex_D = (np.int32(y_floor), np.int32(x_floor))\n x0 = (xy[:, 0] - x_floor)\n x1 = (x_ceil - xy[:, 0])\n y0 = (xy[:, 1] - y_floor)\n y1 = (y_ceil - xy[:, 1])\n x0 = np.expand_dims(x0, axis=1)\n x1 = np.expand_dims(x1, axis=1)\n y0 = np.expand_dims(y0, axis=1)\n y1 = np.expand_dims(y1, axis=1)\n valueArray = items[RGBindex_A] * x1 * y0 + items[RGBindex_B] * x0 * y0 + items[\n RGBindex_C] * x0 * y1 + items[RGBindex_D] * x1 * y1\n return valueArray\n\ndef Get_around_value_of_PointWithIndex(items,xy):\n Index=(np.int32(xy[:,1]),np.int32(xy[:,0]))\n return items[Index]\n\n\ndef main():\n args = get_arguments()\n shape = [1024, 1024, 3]\n x = 
tf.placeholder(dtype=tf.float32, shape=shape)\n # imgshape=tf.placeholder(dtype=tf.int32,shape=[3])\n # x = tf.placeholder(dtype=tf.float32, shape=imgshape)\n img_tf = preprocess(x)\n img_tf, n_shape = check_input(img_tf)\n\n # Create network.\n if 1:\n with tf.variable_scope('ICNET'):\n net = ICNet_BN({'data': img_tf}, is_training=False, num_classes=num_classes)\n # elif args.model == 'others':\n # net = ICNet_BN({'data': img_tf}, num_classes=num_classes)\n # else:\n # net = ICNet({'data': img_tf}, num_classes=num_classes)\n\n raw_output = tf.nn.softmax(net.layers['conv6_cls'])\n feature_output = net.layers['sub12_sum_interp']\n # Predictions.\n out_feature = tf.image.resize_bilinear(feature_output, size=n_shape, align_corners=True)\n raw_output_up = tf.image.resize_bilinear(raw_output, size=n_shape, align_corners=True)\n raw_output_P = tf.image.crop_to_bounding_box(raw_output_up, 0, 0, shape[0], shape[1])\n print(raw_output_P)\n raw_output_ID = tf.argmax(raw_output_P, dimension=3)\n pred = decode_labels(raw_output_ID, shape, num_classes)\n\n # Init tf Session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n\n sess.run(init)\n\n var_list = tf.trainable_variables()\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n\n if args.model == 'train':\n print('Restore from train30k model...')\n net.load(model_train30k, sess)\n elif args.model == 'trainval':\n print('Restore from trainval90k model...')\n net.load(model_trainval90k, sess)\n elif args.model == 'train_bn':\n print('Restore from train30k bnnomerge model...')\n net.load(model_train30k_bn, sess)\n elif args.model == 'trainval_bn':\n print('Restore from trainval90k bnnomerge model...')\n net.load(model_trainval90k_bn, sess)\n else:\n ckpt = tf.train.get_checkpoint_state(snapshot_dir)\n if ckpt and ckpt.model_checkpoint_path:\n loader = tf.train.Saver(var_list=var_list)\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n load(loader, sess, ckpt.model_checkpoint_path)\n\n camera_dir_list = os.listdir(args.img_path)\n frame_dir = os.path.join(args.img_path, camera_dir_list[0])\n frame_list=os.listdir(frame_dir)\n\n for frame in frame_list:\n Points_feature=[]\n\n for camera_dir in camera_dir_list:\n img_dir=os.path.join(args.img_path,camera_dir)\n img_path=os.path.join(img_dir,frame)\n img, filename = load_img(img_path)\n\n\n pointsIndex_path=img_path.replace('RGB','img_index').replace('png','npy')\n pointsIndex=np.load(pointsIndex_path)\n xyz=pointsIndex[:,:3]\n xy_index=pointsIndex[:,3:]\n\n pred_ID, features = sess.run([raw_output_ID[0], out_feature[0]], feed_dict={x: img})\n pred_ID = np.repeat(np.expand_dims(pred_ID, axis=2), 3, axis=2)\n\n points_feature=Get_bilinearInterp_value_of_PointWithIndex(features,xy_index)\n points_feature=np.concatenate([xyz,points_feature],axis=1)\n Points_feature.append(points_feature)\n\n\n Infer_dir = img_dir.replace('RGB', 'Infer_sem')\n if not os.path.exists(Infer_dir):\n os.makedirs(Infer_dir)\n feature_dir = args.img_path.replace('RGB', 'points_feature')\n if not os.path.exists(feature_dir):\n os.makedirs(feature_dir)\n\n misc.imsave(img_path.replace('RGB', 'Infer_sem'), np.uint8(pred_ID))\n\n Points_feature=np.concatenate(Points_feature,axis=0)\n np.save(os.path.join(feature_dir,frame.replace('.png','')), 
np.float32(Points_feature))\n pass\n\nif __name__ == '__main__':\n main()\n", "sub_path": "merge_tools/Get_pointFeature.py", "file_name": "Get_pointFeature.py", "file_ext": "py", "file_size_in_byte": 8743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 80, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 83, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 83, "usage_type": "name"}, {"api_name": "tensorflow.expand_dims", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.image.pad_to_bounding_box", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 157, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 165, "usage_type": "call"}, {"api_name": "model.ICNet_BN", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize_bilinear", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize_bilinear", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tensorflow.image.crop_to_bounding_box", "line_number": 177, "usage_type": "call"}, {"api_name": "tensorflow.image", 
"line_number": 177, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 179, "usage_type": "call"}, {"api_name": "tools.decode_labels", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 185, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.trainable_variables", "line_number": 190, "usage_type": "call"}, {"api_name": "tensorflow.global_variables", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 209, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 209, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 211, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 211, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 246, "usage_type": "call"}, {"api_name": "scipy.misc.imsave", "line_number": 248, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 248, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 251, "usage_type": "call"}]} +{"seq_id": "33842909", "text": "import os\nimport json\n\n\nclass Configuration(object):\n \"\"\"\n Class to edit configuration file.\n \"\"\"\n GMAPS_KEY = \"gmaps\"\n DATABASES = \"databases\"\n TOKEN = \"token\"\n\n _CONFIG_FILENAME = \"config.json\"\n _CONFIG_FILE_PATH = \"/etc/quick/%s\" % _CONFIG_FILENAME\n _CONFIG_DIR = \"/etc/quick/\"\n _MONGO_OBJECT_NAME = \"mongodb\"\n _MONGO_DATABASES = \"databases\"\n _GMAPS = \"gmaps\"\n _TOKEN = \"token\"\n _FILE_TEMPLATE = {\"databases\": []}\n\n @staticmethod\n def checkRoot():\n 
\"\"\"\n Ensures that the user currently executing is the root user.\n @throws If not the root an exception is thrown\n \"\"\"\n if os.getuid() != 0:\n raise Exception(\"Must be executed with root privileges.\")\n\n\n def makeConfigFile(self):\n \"\"\"\n Creates the configuration file where settings and configurations\n are stored.\n \"\"\"\n Configuration.checkRoot()\n if not os.path.exists(self._CONFIG_DIR):\n os.mkdir(self._CONFIG_DIR)\n\n # Let user know that this configration file already exists\n # and that proceeding will overwrite the current configuration\n if os.path.exists(self._CONFIG_FILE_PATH):\n print(\"%s already exists. Overwrite? y/n\" % self._CONFIG_FILE_PATH)\n response = raw_input()\n if response != \"y\":\n return\n print(\"Overwriting... %s\" % self._CONFIG_FILE_PATH)\n with open(self._CONFIG_FILE_PATH, \"w+\") as fp:\n json.dump(self._FILE_TEMPLATE, fp, indent=2)\n print(\"Configuration file successfully created.\")\n\n\n def addMongoDatabase(self, uri, port, database, username=None, password=None):\n \"\"\"\n Adds basic details for a Mongo database to the configuration file.\n\n @param uri:(str) The URI to find the database.\n\n @param port:(int) The port the MongoDB instance is running on.\n\n @param database:(str) The name of the database to connect to.\n\n @param username:(str) The username.\n\n @param password:(str) The password.\n \"\"\"\n fileContents = self.__readConfigFile()\n newDatabase = {\"uri\": uri, \"port\": port, \"database\": database}\n\n if username != None and password != None:\n newDatabase[\"username\"] = username\n newDatabase[\"password\"] = password\n\n fileContents[self._MONGO_DATABASES].append(newDatabase)\n self.__writeConfigFile(fileContents)\n\n def deleteMongoDatabase(self, uri, port, database):\n \"\"\"\n Removes details for a database, from the databases list in the\n configuration file.\n\n @param uri:(str) The database uri.\n\n @param port:(int) The port.\n\n @param database:(str) The name of the database.\n \"\"\"\n fileContents = self.__readConfigFile()\n databases = fileContents[self._MONGO_DATABASES]\n dbToCheck = {\"uri\": uri, \"port\": port, \"database\": database}\n for (i, value) in enumerate(databases):\n if (self.__dbCompare(value, dbToCheck)):\n del databases[i]\n print(databases)\n\n fileContents[self._MONGO_DATABASES] = databases\n self.__writeConfigFile(fileContents)\n\n def __dbCompare(self, db1, db2):\n \"\"\"\n Compares the details of two databases, to check if they are the same.\n \"\"\"\n if db1[\"uri\"] == db2[\"uri\"] and \\\n db1[\"port\"] == db2[\"port\"] and \\\n db1[\"database\"] == db2[\"database\"]:\n return True\n else:\n return False\n\n\n def addGoogleMapsKey(self, key):\n \"\"\"\n Adds a Google Maps API key to the configuration file.\n\n @param key:(str) The Google Maps API key to add to the configuration file.\n \"\"\"\n fileContents = self.__readConfigFile()\n fileContents[self._GMAPS] = key\n self.__writeConfigFile(fileContents)\n\n def deleteGoogleMapsKey(self):\n \"\"\"\n Deletes the Google Maps API key in the configuration file\n \"\"\"\n fileContents = self.__readConfigFile()\n del fileContents[self._GMAPS]\n self.__writeConfigFile(fileContents)\n\n\n def addTokenSecretKey(self, secret):\n \"\"\"\n Adds the token secret to the configuration file.\n\n @param secret:(str) The token secret key.\n \"\"\"\n fileContents = self.__readConfigFile()\n newTokenSecret = {\"secret\": secret}\n fileContents[self._TOKEN] = newTokenSecret\n self.__writeConfigFile(fileContents)\n\n\n def 
deleteTokenSecretKey(self):\n \"\"\"\n Deletes the token secret from the configuration file.\n \"\"\"\n fileContents = self.__readConfigFile()\n del fileContents[self._TOKEN]\n self.__writeConfigFile(fileContents)\n\n def read(self, prop):\n \"\"\"\n Reads a property or properties from the config file.\n If the property exists it will be returned, otherwise if\n the property doesn't exist then the method will silently fail\n and None will be returned.\n\n @param prop:(list) The property or properties to retrieve.\n @return list of the properties.\n \"\"\"\n fileContents = self.__readConfigFile()\n if isinstance(prop, list):\n data = []\n for p in prop:\n if p in fileContents:\n data.append(fileContents[p])\n return data\n raise ValueError(\"Expected type 'list' got %s\" % type(prop))\n\n def __readConfigFile(self):\n \"\"\"\n Read from the configuration file defined by __CONFIG_FILE_PATH.\n \"\"\"\n with open(self._CONFIG_FILE_PATH) as fp:\n return json.load(fp)\n\n\n def __writeConfigFile(self, contents):\n \"\"\"\n Write to the configuration file defined by __CONFIG_FILE_PATH.\n Note: must have root priveleges.\n \"\"\"\n Configuration.checkRoot()\n with open(self._CONFIG_FILE_PATH, \"w+\") as fp:\n contents = json.dumps(contents, indent=2, sort_keys=True)\n fp.write(contents)\n", "sub_path": "QuickProactive/proactive/config/configuration.py", "file_name": "configuration.py", "file_ext": "py", "file_size_in_byte": 5517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.getuid", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 50, "usage_type": "call"}, {"api_name": "json.load", "line_number": 175, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "148880869", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport logging\n\nfrom telegram.ext import Dispatcher, CommandHandler\n\nfrom utils.callback import callback_delete_message\nfrom utils.config_loader import config\nfrom utils.restricted import restricted\n\nlogger = logging.getLogger(__name__)\n\n\ndef init(dispatcher: Dispatcher):\n \"\"\"𝗚𝗲𝗿𝗲𝗸𝗹𝗶𝗹𝗶𝗸𝗹𝗲𝗿𝗶 𝗦𝗮𝗴𝗹𝗮.\"\"\"\n dispatcher.add_handler(CommandHandler('start', start))\n\n\n@restricted\ndef start(update, context):\n rsp = update.message.reply_text('🔺 𝗢𝗻𝗰𝗲 𝗦𝗔 𝗱𝗼𝘀𝘆𝗮𝗹𝗮𝗿ı𝗻ı 𝗶ç𝗲𝗿𝗲𝗻 𝗯𝗶𝗿 𝗭𝗜𝗣 𝗮𝗿ş𝗶𝘃𝗶 𝗴ö𝗻𝗱𝗲𝗿𝗶𝗻 𝘃𝗲 𝗸𝗼��𝘂𝘆𝗮 /sa 𝗲𝗸𝗹𝗲𝘆𝗶𝗻.\\n'\n '📂 𝗕𝘂𝗻𝗱𝗮𝗻 𝘀𝗼𝗻𝗿𝗮, 𝗵𝗲𝗱𝗲𝗳 𝗸𝗹𝗮𝘀ö𝗿𝗹𝗲𝗿𝗶 𝗮𝘆𝗮𝗿𝗹𝗮𝗺𝗮𝗸 𝗶ç𝗶𝗻 /folders 𝗸𝘂𝗹𝗹𝗮𝗻ı𝗻.\\n'\n '🔗 𝗚𝗼𝗼𝗴𝗹𝗲 𝗗𝗿𝗶𝘃𝗲 𝗯𝗮ğ𝗹𝗮𝗻𝘁ı𝘀ı 𝗴ö𝗻𝗱𝗲𝗿𝗺𝗲𝗻𝗶𝘇 𝘆𝗲𝘁𝗲𝗿𝗹𝗶.')\n rsp.done.wait(timeout=60)\n message_id = rsp.result().message_id\n if update.message.chat_id < 0:\n context.job_queue.run_once(callback_delete_message, config.TIMER_TO_DELETE_MESSAGE,\n context=(update.message.chat_id, message_id))\n context.job_queue.run_once(callback_delete_message, config.TIMER_TO_DELETE_MESSAGE,\n context=(update.message.chat_id, update.message.message_id))\n", "sub_path": "telegram_gcloner/handlers/start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 1699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": 
[{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "telegram.ext.Dispatcher", "line_number": 14, "usage_type": "name"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.callback.callback_delete_message", "line_number": 27, "usage_type": "argument"}, {"api_name": "utils.config_loader.config.TIMER_TO_DELETE_MESSAGE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utils.config_loader.config", "line_number": 27, "usage_type": "name"}, {"api_name": "utils.callback.callback_delete_message", "line_number": 29, "usage_type": "argument"}, {"api_name": "utils.config_loader.config.TIMER_TO_DELETE_MESSAGE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "utils.config_loader.config", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.restricted.restricted", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "18572320", "text": "import cv2\nimport numpy as np\n\ndef goToVideo():\n while True:\n print('What kind of video processing would you like to do?')\n print('1. Capture live stream video with camera')\n print('2. Play a video from file')\n print('3. Optical flow')\n print('4. Dense optical flow')\n print('5. Background subtraction')\n print('6. Exit')\n x = input()\n if x == '1':\n cap = cv2.VideoCapture(0)\n print('Press ESC key to stop recording')\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n # Carry out operations on the frame, make the frames turn gray\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Display the resulting frame\n cv2.imshow('frame', gray)\n if cv2.waitKey(1) & 0xFF == ord('\\x1B'):\n break\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n elif x == '2':\n print('Enter the video path')\n vidPath = input()\n cap = cv2.VideoCapture(vidPath)\n print('Press ESC key to stop the video')\n while(cap.isOpened()):\n ret, frame = cap.read()\n cv2.imshow('frame', frame)\n # Slow the video down by limiting the display frame rate\n if cv2.waitKey(30) & 0xFF == ord('\\x1B'):\n break\n cap.release()\n cv2.destroyAllWindows()\n elif x == '3':\n # Optical flow is the pattern of apparent motion of image objects between two consecutive frames caused \n # by the movemement of object or camera\n # It is 2D vector field where each vector is a displacement vector \n # showing the movement of points from first frame to second\n print('Enter the video path')\n vidPath = input()\n cap = cv2.VideoCapture(vidPath)\n # Parameters for Shi-Tomasi corner detection\n feature_params = dict(maxCorners = 100,\n qualityLevel = 0.01,\n minDistance = 10,\n blockSize = 7)\n # Parameters for lucas kanade optical flow\n lk_params = dict(winSize = (15, 15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # Create some random colors\n color = np.random.randint(0, 255, (100, 3))\n # Take first frame and find corners in it using Shi-Tomasi corner detection\n ret, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n # Create a mask image for drawing purposes\n mask = np.zeros_like(old_frame)\n print('Press ESC key to stop the video')\n # Repeatedly compare corner points between old frame and new frame for the entire video\n # to get the vectors which make up optical flow\n while(1):\n ret,frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Calculate 
optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # Draw the tracks\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n mask = cv2.line(mask, (a,b), (c,d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a,b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n # Show the frame\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xFF == ord('\\x1B'):\n break\n # Now update the previous frame and previous points, the new frame/points will become old frame/points \n # and the next loop, the captured frame/points will be the new frame/points\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n cv2.destroyAllWindows()\n cap.release()\n elif x == '4':\n # Dense optical flow basically computes the optical flow for all the points in the frame instead of just corner points.\n print('Enter the video path')\n vidPath = input()\n cap = cv2.VideoCapture(vidPath)\n\n # First frame\n ret, frame = cap.read()\n old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame)\n hsv[...,1] = 255\n\n print('Press ESC key to stop the video')\n # Repeatedly compare between old frame and new frame for the entire video\n while(1):\n ret, frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n flow = cv2.calcOpticalFlowFarneback(old_gray, frame_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n\n mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\n hsv[...,0] = ang*180/np.pi/2\n hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n img = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n\n # Show the frame\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xFF == ord('\\x1B'):\n break\n # Now update the previous frame and previous points, the new frame/points will become old frame/points \n # and the next loop, the captured frame/points will be the new frame/points\n old_gray = frame_gray\n cap.release()\n cv2.destroyAllWindows()\n elif x == '5':\n # Background subtraction is a major preprocessing step in many vision based applications\n # Technically, we are extracting the moving foreground from static background\n print('Enter the video path')\n vidPath = input()\n cap = cv2.VideoCapture(vidPath)\n print('Press ESC key to stop the video')\n \n # Before the loop, we need to use cv2.createBackgroundSubtractorMOG2()\n # to create a background object using the function\n background = cv2.createBackgroundSubtractorMOG2()\n\n while(1):\n ret, frame = cap.read()\n\n # This kernel allows morphological opening to use the first few (120 by default) frames for background modelling.\n # It employs probabilistic foreground segmentation algorithm that identifies \n # possible foreground objects using Bayesian inference. \n # The estimates are adaptive; newer observations are more heavily weighted \n # than old observations to accommodate variable illumination. \n # Several morphological filtering operations like closing and opening are done to remove unwanted noise. 
\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n\n # Use backgroundsubtractor.apply() method to get the foreground mask.\n mask = background.apply(frame)\n\n # Apply morphological opening to the resulting mask to remove the noises\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n\n # Show the frame\n cv2.imshow('frame', mask)\n if cv2.waitKey(30) & 0xFF == ord('\\x1B'):\n break\n cap.release()\n cv2.destroyAllWindows()\n else:\n break", "sub_path": "video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 7964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "cv2.VideoCapture", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_COUNT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cv2.goodFeaturesToTrack", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.calcOpticalFlowPyrLK", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.calcOpticalFlowFarneback", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.cartToPolar", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 116, "usage_type": "attribute"}, {"api_name": "cv2.normalize", "line_number": 117, "usage_type": "call"}, 
{"api_name": "cv2.NORM_MINMAX", "line_number": 117, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 118, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.createBackgroundSubtractorMOG2", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.getStructuringElement", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 150, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "504552418", "text": "import json\n# from sys import getsizeof\nfrom User import *\nfrom ParallelDataGroup import *\n\nbars = {\n \"gender\": [],\n \"status\": [],\n \"device\": [],\n \"age\": [],\n \"categories\": [],\n \"time\": 0,\n \"amount\": 0,\n \"maxStateVal\": 0\n}\n\n\ndef getUsers(data):\n users = []\n usersObjects = {}\n stateDict = {}\n for u in data:\n user = u[\"session_id\"]\n if user not in users:\n users.append(user)\n userObj = User(\n user_id=u[\"session_id\"],\n gender=u[\"gender\"],\n status=u[\"marital_status\"],\n device=u[\"device\"],\n location=u[\"location\"],\n age=u[\"age\"])\n num = stateDict.get(u[\"location\"][\"state\"], 0) + 1\n stateDict[u[\"location\"][\"state\"]] = num\n usersObjects[user] = userObj\n else:\n usersObjects[user].checkUserData(\n user_id=u[\"session_id\"],\n gender=u[\"gender\"],\n status=u[\"marital_status\"],\n device=u[\"device\"],\n location=u[\"location\"],\n age=u[\"age\"])\n\n founded = u[\"event_name\"] == \"Fund Project\"\n\n # \" \" + str(len(u[\"location\"])))\n usersObjects[user].addEvent(\n time=u[\"client_time\"],\n founded=founded,\n category=u[\"category\"],\n location=u[\"location\"],\n amount=u.get(\"amount\", 0)\n )\n if u[\"gender\"] not in bars[\"gender\"]:\n bars[\"gender\"].append(u[\"gender\"])\n if u[\"marital_status\"] not in bars[\"status\"]:\n bars[\"status\"].append(u[\"marital_status\"])\n if u[\"device\"] not in bars[\"device\"]:\n bars[\"device\"].append(u[\"device\"])\n if u[\"age\"] not in bars[\"age\"]:\n bars[\"age\"].append(u[\"age\"])\n if u[\"category\"] not in bars[\"categories\"]:\n bars[\"categories\"].append(u[\"category\"])\n bars[\"age\"].sort(reverse=True)\n bars[\"maxStateVal\"] = max(stateDict.values())\n return usersObjects.values()\n\n\ndef getParallelData(users):\n dataGroup = {}\n for u in users:\n u.prepare4json()\n key = u.age + u.device + u.gender + u.status\n if key not in dataGroup:\n dataGroup[key] = ParallelDataGroup(\n gender=u.gender,\n status=u.status,\n device=u.device,\n age=u.age\n )\n dataGroup[key].addSpecificUser(u)\n if u.time > bars[\"time\"]:\n bars[\"time\"] = u.time\n if u.amount > bars[\"amount\"]:\n bars[\"amount\"] = u.amount\n\n print (\"len(dataGroup): \" + str(len(dataGroup)))\n return dataGroup\n\n\nif __name__ == '__main__':\n json_data = open(\"../data/data.json\")\n data = 
json.load(json_data)[\"data\"]\n users = getUsers(data)\n dataGroup = getParallelData(users)\n with open('../data/preProcessedData.json', 'w') as outfile:\n json.dump(dataGroup.values(), outfile, cls=ParallelDataEncoder,\n separators=(',', ':'))\n with open('../data/preProcessedData_redeable.json', 'w') as outfile:\n json.dump(dataGroup.values(), outfile, cls=ParallelDataEncoder,\n separators=(',', ':'), indent=2)\n\n categories = bars[\"categories\"]\n newCategories = []\n for category in categories:\n newCategories.append(\"viewed \" + category)\n for category in categories:\n newCategories.append(\"founded \" + category)\n bars[\"categories\"] = newCategories\n\n with open('../data/barsValues.json', 'w') as outfile:\n json.dump(bars, outfile, cls=ParallelDataEncoder,\n separators=(',', ':'), indent=2)\n json_data.close()\n", "sub_path": "preProcessing/preProcessing.py", "file_name": "preProcessing.py", "file_ext": "py", "file_size_in_byte": 3690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "json.load", "line_number": 94, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 98, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "609475672", "text": "from datetime import timedelta as td\nfrom django.utils import timezone\nfrom django.utils.dateparse import parse_datetime\nfrom django.conf import settings\nfrom employee.models import Employee\n\nclass SetLastSeenMiddleware(object):\n KEY = \"last-activity\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if request.user.is_authenticated():\n str_last_activity = request.session.get(self.KEY)\n if str_last_activity:\n last_activity = parse_datetime(str_last_activity)\n else:\n last_activity = None\n\n # If key is old enough, update database.\n too_old_time = timezone.now() - td(seconds=settings.LAST_ACTIVITY_INTERVAL_SECS)\n if not last_activity or last_activity < too_old_time:\n user = Employee.objects.get(pk=request.user.pk)\n user.last_activity = timezone.now()\n user.save()\n request.session[self.KEY] = str(timezone.now())\n\n response = self.get_response(request)\n\n return response\n", "sub_path": "Jiller/middleware/SetLastSeenMiddleware.py", "file_name": "SetLastSeenMiddleware.py", "file_ext": "py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.utils.dateparse.parse_datetime", "line_number": 17, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 22, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.settings.LAST_ACTIVITY_INTERVAL_SECS", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "employee.models.Employee.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "employee.models.Employee.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "employee.models.Employee", "line_number": 24, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 25, 
"usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "370046969", "text": "import nibabel as nib\r\nimport numpy as np\r\nimport scipy\r\nfrom scipy import ndimage\r\nimport skimage\r\nfrom skimage import measure\r\nfrom skimage.morphology import convex_hull\r\n\r\n\r\ndef save_nifti_image(img_data, filename):\r\n \"\"\"\r\n A function for saving nifti image\r\n\r\n :param img_data: the data of nifti image\r\n :param filename: the filename of the image to write to\r\n \"\"\"\r\n img = nib.Nifti1Image(img_data, np.eye(4))\r\n nib.save(img, filename)\r\n\r\n\r\ndef IsolateBody(CT_scan):\r\n \"\"\"\r\n Isolates the body from the image\r\n :param CT_scan: ct scan of image\r\n :return: binary body segmentation image\r\n \"\"\"\r\n\r\n #thresholding\r\n Imin = -500\r\n Imax = 2000\r\n body_seg = CT_scan.get_data()\r\n body_seg[body_seg < Imin] = 0\r\n body_seg[body_seg > Imax] = 0\r\n body_seg[body_seg != 0] = 1\r\n\r\n #removing small area pixels\r\n # #connected components:\r\n labeled_components_seg, cur_connected_components = \\\r\n skimage.measure.label(body_seg, return_num=True)\r\n id_sizes = np.array(ndimage.sum(body_seg, labeled_components_seg, range(cur_connected_components + 1)))\r\n area_mask_small_areas = (id_sizes < 70)\r\n body_seg[area_mask_small_areas[labeled_components_seg]] = 0\r\n\r\n #noise filtering\r\n body_seg = scipy.ndimage.binary_opening(body_seg).astype(int)\r\n\r\n #returning the largest connected component:\r\n labeled_components_seg, cur_connected_components = \\\r\n skimage.measure.label(body_seg, return_num=True)\r\n id_sizes = np.array(ndimage.sum(body_seg, labeled_components_seg, range(cur_connected_components + 1)))\r\n biggest_component = np.amax(id_sizes)\r\n area_mask = (id_sizes < biggest_component)\r\n body_seg[area_mask[labeled_components_seg]] = 0\r\n input_filename = CT_scan.get_filename()\r\n filename_till_point = input_filename.split(\".\")[0]\r\n save_nifti_image(body_seg, filename_till_point + \"body_seg.nii.gz\")\r\n\r\n return body_seg\r\n\r\n\r\n\r\ndef IsolateBS(body_seg):\r\n \"\"\"\r\n Isolates the breathing system from the body\r\n :param body_seg: biggest connected component of body segmentation\r\n :return:\r\n \"\"\"\r\n\r\n #identifying the holes inside the body segmentation\r\n inverse_body_seg = 1 - body_seg\r\n labeled_components_inverse_seg, cur_connected_components_inverse = \\\r\n skimage.measure.label(inverse_body_seg, return_num=True)\r\n id_sizes = np.array(ndimage.sum(inverse_body_seg, labeled_components_inverse_seg,\r\n range(cur_connected_components_inverse + 1)))\r\n second_largest_component = np.partition(id_sizes, -2)[-2]\r\n area_mask = (id_sizes != second_largest_component)\r\n inverse_body_seg[area_mask[labeled_components_inverse_seg]] = 0\r\n breathing_seg = inverse_body_seg\r\n\r\n #identifying the slices:\r\n #inferior slice- BB:\r\n dim1, dim2, dim3 = breathing_seg.shape\r\n\r\n lowest_z = 0\r\n flag_z = 0\r\n widest_slice_size = 0\r\n widest_slice_z_id = 0\r\n for z in range(dim3):\r\n if np.count_nonzero(breathing_seg[:, :, z]) > 0:\r\n if not flag_z:\r\n flag_z = 1\r\n lowest_z = z\r\n # widest slice- CC:\r\n widest_slice_z_id = lowest_z\r\n widest_slice_size = np.count_nonzero(breathing_seg[:, :, lowest_z])\r\n continue\r\n pixels_in_slice = np.count_nonzero(breathing_seg[:, :, z])\r\n if pixels_in_slice > widest_slice_size:\r\n widest_slice_z_id = z\r\n 
widest_slice_size = pixels_in_slice\r\n        else:\r\n            if flag_z:\r\n                break\r\n\r\n    slice_BB_index = lowest_z\r\n\r\n    #widest slice / upper slice:\r\n    slice_CC_index = widest_slice_z_id\r\n\r\n    return breathing_seg, slice_BB_index, slice_CC_index\r\n\r\n\r\n\r\n\r\ndef ThreeDBand(body_seg, breathing_seg, BB_index, CC_index):\r\n    \"\"\"\r\n    Creates a 3d band between the convex hull of the breathing system and\r\n    the gap to the body segmentation\r\n    :param body_seg:\r\n    :param breathing_seg:\r\n    :param BB_index:\r\n    :param CC_index:\r\n    :return:\r\n    \"\"\"\r\n\r\n    max_z = CC_index\r\n    breathing_seg[:, :, max_z:] = 0 #clearing the values beyond CC slice\r\n    min_z = BB_index\r\n    breathing_seg_copy = breathing_seg\r\n    # convex hull of breathing system segmentation:\r\n    convex_hull_seg = np.zeros(breathing_seg_copy.shape)\r\n\r\n    for z in range(min_z, max_z):\r\n        convex_hull_seg[:, :, z] = convex_hull.convex_hull_image(breathing_seg_copy[:, :, z]).astype(int)\r\n    body_seg[:, :, :min_z] = 0 #clearing the values of body_seg below BB slice\r\n    body_seg[:, :, max_z:] = 0 #clearing the values of body_seg beyond CC slice\r\n\r\n    confined_region = body_seg - convex_hull_seg\r\n    confined_region[confined_region < 0] = 0\r\n\r\n    return confined_region\r\n\r\n\r\n\r\ndef spine_by_threshold(CT_scan):\r\n    \"\"\"\r\n    Gets the bones of CT scan from thresholding the image\r\n    between wanted values\r\n    :param CT_scan: image of ct scan\r\n    :return: segmentation of bones in image\r\n    \"\"\"\r\n\r\n    Imin = 500\r\n    Imax = 2000\r\n    spine_seg = CT_scan.get_data()\r\n    spine_seg[spine_seg < Imin] = 0\r\n    spine_seg[spine_seg > Imax] = 0\r\n    spine_seg[spine_seg != 0] = 1\r\n\r\n    return spine_seg\r\n\r\n\r\n\r\ndef SpineROI(Aorta_segmentation, CT_scan):\r\n    \"\"\"\r\n    This function returns the ROI of the spine.\r\n    :param ROI_aorta: segmentation of Aorta\r\n    :param CT_scan: image of CT scan\r\n    :returns ROI_aorta: \"folding\" 3d-rectangle around the aorta in image, using\r\n    region-growing algorithm\r\n    \"\"\"\r\n\r\n    spine_seg_partial = spine_by_threshold(CT_scan)\r\n    Aorta_seg_data = Aorta_segmentation.get_data()\r\n\r\n    wanted_shift = 70\r\n    aorta_shift1 = np.roll(Aorta_seg_data, shift=wanted_shift, axis=1)\r\n    aorta_shift2 = np.roll(Aorta_seg_data, shift=-wanted_shift, axis=1)\r\n    spine_ROI_1 = scipy.ndimage.binary_dilation(aorta_shift1, iterations=8,\r\n                                                structure=np.ones((9, 9, 9))) .astype(aorta_shift1.dtype)\r\n    spine_ROI_2 = scipy.ndimage.binary_dilation(aorta_shift2, iterations=8,\r\n                                                structure=np.ones((9, 9, 9))).astype(aorta_shift2.dtype)\r\n    intersect_bones1 = np.logical_and(spine_ROI_1, spine_seg_partial).astype(int)\r\n    intersect_bones2 = np.logical_and(spine_ROI_2, spine_seg_partial).astype(int)\r\n    number_intersect1 = np.count_nonzero(intersect_bones1)\r\n    number_intersect2 = np.count_nonzero(intersect_bones2)\r\n    if number_intersect1 > number_intersect2:\r\n        spine_ROI = spine_ROI_1\r\n    else:\r\n        spine_ROI = spine_ROI_2\r\n\r\n    #\r\n    # remarks for saving image\r\n    # input_filename = Aorta_segmentation.get_filename()\r\n    # filename_till_point = input_filename.split(\".\")[0]\r\n    # save_nifti_image(spine_ROI, filename_till_point + \"_spine_ROI.nii.gz\")\r\n    return spine_ROI\r\n\r\n\r\ndef MergedROI(confined_ROI, spine_ROI, CT_scan):\r\n    \"\"\"\r\n    Build a merged ROI that contains the spine and the chest.\r\n    :param spine_ROI: ROI of spine\r\n    :param confined_body_ROI: ROI between the lungs and the body\r\n    :param CT_scan: image of CT scan\r\n    :return: merged ROI, and saves the output\r\n    \"\"\"\r\n\r\n    merged_ROI_data = np.logical_or(spine_ROI, confined_ROI).astype(int)\r\n    input_filename = CT_scan.get_filename()\r\n    filename_till_point = input_filename.split(\".\")[0]\r\n    save_nifti_image(merged_ROI_data, filename_till_point + \"_ROI.nii.gz\")\r\n\r\n    return merged_ROI_data\r\n\r\n\r\n\r\n\r\n\r\ndef create_ROI_img(ctFileName, AortaFileName):\r\n    \"\"\"\r\n    This function runs the code on a given CT image and creates the ROI of chest + spine\r\n    :param ctFileName: filename of CT image, nifti file\r\n    :param AortaFileName: filename of aorta image, nifti file\r\n    \"\"\"\r\n    filename_till_point = ctFileName.split(\".\")[0]\r\n    cur_ct = nib.load(ctFileName)\r\n    body_seg = IsolateBody(cur_ct)\r\n    bs_seg, BB_slice, CC_slice = IsolateBS(body_seg)\r\n    confined_ROI = ThreeDBand(body_seg, bs_seg, BB_slice, CC_slice)\r\n    aorta_img = nib.load(AortaFileName)\r\n    spine_ROI = SpineROI(aorta_img, cur_ct)\r\n    merged_ROI_fin = MergedROI(confined_ROI, spine_ROI, cur_ct)\r\n    return merged_ROI_fin\r\n\r\n\r\n\r\ndef run_and_save_reg_case(save_image=False):\r\n    \"\"\"\r\n    This function runs the code on the regular cases given\r\n    I assume naive formats for Aorta files similar to what's given to us\r\n    :param save_image: deciding if saving some images outputs\r\n    \"\"\"\r\n    for i in range(1, 6):\r\n        filename_input = \"Case\" + str(i) +\"_CT.nii.gz\"\r\n        filename_till_point = filename_input.split(\".\")[0]\r\n        cur_ct = nib.load(filename_input)\r\n        body_seg = IsolateBody(cur_ct)\r\n        bs_seg, BB_slice, CC_slice = IsolateBS(body_seg)\r\n        confined_ROI = ThreeDBand(body_seg, bs_seg, BB_slice, CC_slice)\r\n        if save_image:\r\n            save_nifti_image(confined_ROI, filename_till_point + \"_confined_ROI.nii.gz\")\r\n        filename_till_CT = filename_input.split(\"CT\")[0]\r\n        aorta_img = nib.load(filename_till_CT + \"Aorta.nii.gz\")\r\n        spine_ROI = SpineROI(aorta_img, cur_ct)\r\n        merged_ROI_fin = MergedROI(confined_ROI, spine_ROI, cur_ct)\r\n\r\n\r\n\r\ndef run_and_save_hard_case(save_image=False):\r\n    \"\"\"\r\n    This function runs the code on the hard cases given\r\n    I assume naive formats for Aorta files similar to what's given to us\r\n    :param save_image: deciding if saving some images outputs\r\n    \"\"\"\r\n    for i in range(1, 6):\r\n        filename_input = \"HardCase\" + str(i) + \"_CT.nii.gz\"\r\n        filename_till_point = filename_input.split(\".\")[0]\r\n        cur_ct = nib.load(filename_input)\r\n        body_seg = IsolateBody(cur_ct)\r\n        bs_seg, BB_slice, CC_slice = IsolateBS(body_seg)\r\n        confined_ROI = ThreeDBand(body_seg, bs_seg, BB_slice, CC_slice)\r\n        if save_image:\r\n            save_nifti_image(confined_ROI, filename_till_point + \"_confined_ROI.nii.gz\")\r\n        filename_till_CT = filename_input.split(\"CT\")[0]\r\n        aorta_img = nib.load(filename_till_CT + \"Aorta.nii.gz\")\r\n        spine_ROI = SpineROI(aorta_img, cur_ct)\r\n        merged_ROI_fin = MergedROI(confined_ROI, spine_ROI, cur_ct)\r\n\r\n\r\n#running on input images: uncomment each line below\r\n# run_and_save_reg_case(True)\r\n# run_and_save_hard_case(True)\r\n", "sub_path": "spineSeg_partB.py", "file_name": "spineSeg_partB.py", "file_ext": "py", "file_size_in_byte": 10211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "nibabel.Nifti1Image", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 17, "usage_type": "call"}, {"api_name": "nibabel.save", "line_number": 18, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 39, "usage_type": "call"}, {"api_name": "skimage.measure",
"line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.ndimage.sum", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 40, "usage_type": "name"}, {"api_name": "scipy.ndimage.binary_opening", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 45, "usage_type": "attribute"}, {"api_name": "skimage.measure.label", "line_number": 49, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.ndimage.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.amax", "line_number": 51, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 72, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.ndimage.sum", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.partition", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "skimage.morphology.convex_hull.convex_hull_image", "line_number": 134, "usage_type": "call"}, {"api_name": "skimage.morphology.convex_hull", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.roll", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 178, "usage_type": "call"}, {"api_name": "scipy.ndimage.binary_dilation", "line_number": 179, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 180, "usage_type": "call"}, {"api_name": "scipy.ndimage.binary_dilation", "line_number": 181, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 181, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 209, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 227, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 231, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 247, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 254, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 269, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "98488241", "text": "import keras, numpy as np\nfrom model import create_model\nfrom keras.datasets import cifar10\n\nmodel = create_model()\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n\n# Convert class vectors to binary class matrices.\nnum_classes = 
10\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nx_train, x_test = x_train.astype(np.float32)/256, x_test.astype(np.float32)/256\n\nbatch_size = 1000\n\nmodel.fit(x_train, y_train, batch_size=batch_size, verbose=1, validation_data = (x_test, y_test))\n\n\n", "sub_path": "ZZZ/ML/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "model.create_model", "line_number": 5, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10.load_data", "line_number": 6, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10", "line_number": 6, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 11, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 14, "usage_type": "attribute"}, {"api_name": "model.fit", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "78400413", "text": "import os\r\nfrom zipfile import ZipFile as zf\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nimport ctypes\r\n\r\nclass Rozbal:\r\n\tdef __init__(self):\r\n\t\tself.pracovnyPriecinok = os.getcwd()\r\n\t\tself.subory = dict()\r\n\r\n\tdef nastavPriecinok(self, priecinok):\r\n\t\tos.chdir(priecinok)\r\n\t\tself.pracovnyPriecinok = os.getcwd()\r\n\r\n\tdef ziskajSubory(self):\r\n\t\tself.subory = dict()\r\n\t\tbuff = os.listdir(self.pracovnyPriecinok)\r\n\t\tfor i in range(len(buff)):\r\n\t\t\tr = buff[i].replace('.zip', '')\r\n\t\t\tif (r == buff[i]):\r\n\t\t\t\tif ('zip' in buff):\r\n\t\t\t\t\tself.subory.setdefault(buff[i], True)\r\n\t\t\telse:\r\n\t\t\t\tif ('.zip' in buff[i]):\r\n\t\t\t\t\tself.subory.setdefault(buff[i], False)\r\n\r\n\tdef rozbal(self):\r\n\t\tfor nazov, stav in self.subory.items():\r\n\t\t\tif (stav == False):\r\n\t\t\t\twith zf(self.pracovnyPriecinok +'\\\\' + nazov, 'r') as f:\r\n\t\t\t\t\tf.extractall(self.pracovnyPriecinok +'\\\\' + nazov.replace('.zip', ''))\r\n\t\t\t\t\tself.subory[nazov] = True\r\n\t\t\r\n\tdef rozbal2(self, subor):\r\n\t\twith zf(self.pracovnyPriecinok +'\\\\' + subor, 'r') as f:\r\n\t\t\tf.extractall(self.pracovnyPriecinok +'\\\\' + subor.replace('.zip', ''))\r\n\t\t\tself.subory[subor] = True\r\n\t\t\r\n\tdef kontrola(self):\r\n\t\tfor nazov, stav in self.subory.items():\r\n\t\t\tif (not stav):\r\n\t\t\t\treturn False\r\n\r\nclass Rozbalovac:\r\n\r\n\tdef __init__(self):\r\n\t\tself.rozbalovac = Rozbal()\r\n\t\tself.buttony = {}\r\n\t\tself.polozky = []\r\n\t\t\r\n\t\tself.directory = None\r\n\t\tself.root = Tk()\r\n\t\tself.root.title('EXTRACTOR')\r\n\t\tself.root.geometry('500x500')\r\n\r\n\t\tb = Button(self.root, text='VYBER ADRESAR', command=self.directoryName)\r\n\t\tb.pack()\r\n\r\n\t\t#b = Button(self.root, text='REFRESH', command=self.refresh)\r\n\t\t#b.pack()\r\n\r\n\t\tb = Button(self.root, text='UNZIP CHOOSEN', command=self.unzipPicked)\r\n\t\tb.pack()\r\n\r\n\t\tb = Button(self.root, text='UNZIPALL', command=self.unzipAll)\r\n\t\tb.pack()\r\n\r\n\t\tmenubar = Menu(self.root)\r\n\r\n\t\t# create a pulldown menu, and add it to the menu bar\r\n\t\tfilemenu = Menu(menubar, 
tearoff=0)\r\n\t\tfilemenu.add_command(label='Load', command=self.load)\r\n\t\tfilemenu.add_command(label='Save', command=self.uloz)\r\n\t\tfilemenu.add_command(label='Exit', command=self.root.destroy)\r\n\t\tmenubar.add_cascade(label='File', menu=filemenu)\r\n\r\n\t\thelpmenu = Menu(menubar, tearoff=0)\r\n\t\thelpmenu.add_command(label='About', command=self.About)\r\n\t\tmenubar.add_cascade(label='Help', menu=helpmenu)\r\n\r\n\t\t# display the menu\r\n\t\tself.root.config(menu=menubar)\r\n\r\n\t\t\r\n\tdef uloz(self):\r\n\t\tpass\r\n\tdef load(self):\r\n\t\tpass\r\n\tdef About(self):\r\n\t\tmessagebox.showinfo(title='About', message = '''Created by: Marek Krafčik\r\nVersion: 1.0\r\nEGUI - it is and GUI interface for zipFile python build in library for ordinary users. ''')\r\n\t\treturn\r\n\tdef Achtung(self):\r\n\t\treturn\r\n\tdef unzipAll(self):\r\n\t\tself.unzipFilesAll()\r\n\tdef unzipPicked(self):\r\n\t\tself.unzip()\r\n\r\n\tdef directoryName(self):\r\n\t\tself.directory = filedialog.askdirectory()\r\n\t\tself.rozbalovac.nastavPriecinok(self.directory)\r\n\t\tself.getFiles()\r\n\t\tself.drawFiles()\r\n\t\tprint(self.directory)\r\n\r\n\tdef refresh(self):\r\n\t\tfor i in self.polozky:\r\n\t\t\ti.destroy()\r\n\t\tself.root.update()\r\n\t\tself.root.after(1000)\r\n\t\tself.drawFiles()\r\n\r\n\tdef getFiles(self):\r\n\t\tif self.directory is not None:\r\n\t\t\tself.rozbalovac.ziskajSubory()\r\n\r\n\tdef drawFiles(self):\r\n\t\tself.polozky = []\r\n\t\t#self.getFiles()\r\n\t\tpom = list(self.rozbalovac.subory.keys())\r\n\t\tself.buttony = dict()\r\n\t\tfor i in range(len(pom)):\r\n\t\t\tif self.rozbalovac.subory[pom[i]] == False:\r\n\t\t\t\tvar = BooleanVar()\r\n\t\t\t\tw = Checkbutton(self.root, text = pom[i], variable = var)\r\n\t\t\t\tw.pack()\r\n\t\t\t\tself.polozky.append(w)\r\n\t\t\t\tself.buttony[pom[i]] = var\r\n\t'''\r\n\r\n\t'''\r\n\tdef unzipFilesAll(self):\r\n\t\tself.rozbalovac.rozbal()\r\n\t\t#print(self.rozbalovac.subory)\r\n\t\tself.refresh()\r\n\t\t\r\n\tdef unzip(self):\r\n\t\tfor nazov, stav in self.buttony.items():\r\n\t\t\t#print('Nazov:\\t{}\\tstav:\\t{}'.format(nazov,stav.get())\r\n\t\t\t\tif stav.get() == True:\r\n\t\t\t\t\tself.rozbalovac.rozbal2(nazov)\r\n\t\tself.refresh()\r\n\r\nc = Rozbalovac()", "sub_path": "rozbal.py", "file_name": "rozbal.py", "file_ext": "py", "file_size_in_byte": 3920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 32, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 37, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 92, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 104, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "149388408", "text": "\nimport numpy as np\nimport parl\nimport os.path\n#import paddle\nimport paddle.fluid as fluid\nfrom parl.utils import logger\n# Author for Paddle(): Shiva Verma\n# Author :skywalk\n\nimport turtle as t\n\n\nclass Paddle():\n\n def __init__(self):\n\n self.done = False\n self.reward = 0\n 
self.hit, self.miss = 0, 0\n\n # Setup Background\n\n self.win = t.Screen()\n self.win.title('Paddle')\n self.win.bgcolor('black')\n self.win.setup(width=600, height=600)\n self.win.tracer(0)\n\n # Paddle\n\n self.paddle = t.Turtle()\n self.paddle.speed(0)\n self.paddle.shape('square')\n self.paddle.shapesize(stretch_wid=1, stretch_len=5)\n self.paddle.color('white')\n self.paddle.penup()\n self.paddle.goto(0, -275)\n\n # Ball\n\n self.ball = t.Turtle()\n self.ball.speed(0)\n self.ball.shape('circle')\n self.ball.color('red')\n self.ball.penup()\n self.ball.goto(0, 100)\n self.ball.dx = 3\n self.ball.dy = -3\n\n # Score\n\n self.score = t.Turtle()\n self.score.speed(0)\n self.score.color('white')\n self.score.penup()\n self.score.hideturtle()\n self.score.goto(0, 250)\n self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n\n # -------------------- Keyboard control ----------------------\n\n self.win.listen()\n self.win.onkey(self.paddle_right, 'Right')\n self.win.onkey(self.paddle_left, 'Left')\n\n # Paddle movement\n\n def paddle_right(self):\n\n x = self.paddle.xcor()\n if x < 225:\n self.paddle.setx(x+20)\n\n def paddle_left(self):\n\n x = self.paddle.xcor()\n if x > -225:\n self.paddle.setx(x-20)\n\n # ------------------------ AI control ------------------------\n\n # 0 move left\n # 1 do nothing\n # 2 move right\n\n def reset(self):\n\n self.paddle.goto(0, -275)\n self.ball.goto(0, 100)\n self.reward = 0\n return [self.paddle.xcor()*0.01, self.ball.xcor()*0.01, self.ball.ycor()*0.01, self.ball.dx/3*2+self.ball.dy/3]\n\n def step(self, action, render=False):\n\n self.reward = 0\n self.done = 0\n\n if action == 0:\n self.paddle_left()\n self.reward -= .01 #.1\n\n if action == 2:\n self.paddle_right()\n self.reward -= .01 #.1\n\n if render:\n self.run_frame()\n else:\n self.run_frame_quick()\n# dcx=self.ball.dx/3*2\n# dcy=self.ball.dy/3\n \n state = [self.paddle.xcor()*0.01, self.ball.xcor()*0.01, self.ball.ycor()*0.01, self.ball.dx/3*2+self.ball.dy/3 ]\n return self.reward, state, self.done\n\n def run_frame(self):\n\n self.win.update()\n\n # Ball moving\n\n self.ball.setx(self.ball.xcor() + self.ball.dx)\n self.ball.sety(self.ball.ycor() + self.ball.dy)\n\n # Ball and Wall collision\n\n if self.ball.xcor() > 290:\n self.ball.setx(290)\n self.ball.dx *= -1\n\n if self.ball.xcor() < -290:\n self.ball.setx(-290)\n self.ball.dx *= -1\n\n if self.ball.ycor() > 290:\n self.ball.sety(290)\n self.ball.dy *= -1\n\n # Ball Ground contact\n\n if self.ball.ycor() < -290:\n self.ball.goto(0, 100)\n self.miss += 1\n self.score.clear()\n self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n #self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n\n\n\n self.reward -= 3 #-=\n self.done = True\n\n # Ball Paddle collision\n\n if abs(self.ball.ycor() + 250) < 2 and abs(self.paddle.xcor() - self.ball.xcor()) < 55:\n self.ball.dy *= -1\n self.hit += 1\n self.score.clear()\n self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n self.reward += 3 #+=\n #self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n\n\n\n def run_frame_quick(self):\n\n #self.win.update()\n\n # Ball moving\n\n self.ball.setx(self.ball.xcor() + self.ball.dx)\n self.ball.sety(self.ball.ycor() + self.ball.dy)\n\n # Ball 
and Wall collision\n\n if self.ball.xcor() > 290:\n self.ball.setx(290)\n self.ball.dx *= -1\n\n if self.ball.xcor() < -290:\n self.ball.setx(-290)\n self.ball.dx *= -1\n\n if self.ball.ycor() > 290:\n self.ball.sety(290)\n self.ball.dy *= -1\n\n # Ball Ground contact\n\n if self.ball.ycor() < -290:\n self.ball.goto(0, 100)\n self.miss += 1\n# self.score.clear()\n# self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n #logger,info(f\"Hit: {self.hit} Missed: {self.miss}\")\n# logger.info(f\"Game Over Hit:{self.hit} Missed:{self.miss}\")\n print(\".\", end=\" \")\n self.reward -= 3 #3 -=\n self.done = True\n\n # Ball Paddle collision\n\n if abs(self.ball.ycor() + 250) < 2 and abs(self.paddle.xcor() - self.ball.xcor()) < 55:\n self.ball.dy *= -1\n self.hit += 1\n self.score.clear()\n# self.score.write(\"Hit: {} Missed: {}\".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))\n# logger.info(f\"^-^ Good job!Hit: {self.hit} Missed: {self.miss}\")\n print(\"!\", end=\" \")\n self.reward += 3 #3 +=\n\n#这里好像有点问题,\n\n# while True:\n#\n# env.run_frame()\nimport parl\nfrom parl import layers\n\nclass BallModel(parl.Model):\n def __init__(self, act_dim):\n act_dim = act_dim\n hid1_size = act_dim * 20\n\n self.fc1 = layers.fc(size=hid1_size, act='tanh')\n\n #self.fc3 = layers.fc(size=hid1_size, act='tanh')\n\n self.fc2 = layers.fc(size=act_dim, act='softmax')\n\n def forward(self, obs):\n out = self.fc1(obs)\n #out = self.fc3(out)\n out = self.fc2(out)\n return out\n \nclass BallAgent(parl.Agent):\n def __init__(self, algorithm, obs_dim, act_dim):\n self.obs_dim = obs_dim\n self.act_dim = act_dim\n super(BallAgent, self).__init__(algorithm)\n\n def build_program(self):\n self.pred_program = fluid.Program()\n self.learn_program = fluid.Program() #train_program\n\n with fluid.program_guard(self.pred_program):\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n self.act_prob = self.alg.predict(obs)\n\n with fluid.program_guard(self.learn_program):\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n act = layers.data(name='act', shape=[1], dtype='int64')\n reward = layers.data(name='reward', shape=[], dtype='float32')\n self.cost = self.alg.learn(obs, act, reward)\n\n def sample(self, obs):\n obs = np.expand_dims(obs, axis=0)\n act_prob = self.fluid_executor.run(\n self.pred_program,\n feed={'obs': obs.astype('float32')},\n fetch_list=[self.act_prob])[0]\n act_prob = np.squeeze(act_prob, axis=0)\n act = np.random.choice(range(self.act_dim), p=act_prob)\n return act\n\n def predict(self, obs):\n obs = np.expand_dims(obs, axis=0)\n act_prob = self.fluid_executor.run(\n self.pred_program,\n feed={'obs': obs.astype('float32')},\n fetch_list=[self.act_prob])[0]\n act_prob = np.squeeze(act_prob, axis=0)\n act = np.argmax(act_prob)\n return act\n\n def learn(self, obs, act, reward):\n act = np.expand_dims(act, axis=-1)\n feed = {\n 'obs': obs.astype('float32'),\n 'act': act.astype('int64'),\n 'reward': reward.astype('float32')\n }\n cost = self.fluid_executor.run(\n self.learn_program, feed=feed, fetch_list=[self.cost])[0]\n return cost\nOBS_DIM = 4\nACT_DIM = 3\nLEARNING_RATE = 4e-3\nGAMMA=0.98\nmodel = BallModel(act_dim=3)\nalg = parl.algorithms.PolicyGradient(model, lr=LEARNING_RATE)\n#alg=parl.algorithms.DQN(model,lr=0.001,gamma=0.99)\nagent = BallAgent(alg, obs_dim=OBS_DIM, act_dim=3)\n#成功执行\ndef run_episode(env, agent, train_or_test='train'):\n obs_list, action_list, 
reward_list = [], [], []\n obs = env.reset()\n while True:\n obs_list.append(obs)\n if train_or_test == 'train':\n action = agent.sample(obs)\n action_list.append(action)\n reward, obs, done = env.step(action)\n else:\n action = agent.predict(obs)\n action_list.append(action)\n\n #obs, reward, done, info = env.step(action)\n reward, obs, done = env.step(action,True)\n reward_list.append(reward)\n\n if done:\n break\n return obs_list, action_list, reward_list\n\n# def calc_reward_to_go(reward_list):\n# for i in range(len(reward_list) - 2, -1, -1):\n# reward_list[i] += reward_list[i + 1]\n# return np.array(reward_list)\n\ndef calc_reward_to_go(reward_list, gamma=0.99):\n \"\"\"calculate discounted reward\"\"\"\n reward_arr = np.array(reward_list)\n for i in range(len(reward_arr) - 2, -1, -1):\n # G_t = r_t + γ·r_t+1 + ... = r_t + γ·G_t+1\n reward_arr[i] += gamma * reward_arr[i + 1]\n # normalize episode rewards\n dctmp = reward_arr - np.mean(reward_arr)\n dctmp1 = dctmp / np.std(reward_arr)\n reward_arr=dctmp1\n return reward_arr\n\n\n#env = gym.make(\"CartPole-v0\")\nenv = Paddle()\nfor i in range(1000):\n obs_list, action_list, reward_list = run_episode(env, agent)\n if i % 10 == 0:\n logger.info(\"\\nEpisode {}, Reward Sum {}.\".format(i, sum(reward_list)))\n\n batch_obs = np.array(obs_list)\n batch_action = np.array(action_list)\n #batch_reward = calc_discount_norm_reward(reward_list, GAMMA)\n batch_reward = calc_reward_to_go(reward_list, GAMMA)\n\n agent.learn(batch_obs, batch_action, batch_reward)\n if (i + 1) % 100 == 0:\n _, _, reward_list = run_episode(env, agent, train_or_test='test')\n total_reward = np.sum(reward_list)\n logger.info('Test reward: {}'.format(total_reward))\n \n agent.save('./paddleball_test.ckpt')\n \n \nagent.save('./paddleball.ckpt')\n\n\n", "sub_path": "pb.py", "file_name": "pb.py", "file_ext": "py", "file_size_in_byte": 10740, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "turtle.Screen", "line_number": 24, "usage_type": "call"}, {"api_name": "turtle.Turtle", "line_number": 32, "usage_type": "call"}, {"api_name": "turtle.Turtle", "line_number": 42, "usage_type": "call"}, {"api_name": "turtle.Turtle", "line_number": 53, "usage_type": "call"}, {"api_name": "parl.Model", "line_number": 221, "usage_type": "attribute"}, {"api_name": "parl.layers.fc", "line_number": 226, "usage_type": "call"}, {"api_name": "parl.layers", "line_number": 226, "usage_type": "name"}, {"api_name": "parl.layers.fc", "line_number": 230, "usage_type": "call"}, {"api_name": "parl.layers", "line_number": 230, "usage_type": "name"}, {"api_name": "parl.Agent", "line_number": 238, "usage_type": "attribute"}, {"api_name": "paddle.fluid.Program", "line_number": 245, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 245, "usage_type": "name"}, {"api_name": "paddle.fluid.Program", "line_number": 246, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 246, "usage_type": "name"}, {"api_name": "paddle.fluid.program_guard", "line_number": 248, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 248, "usage_type": "name"}, {"api_name": "parl.layers.data", "line_number": 249, "usage_type": "call"}, {"api_name": "parl.layers", "line_number": 249, "usage_type": "name"}, {"api_name": "paddle.fluid.program_guard", "line_number": 253, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 253, "usage_type": "name"}, {"api_name": "parl.layers.data", "line_number": 254, 
"usage_type": "call"}, {"api_name": "parl.layers", "line_number": 254, "usage_type": "name"}, {"api_name": "parl.layers.data", "line_number": 256, "usage_type": "call"}, {"api_name": "parl.layers", "line_number": 256, "usage_type": "name"}, {"api_name": "parl.layers.data", "line_number": 257, "usage_type": "call"}, {"api_name": "parl.layers", "line_number": 257, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 281, "usage_type": "call"}, {"api_name": "parl.algorithms.PolicyGradient", "line_number": 295, "usage_type": "call"}, {"api_name": "parl.algorithms", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 333, "usage_type": "call"}, {"api_name": "parl.utils.logger.info", "line_number": 343, "usage_type": "call"}, {"api_name": "parl.utils.logger", "line_number": 343, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 353, "usage_type": "call"}, {"api_name": "parl.utils.logger.info", "line_number": 354, "usage_type": "call"}, {"api_name": "parl.utils.logger", "line_number": 354, "usage_type": "name"}]} +{"seq_id": "131208281", "text": "#!/usr/bin/env python3\n\nfrom exp.nb_ctabr import *\nimport os, sys, random, string\nfrom pathlib import Path\nfrom fastai.vision import models\nimport time\nimport torch\n\n\nsys.stdout.write(str(torch.cuda.is_available()) + '\\n')\nstart_time = time.time()\n\nscriptID = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n\nruns = [0,1,2] #,3,4]\nseeds = [7, 42, 666] #, 10, 108]\n\nrunsave_path = Path('/workspace/abr_project/run_saves')\n\nim_path = Path('/workspace/abr_project/CT_segmentation/ct_data/images')\nbifurcations = openNamesList('/workspace/abr_project/CT_segmentation/dual_capsseg/nbs/useful/bifs.csv', im_path)\nnormals = openNamesList('/workspace/abr_project/CT_segmentation/dual_capsseg/nbs/useful/normal.csv', im_path)\n\nfor runID,seed in zip(runs, seeds):\n torch.manual_seed(seed)\n unet = init_CT_model(models.resnet101, runsave_path/'models/pretrained_unet_101')\n unet = unet.cuda()\n learner = ABRLearner(unet, normals, bifurcations, runsave_path, scriptID, str(runID),2, 10)\n learner.fit(3)\n #print(len(learner.trainNData))\n\nsys.stdout.write('Finished in ' + str(int(time.time()-start_time)) + ' seconds.')\n", "sub_path": "nbs/ensembler.py", "file_name": "ensembler.py", "file_ext": "py", "file_size_in_byte": 1146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.stdout.write", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 11, "usage_type": "call"}, 
{"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 14, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 26, "usage_type": "call"}, {"api_name": "fastai.vision.models.resnet101", "line_number": 27, "usage_type": "attribute"}, {"api_name": "fastai.vision.models", "line_number": 27, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "306532646", "text": "'''\n@说明 :动态用户组刷新\n@时间 :2019/12/25 下午12:12:24\n@作者 :任秋锴\n@版本 :1.0\n'''\n\nimport requests\n\n\nclass cjdgUserGroup:\n def __init__(self, token):\n self.token = token\n self.headers = {\n \"Host\": \"bms.chaojidaogou.com\",\n \"Connection\": \"keep-alive\",\n \"Content-Length\": \"90\",\n \"Pragma\": \"no-cache\",\n \"Cache-Control\": \"no-cache\",\n \"Origin\": \"http://bms.chaojidaogou.com\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Referer\": \"http://bms.chaojidaogou.com/shopguide/book_view.jhtml?topId=140&requestSource=h5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7\",\n \"Cookie\": f\"JSESSIONID=9AC8A3799D296A8712AE18B059D5B694; Hm_lvt_5e3170920dadcb2f29dfb66f63b9b6aa=1574042672; accessToken={self.token}\",\n }\n\n def get(self, group_id):\n url = \"http://bms.chaojidaogou.com/shopguide/groupfindGroupDynamicAttr.jhtml\"\n data = {\n \"usergroup.groupId\": group_id\n }\n response = requests.post(url, data=data, headers=self.headers)\n if response.status_code == 200:\n result = response.json()\n return (result)\n\n def saveFlush(self, group_id):\n data = self.get(group_id)\n url = \"http://bms.chaojidaogou.com/shopguide/groupsaveDynamicGroup.jhtml\"\n group = data.get(\"group\")\n orgType = group.get(\"orgType\")\n orgIdTypes = \",\".join([f\"{k}_{orgType}\" for k in group.get(\"orgMap\")])\n data = {\n # \"user.orgPath\": \"null\",\n # \"user.level1\": \"NaN\",\n # \"user.level2\": \"NaN\",\n \"user.groupId\": group_id,\n \"user.userPost\": group.get(\"userPost\"),\n \"user.ids\": group.get(\"belongOrg\"),\n \"user.orgIdTypes\": orgIdTypes,\n \"user.isExeCutive\": \"1\",\n \"ge.id\": group.get(\"id\"),\n \"user.staticGroupId\": group.get(\"staticGroupId\"),\n \"user.staticGroupName\": group.get(\"staticGroupName\"),\n \"user.sysUserTagIds\": data.get(\"selecttag\"),\n \"user.orgType\": orgType,\n }\n # print(data)\n response = requests.post(url, data=data, headers=self.headers)\n if response.status_code == 200:\n result = response.json()\n return (result)\n\n\n\ndef testcase1():\n group_id = 10918039\n token = \"6c24d991339096fe99f4e011d19626e9_deppon\"\n ug = cjdgUserGroup(token)\n 
print(ug.saveFlush(group_id))\n # print(userGroupFlush(group_id, token))\n\nif __name__ == \"__main__\":\n testcase1()\n # print(userGroupFlush(group_id, token))\n", "sub_path": "cjdg_api/user_group.py", "file_name": "user_group.py", "file_ext": "py", "file_size_in_byte": 3051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "608842977", "text": "import jsonlines\nimport matplotlib.pyplot as plt\n\n\ndef load_results(path):\n r = jsonlines.open(path+\"results.jsonl\")\n train_loss = []\n epoch = 0\n for line in r:\n train_loss.append(line[\"test_loss\"])\n epoch+=1\n\n return train_loss\n\n#base_path = \"/home/jonas/data/squad/cluster_exp/\"\n#base_path = \"/home/jonas/data/25_11_19/\"\nbase_path = \"/media/jonas/archive/master/data/cluster_exp/27_11_19/experiments/\"\nexps = [\n #(base_path + \"transformer_medium/\", \"Transformer Medium\"),\n #(base_path + \"transformer_large/\", \"Transformer Large\"),\n #(base_path + \"transformer_small/\", \"Transformer Small\"),\n #(base_path + \"transformer_small_lr/\", \"Transformer Small lr+\"),\n (base_path + \"Transformer__11-19_12:06/\", \"Transformer Medium lr+\"),\n #(base_path + \"adam_trans_medium/\", \"Transformer Medium\"),\n #(base_path + \"adam_trans_large/\", \"Transformer Large\"),\n #(base_path + \"adam_trans_small/\", \"Transformer Small\"),\n (base_path+\"Transformer__11-13_16:49/\",\"Transformer Medium\"),\n (base_path+\"Transformer__11-13_16:50/\",\"Transformer Large\"),\n (base_path+\"Transformer__11-13_21:03/\",\"Transformer Small\"),\n # (base_path+\"LSTM_auto_encoder_1__11-13_09:18/\",\"LSTM Medium\"),\n # (base_path+\"LSTM_auto_encoder_1__11-15_11:10/\",\"LSTM Large\"),\n # (\"/media/jonas/archive/master/data/squad2/cluster_exp/experiments/Transformer__11-11_14:05/\",\"LSTM Large\"),\n # (\"/media/jonas/archive/master/data/squad2/cluster_exp/experiments/Transformer__11-11_15:12/\",\"LSTM Medium\"),\n # (\"/media/jonas/archive/master/data/squad2/cluster_exp/experiments/Transformer__11-11_15:26/\",\"LSTM Small\"),\n]\n\nfor exp in exps:\n\n exp_path = exp[0]\n results = load_results(exp_path)\n\n plt.plot(results)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Test Loss\")\n\n\nplt.title(\"Test loss with subword embedding input\")\nplt.legend([exp[1] for exp in exps])\n#plt.ylim(3.5,6.5)\nplt.savefig(\"fig/transformer_test_loss.png\")\n#plt.savefig(\"fig/lstm_train_loss.png\")\n#plt.savefig(\"fig/trans_subwords_train_loss.png\")\n\nplt.show()", "sub_path": "analysis/ttest_loss_graph.py", "file_name": "ttest_loss_graph.py", "file_ext": "py", "file_size_in_byte": 2044, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "jsonlines.open", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "347693781", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.base_blocks import BasicConv\n\n\ndef feature_transform_module(channels, fea_channel):\n layers = []\n for (i, channel) in enumerate(channels):\n layers.append(BasicConv(channel, fea_channel, kernel_size=1, padding=0, scale_factor=2 ** i))\n return nn.ModuleList(layers)\n\n\ndef fpn_feature_extractor(channels, fpn_level, fea_channel):\n layers = [BasicConv(fea_channel * len(channels), fea_channel, kernel_size=3, stride=1, padding=1)]\n for _ in range(fpn_level - 1):\n layers.append(BasicConv(fea_channel, fea_channel, kernel_size=3, stride=2, padding=1))\n return nn.ModuleList(layers)\n\n\nclass SSDNeck(nn.Module):\n\n def __init__(self, fpn_level, channels, fea_channel):\n super(SSDNeck, self).__init__()\n self.ft_module = feature_transform_module(channels, fea_channel)\n self.pyramid_ext = fpn_feature_extractor(channels, fpn_level, fea_channel)\n\n def forward(self, x):\n transformed_features = list()\n for (k, v) in zip(x, self.ft_module):\n transformed_features.append(v(k))\n x = torch.cat(transformed_features, 1)\n\n fpn_fea = list()\n for v in self.pyramid_ext:\n x = v(x)\n fpn_fea.append(x)\n return fpn_fea\n\n", "sub_path": "models/neck/ssd_neck.py", "file_name": "ssd_neck.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "models.base_blocks.BasicConv", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "models.base_blocks.BasicConv", "line_number": 18, "usage_type": "call"}, {"api_name": "models.base_blocks.BasicConv", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "432688649", "text": "# tgb - 4/18/2019 - Python script callable from command line\n# Follows notebook 010 @ https://github.com/tbeucler/CBRAIN-CAM/blob/master/notebooks/tbeucler_devlog/010_Conserving_Network_Paper_Runs.ipynb\n\n# set random seeds\nimport numpy as np\nnp.random.seed(0)\nfrom tensorflow import set_random_seed\nset_random_seed(0)\n\nimport sys\nsys.path.append('../')\nsys.path.append('../../../')\n\nimport os\nimport argparse\nimport xarray as xr\nimport numpy as np\n\nfrom cbrain.imports import *\nfrom cbrain.data_generator import *\nfrom cbrain.utils import limit_mem\n\nfrom model import Network\nfrom monitor import MetricMonitor\nfrom cbrain.model_diagnostics import 
ModelDiagnostics\n\n# Otherwise tensorflow will use ALL your GPU RAM for no reason\nlimit_mem()\n\nparser = argparse.ArgumentParser()\n# important params\nparser.add_argument('--data', type=str, default='fluxbypass_aqua', choices=['fluxbypass_aqua'])\nparser.add_argument('--continue', default=False, action='store_true', help='Continue from saved')\nparser.add_argument('--get_pred', default=False, action='store_true', help='Run predictions for specified model')\nparser.add_argument('--run_type', type=str, default='hyper_param_opt', choices=['hyper_param_opt', 'baseline', 'hyper_param_opt_conservation'], help='What to run?')\n# params okay left as defaults\nparser.add_argument('--batch_size', type=int, default=8192, help='Batch size')\nparser.add_argument('--data_dir', type=str, default='/baldig/chemistry/earth_system_science/')\nparser.add_argument('--max_dense_layers', type=int, default=5, help='Max dense layers allowed')\nparser.add_argument('--epochs', type=int, default=100, help='Number of epochs used for training')\nparser.add_argument('--patience', type=int, default=8, help='How long to wait for an improvement')\n\nargs = vars(parser.parse_args())\n\nmetric_monitor = MetricMonitor(args)\n\nPREFIX = '8col009_01_'\nDATADIR = args['data_dir'] + args['data'] + '/'\n\nscale_dict = load_pickle(DATADIR + '009_Wm2_scaling.pkl'); in_vars = load_pickle(DATADIR + '009_Wm2_in_vars.pkl')\nout_vars = load_pickle(DATADIR + '009_Wm2_out_vars.pkl'); dP = load_pickle(DATADIR + '009_Wm2_dP.pkl')\n\n\ntrain_gen = DataGenerator(\n data_fn = DATADIR+PREFIX+'train.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = DATADIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict,\n batch_size=args['batch_size'],\n shuffle=True\n)\n\nargs['div'] = train_gen.input_transform.div; args['sub'] = train_gen.input_transform.sub\nargs['scale_dict'] = scale_dict\n\nvalid_gen = DataGenerator(\n data_fn = DATADIR+PREFIX+'valid.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = DATADIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict,\n batch_size=args['batch_size'],\n shuffle=False\n)\n\nif args['run_type'] == 'baseline':\n net = Network(args)\n # train linear regression model\n history = net.train(train_gen, valid_gen)\n # save training results\n metric_monitor.update_trial_storage(history)\n # save lr model\n net.save(weights=True)\n # store predictions from lr model\n net.predict(valid_gen, file_name=metric_monitor.get_pred_loc())\n\nelif 'hyper_param_opt' in args['run_type']:\n # iterate through sherpa trials\n for params, (trial, study) in metric_monitor.run_hyper_param_opt():\n try:\n # create new network with hyper params each trial\n net = Network(params)\n # fit the data\n history = net.train(train_gen, valid_gen, trial=trial, study=study)\n\n # save current model configuration\n net.save()\n\n # record results and param settings\n best_loss = metric_monitor.update_trial_storage(history)\n\n if best_loss:\n # store predictions made by current model\n net.predict(valid_gen, file_name=metric_monitor.get_pred_loc())\n else:\n # remove weights file if the loss hasn't improved\n os.remove(net.get_model_path()+'.h5')\n\n metric_monitor.end_trial()\n\n except Exception as e:\n # clear memory from keras\n K.clear_session()\n", "sub_path": "notebooks/tbeucler_devlog/hp_opt_conservation/Serial/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4211, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.random.seed", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cbrain.utils.limit_mem", "line_number": 28, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "monitor.MetricMonitor", "line_number": 45, "usage_type": "call"}, {"api_name": "model.Network", "line_number": 80, "usage_type": "call"}, {"api_name": "model.Network", "line_number": 95, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "595533930", "text": "import numpy as np\nfrom io import StringIO\nimport sklearn\nfrom sklearn import svm\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\n\ndef report(results, n_top=3):\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(\"Model with rank: {0}\".format(i))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n results['mean_test_score'][candidate],\n results['std_test_score'][candidate]))\n print(\"Parameters: {0}\".format(results['params'][candidate]))\n print(\"\")\n\n\ndef randomCV(clf, X, y, param_grid, n_iter, cv):\n\trandom_search = RandomizedSearchCV(clf, param_distributions = param_grid,\n\t\t\t\t\tn_iter = n_iter, cv = cv, iid = False)\n\trandom_search.fit(X, y)\n\treport(random_search.cv_results_)\n\t\n#\tneigh.fit(X[:420], y[:420])\n#\treturn neigh.score(X[420:], y[420:])\n\t\n\ndef KNN(X, y):\n\t\n\tneigh = KNeighborsClassifier()\n\tparam_grid = {\n\t\t\"n_neighbors\" : np.arange(1,20),\n\t\t\"algorithm\" : ['auto', 'ball_tree', 'kd_tree', 'brute'],\n\t\t\"weights\" : ['uniform', 'distance'],\n\t\t\"leaf_size\" : np.arange(1,60)\n\t}\n\trandomCV(neigh, X, y, param_grid, 400, 6)\n\ndef SVM(X, y):\n\n# C_grid = [0.1, 1, 10]\n# gamma_grid = np.logspace(-2, 1, 4)[0:3]\n# svm_C = svm.SVC(kernel='poly')\n# param_grid = { 'C' : C_grid, 'gamma' : gamma_grid, \"kernel\" : ['poly', 'rbf', 'sigmoid'], }\n# gridcv = GridSearchCV(svm_C, param_grid, verbose=1, cv=3)\n# gridcv.fit(X, y)\n# print(\"best parameters:\", gridcv.best_params_)\n# print(\"%.1f%% accuracy on validation sets (average)\" % (gridcv.best_score_*100))\n\n\tsvm_C = svm.SVC()\n\tparam_grid = {\n\t\t\"kernel\" : ['linear', 'rbf', 'sigmoid'],\n\t\t\"gamma\" : ['scale', 'auto'],\n\t\t\"degree\" : np.arange(10),\n\t\t\"coef0\" : np.random.rand(60)*10,\n\t\t\"shrinking\" : [False, True],\n\t\t\"decision_function_shape\" : ['ovo','ovr']\n\t}\n\trandomCV(svm_C, X, y, param_grid, 4, 6)\n\ndef DT(X, y):\n\tdt = 
DecisionTreeClassifier()\n\tparam_grid = {\n\t\t\"criterion\" : ['gini', 'entropy'],\n\t\t\"splitter\" : ['best', 'random'],\n\t\t\"min_samples_split\" : np.random.random_sample((100,)),\n\t\t\"max_features\" : ['auto', 'sqrt', 'log2', None],\n\t\t\"class_weight\" : [None, 'balanced'],\n\t\t\"presort\" : [True, False],\n\t\t\"min_samples_leaf\" : np.arange(1,6)\n\t}\n\trandomCV(dt, X, y, param_grid, 400, 6)\n\ndef RF(X, y):\n\trf = RandomForestClassifier()\n\tparam_grid = {\n\t\t\"n_estimators\" : [10*x for x in np.arange(1,50)],\n\t\t\"criterion\" : ['gini', 'entropy'],\n\t\t\"min_samples_split\" : np.random.random_sample((100,)),\n\t\t\"max_features\" : ['auto', 'sqrt', 'log2', None],\n#\t\t\"class_weight\" : [None, 'balanced'],\n\t\t\"min_samples_leaf\" : np.arange(1,6),\n#\t\t\"bootstrap\" : [True, False],\n#\t\t\"oob_score\" : [True, False],\n\t\t\"warm_start\" : [True, False],\n\t}\n\trandomCV(rf, X, y, param_grid, 40, 6)\n\ndef Ada(X, y):\n\tada = AdaBoostClassifier(algorithm = \"SAMME\")\n\tparam_grid = {\n#\t\t\"base_estimator\" : ['classes', 'n_classes_', None],\n\t\t\"n_estimators\" : [10*x for x in np.arange(1,50)]\n#\t\t\"learning_rate\" : [10*x for x in np.random.random_sample((100,))]\n#\t\t\"algorithm\" : ['SAMME']\n\t}\n\trandomCV(ada, X, y, param_grid, 40, 6)\n\ndef LR(X, y):\n\tlr = LogisticRegression()\n\tparam_grid = {\n\t\t\"penalty\" : ['l1', 'l2'],\n#\t\t\"dual\" : [True, False],\n\t\t\"C\" : np.random.rand(60),\n\t\t\"fit_intercept\" : [True, False],\n\t\t\"warm_start\" : [True, False],\n\t\t\"multi_class\" : ['ovr', 'auto'],\n\t\t\"solver\" : [ 'liblinear']\n\t}\n\trandomCV(lr, X, y, param_grid, 400, 6)\n\ndef GNB(X, y):\n\tgnb = GaussianNB()\n\tparam_grid = {\n\t\t\"var_smoothing\" : np.random.random_sample((100,))\n\t}\n\trandomCV(gnb, X, y, param_grid, 100, 6)\n\ndef NN(X, y):\n\tnn = MLPClassifier()\n\tparam_grid = {\n\t\t\"hidden_layer_sizes\" : np.arange(2,200),\n\t\t\"activation\" : ['identity', 'logistic', 'tanh', 'relu'],\n\t\t\"solver\" : ['lbfgs', 'sgd', 'adam'],\n#\t\t\"verbose\" : [True, False],\n\t\t\"warm_start\" : [False, True]\n\t}\n\trandomCV(nn, X, y, param_grid, 200, 6)\n\n\ndef read(a):\n\tif a.endswith('.dat'):\n\t\tf = open(a,\"r\")\n\t\tif f.mode == 'r':\n\t\t\tcontent = f.read()\n\t\tc = StringIO(content)\n\t\treturn np.loadtxt(c)\n\n# ---------------> Read dataset file\ndata = read('australian.dat')\ny = data[:,-1]\nx = data[:,0:data.shape[1] -1]\n\n# --------------------> KNN\n\n#KNN(x,y)\n\n## --------------------> SVM\n\nSVM(x,y)\n\n## --------------------> Decision Tree\n\n#DT(x,y)\n\n## --------------------> Random Forest\n\n#RF(x, y)\n\n## --------------------> Adaboost\n\n#Ada(x, y)\n\n\n## ---------------------> Logistic regression\n\n#LR(x, y)\n\n## ---------------------> Gaussian NB\n\n#GNB(x, y)\n\n\n## ---------------------> Neural Network\n\n#NN(x, y)\n\n\n\n\n\n# SVM too much time\n\n# cd '/home/sid/Desktop/COMP 6321/default_project/9/4 classification'\n", "sub_path": "4 classification/4.py", "file_name": "4.py", "file_ext": "py", "file_size_in_byte": 5118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.flatnonzero", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": 
"numpy.arange", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random.random_sample", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random.random_sample", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random.random_sample", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 132, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "404309142", "text": "from flask import (\n Blueprint, render_template, request, redirect\n)\n\nfrom addon import db\nfrom models import People\n#from app import db\nfrom settings import get_value\nimport datetime\n\npeople_blueprint = Blueprint('people', __name__, url_prefix='/people')\n\n\n@people_blueprint.route(\"/\")\ndef people_main():\n return render_template('people/index.html', peoples=People.query.all(),\n OUR_APP_NAME=get_value('OUR_APP_NAME'), SECTION_NAME=get_value('SECTION_NAME'))\n\n\n@people_blueprint.route('/add', methods=['GET', 'POST'])\ndef people_add():\n if request.method == 'POST':\n name = request.form['name']\n birthday = request.form['birthday']\n about = request.form['about']\n social_media = request.form['social_media']\n # calculate age\n today_date = datetime.datetime.now()\n time_format = \"%Y-%m-%d\"\n b_day = datetime.datetime.strptime(birthday, time_format)\n age = str(today_date - b_day)\n # insert data into DB\n p = People(name=name, birthday=birthday, age=age,\n about=about, social_media=social_media)\n db.session.add(p)\n db.session.commit()\n return redirect('/people/add')\n return render_template('people/add.html', OUR_APP_NAME=get_value('OUR_APP_NAME'), message='')\n\n\n@people_blueprint.route('/delete/', methods=['GET', 'POST'])\ndef people_delete(id):\n People.query.filter(People.id == id).delete()\n db.session.commit()\n return 
redirect('/people')\n\n\n@people_blueprint.route('/edit/', methods=['GET', 'POST'])\ndef people_edit(name):\n a = People.query.get(name)\n return render_template('people/edit.html',\n id=a.id, name=a.name, birthday=a.birthday, about=a.about, social_media=a.social_media,\n OUR_APP_NAME=get_value('OUR_APP_NAME'), SECTION_ITEMS=get_value('SECTION_ITEMS'))\n\n\n@people_blueprint.route('/update', methods=['GET', 'POST'])\ndef people_update():\n people_name = request.form['name']\n people_id = request.form['id']\n people_birthday = request.form['birthday']\n people_about = request.form['about']\n people_social_media = request.form['social_media']\n # calculate age\n today_date = datetime.datetime.now()\n time_format = \"%Y-%m-%d\"\n b_day = datetime.datetime.strptime(people_birthday, time_format)\n people_age = str(today_date - b_day)\n # retrive record from db with id\n s = People.query.get(people_id)\n s.name = people_name\n s.birthday = people_birthday\n s.age = people_age\n s.about = people_about\n s.social_media = people_social_media\n db.session.commit()\n return redirect('/people')\n", "sub_path": "shopyo/views/people.py", "file_name": "people.py", "file_ext": "py", "file_size_in_byte": 2679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Blueprint", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "models.People.query.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.People.query", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.People", "line_number": 16, "usage_type": "name"}, {"api_name": "settings.get_value", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.People", "line_number": 33, "usage_type": "call"}, {"api_name": "addon.db.session.add", "line_number": 35, "usage_type": "call"}, {"api_name": "addon.db.session", "line_number": 35, "usage_type": "attribute"}, {"api_name": "addon.db", "line_number": 35, "usage_type": "name"}, {"api_name": "addon.db.session.commit", "line_number": 36, "usage_type": "call"}, {"api_name": "addon.db.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "addon.db", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, 
"usage_type": "call"}, {"api_name": "settings.get_value", "line_number": 38, "usage_type": "call"}, {"api_name": "models.People.query.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "models.People.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.People", "line_number": 43, "usage_type": "name"}, {"api_name": "models.People.id", "line_number": 43, "usage_type": "attribute"}, {"api_name": "addon.db.session.commit", "line_number": 44, "usage_type": "call"}, {"api_name": "addon.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "addon.db", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "models.People.query.get", "line_number": 50, "usage_type": "call"}, {"api_name": "models.People.query", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.People", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 51, "usage_type": "call"}, {"api_name": "settings.get_value", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.People.query.get", "line_number": 69, "usage_type": "call"}, {"api_name": "models.People.query", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.People", "line_number": 69, "usage_type": "name"}, {"api_name": "addon.db.session.commit", "line_number": 75, "usage_type": "call"}, {"api_name": "addon.db.session", "line_number": 75, "usage_type": "attribute"}, {"api_name": "addon.db", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "108268084", "text": "\"\"\"Module: globs.py\nOverview: Display initialization, global constants, and graphic/font loading.\nFunctions: getgraphics(directory,alpha=False)\"\"\"\n\nimport os\nimport pygame as pg\n\nos.environ['SDL_VIDEO_CENTERED'] = '1'\n\nFPS = 64.0 #Global frames per second (desired)\nSCREENSIZE = (1000,600) #Global screen size\nPLAYSIZE = (800,600) #Global size of play area\nOFFSET = (100,0) #Global location of play area within screen\nSURFACE = pg.display.set_mode(SCREENSIZE)\npg.init()\n\n#Fonts\nbasicFont = pg.font.Font(os.path.join('graphics','ArialNb.TTF'),48)\nfixedsys = pg.font.Font(os.path.join('graphics','Fixedsys500c.ttf'),60)\n\n#Player one controls\nPLAYER1_DEFAULT = {\"thrust\" :pg.K_UP,\n \"reverse\":pg.K_DOWN,\n \"right\" :pg.K_RIGHT,\n \"left\" 
:pg.K_LEFT,\n \"prime\" :pg.K_p,\n \"second\" :pg.K_o}\n#Player two controls\nPLAYER2_DEFAULT = {\"thrust\" :pg.K_w,\n \"reverse\":pg.K_s,\n \"right\" :pg.K_d,\n \"left\" :pg.K_a,\n \"prime\" :pg.K_SPACE,\n \"second\" :pg.K_LSHIFT}\n\ndef getgraphics(directory,alpha=False):\n \"\"\"Returns a dictionary of all the image files in a directory.\n Dictionary keys are image names minus their file extensions.\"\"\"\n dirlist = os.listdir(directory)\n graphic = {}\n for graf in dirlist:\n if graf[-3:] in (\"png\",\"jpg\"):\n if not alpha:\n graphic[graf[:-4]] = pg.image.load(os.path.join(directory,graf)).convert()\n graphic[graf[:-4]].set_colorkey((255,0,255))\n else:\n graphic[graf[:-4]] = pg.image.load(os.path.join(directory,graf)).convert_alpha()\n return graphic\n\nGFX = getgraphics(\"graphics\")\nGFXA = getgraphics(\"graphalpha\",True)\n\nSHIPS = (\"intrepid_alt\",\"wing_blue\",\"tripple\",\"ship_blue\",\"ship_red\",\"ship_up\")###", "sub_path": "Tutoriales-Ejemplos/Ejemplos/ChaoticHegemony-0.06/data/globs.py", "file_name": "globs.py", "file_ext": "py", "file_size_in_byte": 1907, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.K_p", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.K_o", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "534613718", "text": "# -*- coding: utf-8 -*-\nimport xlrd\nimport unittest\nimport requests\nimport json\nimport readConfig as readConfig\n\nlocalReadConfig = readConfig.ReadConfig()\n\n\nclass read(unittest.TestCase):\n def get_sheet1(self):\n # 打开文件\n workbook = xlrd.open_workbook(r'F:\\file_excel\\sample.xlsx')\n # 根据sheet索引或者名称获取sheet内容\n sheet1 = workbook.sheet_by_name('Sheet1')\n return\n\n # sheet的名称,行数,列数\n max_nrows_num = sheet1.nrows\n max_ncols_num = sheet1.ncols\n print(sheet1.name, max_ncols_num, max_nrows_num)\n\n # 获取整行和整列的值(数组)\n num_rows = sheet1.row_values(0) # 获取第四行内容\n cols = sheet1.col_values(2) # 获取第三列内容\n # print(num_rows)\n # print(cols)\n\n # 获取单元格内容\n # print(sheet1.cell(2, 2).value)\n # print(sheet1.cell_value(2, 0).encode('utf-8'))\n # print(sheet1.row(4)[0].value.encode('utf-8'))\n\n # 获取单元格内容的数据类型(ctype : 0 empty,1 string, 2 number, 3 date, 4 boolean, 5 error)\n # print(sheet1.cell(2, 2).ctype)\n # 获取总行数据\n nrows = sheet2.nrows\n print(nrows)\n for i in range(nrows):\n # print(sheet1.row_values(i))\n if i == 0:\n continue\n a = int(sheet2.row_values(i)[0])\n b = int(sheet2.row_values(i)[1])\n print(type(a), type(b))\n params = {'mobilePhone': a, 'password': b, 'remember': 'true', 'siteName': 'main'}\n url = localReadConfig.get_http('url')\n\n headers = {\n 'Content-Type': 'application/json;charset=UTF-8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0',\n 'Accept - Encoding': 'gzip, deflate',\n 'Accept - Language': 'zh - CN, zh;q = 0.9',\n 'Referer': 'http://www1.ejw.cn/auth/?backUrl=http%3A%2F%2Fadmin.ejw.cn%2F%23%2F',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n print(headers)\n # r1 = requests.post(url, data=json.dumps(params), headers=headers).text\n # print('新增成功')\n token_act = requests.post(url, data=json.dumps(params), headers=headers)\n print(token_act)\n s = json.loads(token_act.text)\n print(\"登陆成功\")\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "comm/excel.py", "file_name": "excel.py", "file_ext": "py", "file_size_in_byte": 2502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "readConfig.ReadConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 61, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "249656102", "text": "#!/usr/bin/env python3\n\nimport os\nimport sys\nimport re\nimport datetime\nimport string\nimport subprocess\n\nincludes = [\n 'include/',\n 'include/options/',\n 'include/screens/',\n 'include/syntax/',\n 'src/'\n]\n\ninput = 'Train.hpp'\noutput = 'Train.hpp'\n\ninclude_re = re.compile(r'\\s*#\\s*include\\s*\"(.*)\"')\n\ndef git():\n return subprocess.check_output(['git', 'log', '-1', '--oneline']).strip()\n\nout = open(output, 'w')\nout.write(\"/*\\n\")\nout.write(\" * This is file is generated by \")\nout.write(\"'scripts/generate-single-header.py' script\\n\")\nout.write(\" * Date: {}\\n\".format(datetime.datetime.now()))\nout.write(\" * Git: {}\\n\".format(git()))\nout.write(\" 
*/\\n\\n\")\nout.write(\"#define TRAIN_HEADER_ONLY 1\\n\")\n\nseen = []\n\ndef inc_open(path):\n for dir in includes:\n p = dir + path\n if os.path.exists(p):\n return open(p, 'r')\n\n print('File {} is not found!'.format(path))\n raise\n\ndef parse(path, depth = 0):\n\n print('-' * (depth * 4) + ' ' + path)\n\n seen.append(path)\n\n for line in inc_open(path):\n match = include_re.match(line)\n\n if not match:\n out.write(line.rstrip() + '\\n')\n continue\n\n header = match.group(1)\n if header in seen:\n continue\n\n out.write(\"// start {0}\\n\".format(line))\n parse(header, depth + 1)\n out.write(\"// end {0}\\n\".format(line))\n\nparse(input)\n", "sub_path": "scripts/generate-single-header.py", "file_name": "generate-single-header.py", "file_ext": "py", "file_size_in_byte": 1393, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "577412285", "text": "#!/usr/bin/env python\n# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport os\nimport itertools\nimport collections\nimport time\n\nimport imagenet_categories\nfrom resnet import ResNet\nfrom tensorflow.python.ipu import config\n\n\nclass ImageClassifier(object):\n\n def __init__(self, weights_path = './weights'):\n \"\"\"Builds a TensorFlow image classifier model for Graphcore IPUs.\"\"\"\n # Set compile and device options\n cfg = config.IPUConfig()\n cfg.auto_select_ipus = 1\n cfg.configure_ipu_system()\n\n # Build a Graph that computes the predictions from the inference model.\n img_size = 224\n num_classes = 1000\n checkpoint_file = os.path.join(weights_path, '16bit-0')\n\n self.jpeg_input = tf.placeholder(tf.string)\n raw_img = tf.image.decode_jpeg(self.jpeg_input, channels=3)\n image_data = tf.cast(tf.reshape(raw_img, [1, img_size, img_size, 3]), tf.float16) / 255.0\n\n # Build model\n with tf.device('/device:IPU:0'):\n with tf.variable_scope('', use_resource=True):\n self.network = ResNet(image_data, num_classes)\n self.network.build_model()\n\n # For restoring the data\n saver = tf.train.Saver()\n self.session = tf.Session()\n # Restore weights\n saver.restore(self.session, checkpoint_file)\n\n # Try to prime with a dummy image to force the graph compilation. 
Note that this is\n # a temporary workaround so that the first real inference doesn't include\n # a long time building the graph.\n try:\n jpeg_file = tf.gfile.GFile('images/zebra.jpg', 'rb').read()\n self.session.run(self.network.probs, feed_dict={self.jpeg_input: jpeg_file})\n except tf.errors.NotFoundError:\n pass\n\n def classify_image(self, image_filename):\n \"\"\"Classify a single image\n\n image_filename -- A JPEG image of the appropriate size\n\n \"\"\"\n jpeg_file = tf.gfile.GFile(image_filename, 'rb').read()\n preds = self.session.run(self.network.probs, feed_dict={self.jpeg_input: jpeg_file})\n predictions = np.squeeze(preds)\n\n # Print top predictions\n top_k = predictions.argsort()[-5:][::-1]\n print(\"\\nFilename : {0}\".format(os.path.basename(image_filename)))\n for v in top_k:\n print(\"Class {0: >3}: {1} {2:1.3g}%\".format(v, imagenet_categories.labels[v], 100 * predictions[v]))\n\n def classify_images(self, image_filenames, loop):\n \"\"\"Classify multiple images\n\n image_filenames -- list of JPEG images\n loop -- if True endlessly loop over the images\n\n \"\"\"\n if loop:\n image_filenames = itertools.cycle(image_filenames)\n timings = collections.deque(maxlen=250) # keep the most recent timings\n for f in image_filenames:\n self.classify_image(f)\n timings.append(time.time())\n if len(timings) > 1:\n fps = (len(timings) - 1) / (timings[-1] - timings[0])\n print(\"\\nAverage images per second: {0:.1f}\".format(fps))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Classify images using ResNet-18')\n parser.add_argument('image', type=str, nargs='+',\n help='image file name(s) or single directory')\n parser.add_argument('--loop', action=\"store_true\",\n help=\"Endlessly loop through all the images\")\n args = parser.parse_args()\n\n # If a directory was given then get the files in it\n if len(args.image) == 1 and os.path.isdir(args.image[0]):\n image_filenames = [os.path.join(args.image[0], f) for f in\n os.listdir(args.image[0]) if not f.startswith('.')]\n else:\n image_filenames = args.image\n # Filter out non-jpeg images\n image_filenames = [f for f in image_filenames if tf.gfile.Exists(f) and\n f.lower().endswith(('.jpg', '.jpeg'))]\n\n if image_filenames:\n print(\"{0} image(s) found\".format(len(image_filenames)))\n ic = ImageClassifier()\n ic.classify_images(image_filenames, args.loop)\n else:\n print(\"No image files found.\")\n", "sub_path": "simple_applications/tensorflow/resnet18_inference/classify_images.py", "file_name": "classify_images.py", "file_ext": "py", "file_size_in_byte": 4821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tensorflow.python.ipu.config.IPUConfig", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.python.ipu.config", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.image.decode_jpeg", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.float16", 
"line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.device", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 49, "usage_type": "call"}, {"api_name": "resnet.ResNet", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.errors", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "imagenet_categories.labels", "line_number": 82, "usage_type": "attribute"}, {"api_name": "itertools.cycle", "line_number": 92, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 96, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.gfile.Exists", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 117, "usage_type": "attribute"}]} +{"seq_id": "484127123", "text": "from flask import Flask,session, url_for, request, redirect, abort, jsonify, render_template\nfrom BookDao import bookDao\n\napp = Flask(__name__, static_url_path='', static_folder='staticpages')\n\n\n@app.route('/')\ndef home():\n if not 'username' in session:\n return redirect(url_for('login'))\n\n return 'welcome ' + session['username'] +\\\n '
logout'\n\nc\n@app.route('/books')\ndef getAll():\n return jsonify(bookDao.getAll())\n# find By id\n\n\n@app.route('/books/')\ndef findById(ISBN):\n return jsonify(bookDao.findById(ISBN))\n\n\n@app.route('/books/searchtitle/')\ndef searchbytitle(title):\n print(title)\n return jsonify(bookDao.searchbytitle(title))\n\n# create\n# curl -X POST -i -H \"Content-Type:application/json\" -d \"{\\\"ISBN\\\":\\\"1234\\\",\\\"title\\\":\\\"test\\\", \\\"author\\\":\\\"some guy\\\", \\\"price\\\":123}\" http://127.0.0.1:5000/books\n\n\n@app.route('/books', methods=['POST'])\ndef create():\n\n if not request.json:\n abort(400)\n\n book = {\n \"ISBN\": request.json[\"ISBN\"],\n \"title\": request.json[\"title\"],\n \"author\": request.json[\"author\"],\n \"price\": request.json[\"price\"]\n }\n return jsonify(bookDao.create(book))\n\n return \"served by Create \"\n\n#update\n# curl -X PUT -i -H \"Content-Type:application/json\" -d \"{\\\"Title\\\":\\\"new Title\\\", \\\"Price\\\":999}\" -H \"content-type:application/json\" http://127.0.0.1:5000/books/1\n\n\n@app.route('/books/', methods=['PUT'])\ndef update(ISBN):\n foundBook = bookDao.findById(ISBN)\n print(foundBook)\n if foundBook == {}:\n return jsonify({}), 404\n currentBook = foundBook\n if 'title' in request.json:\n currentBook['title'] = request.json['title']\n if 'author' in request.json:\n currentBook['author'] = request.json['author']\n if 'price' in request.json:\n currentBook['price'] = request.json['price']\n bookDao.update(currentBook)\n\n return jsonify(currentBook)\n\n#delete\n# curl -X DELETE http://127.0.0.1:5000/books/1\n\n\n@app.route('/books/', methods=['DELETE'])\ndef delete(ISBN):\n bookDao.delete(ISBN)\n\n return jsonify({\"done\": True})\n\n\n@app.route('/books/display/')\ndef display(ISBN):\n book = bookDao.findById(ISBN)\n print(book)\n return render_template('displayxss.html', title=book['title'])\n\n\n@app.route('/books/baddisplay/')\ndef baddisplay(ISBN):\n book = bookDao.findById(ISBN)\n print(book)\n return ' you like '+ book['title'] + ''\n\n\n@app.route('/books/badhello')\ndef badhello():\n name = request.args.get('username')\n return ' hello ' + name + ''\n\n\n#### login urls\napp.secret_key = 'someSecrtetasdrgsadfgsdfg3ko'\n\n\n@app.route('/login')\ndef login():\n return '

login

' +\\\n ''\n\n@app.route('/processlogin')\ndef proccess_login():\n #check credentials\n #if bad redirect to login page again\n\n #else\n print(\"logging in\")\n session['username'] = \"I dunno2\"\n return redirect(url_for('home'))\n\n\n@app.route('/logout')\ndef logout():\n print(session['username'])\n session.pop('username', None)\n if 'username' in session:\n print('o no')\n print(session['username'])\n else:\n print(\"nowt here\")\n #session['username'] ='3434'\n #print(session['username'])\n\n #return 'done'\n return redirect(url_for('home'))\n\n\n@app.route('/secure')\ndef getData():\n if not 'username' in session:\n abort(401)\n\n print(session['username'])\n return '{\"data\":\"Top Secret stuff\"}'\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "sub_path": "semester2/ABStuff-NotMine/week02-vunerabilities/server1-improper-inputs.py", "file_name": "server1-improper-inputs.py", "file_ext": "py", "file_size_in_byte": 3729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 18, "usage_type": "call"}, {"api_name": "BookDao.bookDao.getAll", "line_number": 18, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 24, "usage_type": "call"}, {"api_name": "BookDao.bookDao.findById", "line_number": 24, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "BookDao.bookDao.searchbytitle", "line_number": 30, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}, {"api_name": "BookDao.bookDao.create", "line_number": 48, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 48, "usage_type": "name"}, {"api_name": "BookDao.bookDao.findById", "line_number": 58, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 63, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "BookDao.bookDao.update", "line_number": 69, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": "BookDao.bookDao.delete", "line_number": 79, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 81, "usage_type": "call"}, {"api_name": "BookDao.bookDao.findById", "line_number": 86, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "BookDao.bookDao.findById", "line_number": 93, "usage_type": "call"}, {"api_name": "BookDao.bookDao", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "346885703", "text": "\"\"\" Nudging to the Freeway environment\n\nIn this script, we apply optimal nudging to the freeway environment of OpenAI gym, available at: \nhttps://gym.openai.com/envs/Freeway-v0/\n\nFor solving an average reward problem, having a recurrent state and to generate the Bertsekas Split,\nwe modified the enviroment in the following way:\n\nState: RGB image of shape (210, 160, 3) \n\nRecurrent state: the initial state is always the state after the first 500 steps\n\nActions: 0 - no move\n 1 - one step forward\n 2 - one step backwards\n The action 
selected by the agent is executed with a 98% probability\n\nReward: +1.0 - if the agent crosses the street\n -1.0 - if the agent is hit by a car\n -1.0 - the agent has not crossed the street after 2000 steps\n 0.0 - otherwise\n\nThe episode ends if it meets any of the following conditions:\n * The agent has crossed the street\n * The agent was hit by a car\n * After 2000 steps, none of the two previous situations have occurred\n\nIn this script, nudging uses, in every iteration, DQN as a black box to approximate the \nvalue of the recurrent state. This value is used to update (reduce) the enclosing triangle \nand to update the gain value.\n\nWe modified the DQN implementation from Stable Baselines, available at\nhttps://github.com/hill-a/stable-baselines, to incorporate the new reward definition and \nthe recurrent state.\n\n\nThis script requires the following packages be installed within the Python \nenvironment you are running this script in.\n * numpy\n * matplotlib\n * opencv\n * gym\n\n\"\"\"\n\nimport os\n# comment if not using GPU\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\nimport sys\nsys.path.append('./deepq')\nsys.path.append('./utils')\n\nfrom utils.atari_wrappers import make_atari\nfrom deepq.policies import MlpPolicy, CnnPolicy\nfrom deepq.dqn import DQN\nfrom nudge.nudge_functions import *\n\n# --------- remove extra verbosity ---------\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning) \n\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\nos.environ['KMP_WARNINGS'] = 'off'\n# ------------------------------------------\n\n\ndef plot_rho_value(rhos, values, directory):\n \"\"\" Plots the evolution of the gain (rho) and the value of the recurrent state\n \n Args:\n rho - array with all rho values given by nudging\n values - array of all values for the recurrent state, during all interations of nudging\n directory - path where to save the .png image\n \"\"\"\n steps = np.arange(len(rhos))\n plt.subplot(1,2,1)\n plt.plot(rhos)\n plt.grid(True)\n plt.title('Gain (ρ)', color='black')\n plt.xlabel('Steps')\n plt.subplot(1,2,2)\n plt.plot(values)\n plt.grid(True)\n plt.title('Value recurrent state (sI)', color='black')\n plt.xlabel('Steps')\n plt.savefig(directory+'evol_rho_value_sI.png')\n plt.close()\n\n# path to save data\nfolder = f'./results_nudging/'\ntry:\n os.mkdir(folder)\nexcept:\n pass\n\n# create enviroment\nenv = make_atari('FreewayNoFrameskip-v0')\n\n# a bound on unsigned, unnudged reward. 
This bound can be obtained running q-learning in an\n# undiscounted Freeway problem and getting the value of the initial state for the optimal policy\nD = 1.25\n# Initialize enclosing triangle\nset_initial_enclosing_triangle(D)\n# alpha for getting rho via alpha-nudging\nalpha = 0.3\n# max number of iterations for nudging\nmaxItersNudging = 10\n\n\n# load undiscounted model (the same used to get D)\nvalues_s0 = []\nrhos = []\nmodel = DQN.load(load_path=f'./models/freeway_dqn_base', env=env)\nmodel.gamma = 1.0\nmodel.learning_rate = 0.5e-4\nmodel.full_tensorboard_log = True\nmodel.tensorboard_log = folder\n\n# maximum number of steps for the black-box solver (DQN)\nmaxsteps = 800000\n\n# Get recurrent state (state_sI)\nenv.reset()\nfor _ in range(500):\n obs, _, _, _ = env.step(0)\n state_sI = obs\n\n\n\n###################### Begins Nudged Learning Algorithm ######################\nfor i in range(maxItersNudging):\n env.reset()\n print(f'************* Iteration {i} ************* ')\n\n # approximate rho evaluating over several points in the left and right uncertainty \n rho = get_r(points=1000000)\n \n # get rho as in the intersection point of the conic section of the left and right uncertainty\n # rho = get_optimal_rho_value() \n\n # uncomment the following line for doing alpha-nudging\n # rho = get_alpha_rho_value(alpha=alpha)\n\n print(f'rho = {rho}')\n\n # train DQN with new rho value\n model.learn(total_timesteps=maxsteps, directory=folder, iteration=i, rho=rho, log_interval=100)\n # save model\n model.save(f'{folder}/freeway_opt_nudge_{i}')\n\n # get q_values for recurrent state\n _, q_values = model.predict(state_sI) \n # get value of recurrent state\n v_k = max(q_values[0])\n print(f'Value s0 = {v_k}')\n\n values_s0.append(v_k)\n rhos.append(rho) \n\n # update the enclosing triangle vertices, given the new value of the recurrent state\n exit_code, m = update_enclosing_triangle(rho, v_k, i, folder)\n print(f'exit code {exit_code}, m={m}')\n if exit_code==4 or exit_code==-1:\n break \n\n# plot the evolution of rho and the valur of sI, during nudging \nplot_rho_value(rhos, values_s0, folder)\n\n# save records for rho and value of sI\nnp.save(f'{folder}summary_rhos.npy',rhos)\nnp.save(f'{folder}summary_values_sI.npy',values_s0)\n \n\n\n", "sub_path": "Freeway/train_freeway_nudging.py", "file_name": "train_freeway_nudging.py", "file_ext": "py", "file_size_in_byte": 5425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "warnings.filterwarnings", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.logging.set_verbosity", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.atari_wrappers.make_atari", "line_number": 99, "usage_type": "call"}, {"api_name": "deepq.dqn.DQN.load", "line_number": 115, "usage_type": "call"}, {"api_name": "deepq.dqn.DQN", "line_number": 115, "usage_type": "name"}]} +{"seq_id": 
"558461026", "text": "# main.py\nfrom gevent.wsgi import WSGIServer\nfrom flask import Flask, render_template, stream_with_context, Response\nfrom camera import VideoCamera\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\ndef feedStream(camera):\n\t\"\"\"\n\tfeedStream streams camera images (frames)\n\t\"\"\"\n\twhile True:\n\t\tframe = camera.get_frame()\n\t\tyield (b'--frame\\r\\n'\n b'Content-Type: video/mjpg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n@app.route('/video_feed')\ndef video_feed():\n\t\"\"\"\n\tyou could add algorithm above this portion to prevent users\n\twho shouldn't view the video from viewing it. \n\tThis could also have algos for managing expired streams.\n\t\"\"\"\n\treturn Response(feedStream(VideoCamera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\nif __name__ == '__main__':\n\thttp_server = WSGIServer(('0.0.0.0', 5001), app)\n\thttp_server.serve_forever()", "sub_path": "CamStrmmer/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}, {"api_name": "camera.get_frame", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "camera.VideoCamera", "line_number": 28, "usage_type": "call"}, {"api_name": "gevent.wsgi.WSGIServer", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "493199783", "text": "from flask import Blueprint, render_template, request\nfrom flask_login import login_required\nfrom datetime import date\nfrom ..model import *\nfrom ..patrol import recond\n\npatrol = Blueprint('patrol', __name__,\n url_prefix='/patrol',\n static_folder='static',\n template_folder='templates')\n\n\n@patrol.route('/primary', methods=['GET', 'POST'])\ndef primary():\n if request.method == 'POST':\n select_date = request.values.get('select_date')\n period = Period.query.filter_by(period_name='morning').first()\n return render_template('primary.html', myDate=select_date)\n else:\n today = date.today()\n return render_template('primary.html', myDate=today)\n\n\n@patrol.route('/distributor')\ndef distributor():\n return render_template('distributor.html')\n\n\n@patrol.route('/station')\ndef station():\n return render_template('station.html')\n", "sub_path": "app/patrol/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "patrol.route", 
"line_number": 13, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "patrol.route", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "patrol.route", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "443354585", "text": "#!/usr/bin/env python\n\nimport json\nimport argparse\nimport subprocess\n\n\ndef main():\n inventory = {'all': {'hosts': [], 'vars': {'ansible_user': 'root'}}}\n hostvars = {}\n\n inventory['client'] = client(hostvars)\n inventory['server'] = server(hostvars)\n inventory['monitor'] = monitor(hostvars)\n\n for type in ['client', 'server', 'monitor']:\n for host in inventory[type]['hosts']:\n inventory['all']['hosts'].append(host)\n\n # noqa https://github.com/ansible/ansible/commit/bcaa983c2f3ab684dca6c2c2c8d1997742260761\n inventory['_meta'] = {'hostvars': hostvars}\n\n parser = argparse.ArgumentParser(description=\"DO droplet inventory\")\n parser.add_argument('--list', action='store_true',\n help=\"List DO droplet inventory\")\n parser.add_argument('--host', help='List details of a droplet')\n args = parser.parse_args()\n\n if args.list:\n print(json.dumps(inventory))\n elif args.host:\n print(json.dumps(hostvars.get(args.host, {})))\n\n\ndef client(hostvars):\n client = {'hosts': []}\n\n for i in range(int(get_scalar_value(\"client_count\"))):\n name = \"client%d\" % i\n client['hosts'].append(name)\n\n # Get the public IPv4 for the droplet reachability.\n address = get_array_value(\"client_public_ipv4\", i)\n hostvars[name] = {'ansible_host': address, 'server': {}}\n\n # Setup the server related variables.\n hostvars[name]['server']['ipv4'] = get_array_value(\"server_public_ipv4\", i)\n hostvars[name]['server']['ipv4_private'] = get_array_value(\"server_private_ipv4\", i)\n hostvars[name]['server']['ipv6'] = get_array_value(\"server_public_ipv6\", i)\n hostvars[name]['server']['flip'] = get_array_value(\"server_flip\", i)\n hostvars[name]['server']['port'] = get_scalar_value(\"server_port\")\n\n return client\n\n\ndef server(hostvars):\n server = {'hosts': []}\n\n for i in range(int(get_scalar_value(\"server_count\"))):\n name = \"server%d\" % i\n server['hosts'].append(name)\n\n # Get the public IPv4 for the droplet reachability.\n address = get_array_value(\"server_public_ipv4\", i)\n hostvars[name] = {'ansible_host': address, 'server': {}}\n\n # Setup the server related variables.\n port = get_scalar_value(\"server_port\")\n hostvars[name]['server']['port'] = port\n\n return server\n\n\ndef monitor(hostvars):\n monitor = {'hosts': []}\n\n for i in range(int(get_scalar_value(\"monitor_count\"))):\n name = \"monitor%d\" % i\n monitor['hosts'].append(name)\n\n # Get the public IPv4 for the droplet reachability.\n address = get_array_value(\"monitor_public_ipv4\", i)\n hostvars[name] = {'ansible_host': address}\n\n return monitor\n\n\ndef get_scalar_value(value):\n return subprocess.Popen(\"terraform output %s\" % value,\n shell=True, stdout=subprocess.PIPE\n ).stdout.read().decode('utf-8').strip('\\n').replace('\"', '')\n\n\ndef get_array_value(value, index):\n return subprocess.Popen(\"terraform output -json %s|jq '.value[%d]'\" % (value, index),\n shell=True, stdout=subprocess.PIPE\n ).stdout.read().decode('utf-8').strip('\\n').replace('\"', '')\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "firewalls/both/000-deny-all/inventory.py", "file_name": "inventory.py", "file_ext": "py", "file_size_in_byte": 
3315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 89, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 95, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 96, "usage_type": "attribute"}]} +{"seq_id": "503695608", "text": "# @Author : Fizzyi\nimport pygame\nfrom bullet import Bullet\nfrom alien import Alien\nfrom random import randint\nfrom time import sleep\ndef check_events(ai_settings,screen,stats,play_button,ship,aliens,bullets):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_event(event,ai_settings,screen,ship,bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_event(event,ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n #mouse.get-pos()获取鼠标点击的坐标\n mouse_x,mouse_y = pygame.mouse.get_pos()\n check_play_button(ai_settings,screen,stats,play_button,ship,aliens,bullets,mouse_x,mouse_y)\n\n\ndef check_keyup_event(event,ship):\n if event.key == pygame.K_RIGHT:\n ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n ship.moving_left = False\n elif event.key == pygame.K_UP:\n ship.moving_up = False\n elif event.key == pygame.K_DOWN:\n ship.moving_down = False\ndef check_keydown_event(event,ai_settings,screen,ship,bullets):\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n ship.moving_left = True\n elif event.key == pygame.K_UP:\n ship.moving_up = True\n elif event.key == pygame.K_DOWN:\n ship.moving_down = True\n\ndef add_bullets(ai_settings,screen,ship,bullets):\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)\n\ndef update_screen(ai_settings,screen,stats,sb,ship,aliens,bg,bullets,play_button):\n screen.blit(bg, (0, 0))\n #显示得分\n sb.show_score()\n for bullet in bullets.sprites():\n bullet.blit_bullet()\n ship.blitme()\n aliens.draw(screen)\n if not stats.game_active:\n play_button.draw_button()\n\n pygame.display.flip()\n\ndef update_bullets(ai_settings,screen,stats,sb,ship,aliens,bullets):\n # 删除消失的子弹\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)\n\ndef create_fleet(ai_settings,screen,ship,aliens):\n '''创建外星人群'''\n #确定每行能创建多少个外星人\n alien = Alien(ai_settings,screen)\n number_aliens_x = get_number_aliens_x(ai_settings,alien.rect.width)\n # #创建第一行外星人\n alien_number = randint(0,number_aliens_x)\n #创建一个外星人并且将其加入当前行\n create_alien(ai_settings,screen,aliens,alien_number)\ndef get_number_aliens_x(ai_settings,alien_width):\n #计算每行可以容纳多少个外星人\n available_space_x = ai_settings.scrren_width - 2 * alien_width\n number_alien_x = int(available_space_x / ( 1.5 * alien_width))\n return number_alien_x\ndef create_alien(ai_settings,screen,aliens,alien_number):\n #创建外星人\n alien = Alien(ai_settings, screen)\n alien_width = alien.rect.width\n alien.x = alien_width + 1.5 * alien_width * alien_number\n alien.rect.x = alien.x\n alien.rect.y = 0\n aliens.add(alien)\n\ndef update_aliens(ai_settings,stats,screen,ship,aliens,bullets):\n #更新外星人的位置\n aliens.update()\n if 
pygame.sprite.spritecollideany(ship,aliens):\n ship_hit(ai_settings,stats,screen,ship,aliens,bullets)\n check_alien_bottom(ai_settings,stats,screen,ship,aliens,bullets)\ndef ship_hit(ai_settings,stats,screen,ship,aliens,bullets):\n '''响应被外星人撞击的飞船'''\n if stats.ships_left > 0:\n # 将ships_left减1\n stats.ships_left -= 1\n #清空外星人列表和子弹列表\n aliens.empty()\n bullets.empty()\n #暂停\n sleep(0.5)\n else:\n stats.game_active = False\ndef check_alien_bottom(ai_settings,stats,screen,ship,aliens,bullets):\n '''检查是否有外星人到达屏幕底端'''\n screen_rect = screen.get_rect()\n for alien in aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n ship_hit(ai_settings,stats,screen,ship,aliens,bullets)\n break\ndef check_play_button(ai_settings,screen,stats,play_button,ship,aliens,bullets,mouse_x,mouse_y):\n #玩家单击play按钮时开始新游戏\n '''使用collidepoint函数传入鼠标点击的坐标可以得到坐标是否在button内'''\n button_clicked = play_button.rect.collidepoint(mouse_x,mouse_y)\n if button_clicked and not stats.game_active:\n #重置游戏统计信息\n stats.reset_stats()\n stats.game_active = True\n\n aliens.empty()\n bullets.empty()\ndef check_bullet_alien_collisions(ai_settings,screen,stats,sb,ship,aliens,bullets):\n '''响应子弹和外星人发生碰撞'''\n #删除发生碰撞的子弹和外星人\n collisions = pygame.sprite.groupcollide(bullets,aliens,True,True)\n\n if collisions:\n for aliens in collisions.values():\n stats.score += ai_settings.alien_points * len(aliens)\n sb.prep_score()\n check_high_score(stats,sb)\ndef check_high_score(stats,sb):\n '''检查是否诞生了最高分'''\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n sb.prep_high_score()\nif __name__ == '__main__':\n pass\n", "sub_path": "Python_homework/pygame_aircraft_battle1.0/game_functions.py", "file_name": "game_functions.py", "file_ext": "py", "file_size_in_byte": 5301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pygame.event.get", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "bullet.Bullet", "line_number": 42, "usage_type": "call"}, {"api_name": "bullet.blit_bullet", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 56, "usage_type": "attribute"}, {"api_name": "bullet.rect", "line_number": 61, "usage_type": "attribute"}, {"api_name": "alien.Alien", "line_number": 68, "usage_type": 
"call"}, {"api_name": "alien.rect", "line_number": 69, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 71, "usage_type": "call"}, {"api_name": "alien.Alien", "line_number": 81, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 82, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 83, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 84, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 84, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 91, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 127, "usage_type": "attribute"}]} +{"seq_id": "595063814", "text": "import autograd.numpy as np\nfrom autograd import jacobian\nfrom autograd.tracer import getval\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nfrom trajopt.envs.quanser.qube.base import QubeBase, QubeDynamics, ActionLimiter\n\n\nclass Qube(QubeBase):\n def __init__(self, fs, fs_ctrl):\n super(Qube, self).__init__(fs, fs_ctrl)\n self.dyn = QubeDynamics()\n self._sim_state = None\n self._vis = {'vp': None, 'arm': None, 'pole': None, 'curve': None}\n\n def _set_gui(self):\n scene_range = 0.2\n arm_radius = 0.003\n arm_length = 0.085\n pole_radius = 0.0045\n pole_length = 0.129\n # http://www.glowscript.org/docs/VPythonDocs/canvas.html\n self._vis['vp'].scene.width = 400\n self._vis['vp'].scene.height = 300\n self._vis['vp'].scene.background = self._vis['vp'].color.gray(0.95)\n self._vis['vp'].scene.lights = []\n self._vis['vp'].distant_light(\n direction=self._vis['vp'].vector(0.2, 0.2, 0.5),\n color=self._vis['vp'].color.white)\n self._vis['vp'].scene.up = self._vis['vp'].vector(0, 0, 1)\n self._vis['vp'].scene.range = scene_range\n self._vis['vp'].scene.center = self._vis['vp'].vector(0.04, 0, 0)\n self._vis['vp'].scene.forward = self._vis['vp'].vector(-2, 1.2, -1)\n self._vis['vp'].box(pos=self._vis['vp'].vector(0, 0, -0.07),\n length=0.09, width=0.1, height=0.09,\n color=self._vis['vp'].color.gray(0.5))\n self._vis['vp'].cylinder(\n axis=self._vis['vp'].vector(0, 0, -1), radius=0.005,\n length=0.03, color=self._vis['vp'].color.gray(0.5))\n # Arm\n arm = self._vis['vp'].cylinder()\n arm.radius = arm_radius\n arm.length = arm_length\n arm.color = self._vis['vp'].color.blue\n # Pole\n pole = self._vis['vp'].cylinder()\n pole.radius = pole_radius\n pole.length = pole_length\n pole.color = self._vis['vp'].color.red\n # Curve\n curve = self._vis['vp'].curve(color=self._vis['vp'].color.white,\n radius=0.0005, retain=2000)\n return arm, pole, curve\n\n def _calibrate(self):\n _low, _high = np.array([-0.1, - np.pi / 36., -0.1, -0.1]),\\\n np.array([0.1, np.pi / 36., 0.1, 0.1])\n self._sim_state = self._np_random.uniform(low=_low, high=_high)\n self._state = self._zero_sim_step()\n\n def _sim_step(self, u):\n # add action noise\n u = u + np.random.randn(1) * 1e-2\n\n u_cmd = self._lim_act(self._sim_state, u)\n # u_cmd = np.clip(u, self.action_space.low, self.action_space.high)\n\n thdd, aldd = self.dyn(self._sim_state, u_cmd)\n\n # Update internal simulation state\n self._sim_state[3] += 
self.timing.dt * aldd\n self._sim_state[2] += self.timing.dt * thdd\n self._sim_state[1] += self.timing.dt * self._sim_state[3]\n self._sim_state[0] += self.timing.dt * self._sim_state[2]\n\n # apply state constraints\n self._sim_state = np.clip(self._sim_state, self.state_space.low, self.state_space.high)\n\n # add observation noise\n self._sim_state = self._sim_state + np.random.randn(4) * 1e-4\n\n return self._sim_state, u\n\n def reset(self):\n self._calibrate()\n if self._vis['curve'] is not None:\n self._vis['curve'].clear()\n return self.step(np.array([0.0]))[0]\n\n def render(self, mode='human'):\n if self._vis['vp'] is None:\n import importlib\n self._vis['vp'] = importlib.import_module('vpython')\n self._vis['arm'],\\\n self._vis['pole'],\\\n self._vis['curve'] = self._set_gui()\n th, al, _, _ = self._state\n arm_pos = (self.dyn.Lr * np.cos(th), self.dyn.Lr * np.sin(th), 0.0)\n pole_ax = (-self.dyn.Lp * np.sin(al) * np.sin(th),\n self.dyn.Lp * np.sin(al) * np.cos(th),\n self.dyn.Lp * np.cos(al))\n self._vis['arm'].axis = self._vis['vp'].vector(*arm_pos)\n self._vis['pole'].pos = self._vis['vp'].vector(*arm_pos)\n self._vis['pole'].axis = self._vis['vp'].vector(*pole_ax)\n self._vis['curve'].append(\n self._vis['pole'].pos + self._vis['pole'].axis)\n self._vis['vp'].rate(self.timing.render_rate)\n\n\nclass QubeTO(gym.Env):\n\n def __init__(self):\n self.dm_state = 4\n self.dm_act = 1\n\n self._dt = 0.01\n\n self._sigma = 1e-8 * np.eye(self.dm_state)\n\n self.dyn = QubeDynamics()\n\n # g = [th, al, thd, ald]\n self._g = np.array([0., np.pi, 0., 0.])\n self._gw = np.array([1e-1, 1e0, 1e-2, 1e-3])\n\n # x = [x, th, dx, dth]\n self._xmax = np.array([2.3, np.inf, 30., 40.])\n self.observation_space = spaces.Box(low=-self._xmax,\n high=self._xmax)\n\n self._uw = np.array([1e-3])\n self._umax = 10.0\n self.action_space = spaces.Box(low=-self._umax,\n high=self._umax, shape=(1,))\n\n safety_th_lim = 1.5\n self._lim_act = ActionLimiter(self.observation_space,\n self.action_space,\n safety_th_lim)\n\n self.state = None\n self.np_random = None\n\n self.seed()\n\n _low, _high = np.array([-0.1, - np.pi / 18., -0.1, -0.1]),\\\n np.array([0.1, np.pi / 18., 0.1, 0.1])\n self._x0 = self.np_random.uniform(low=_low, high=_high)\n self._sigma_0 = 1e-4 * np.eye(self.dm_state)\n\n @property\n def xlim(self):\n return self._xmax\n\n @property\n def ulim(self):\n return self._umax\n\n @property\n def dt(self):\n return self._dt\n\n @property\n def goal(self):\n return self._g\n\n def init(self):\n return self._x0, self._sigma_0\n\n def dynamics(self, x, u):\n _u = self._lim_act(x, u)\n # _u = np.clip(u, -self._umax, self._umax)\n\n def f(x, u):\n thdd, aldd = self.dyn(x, u)\n return np.hstack((x[2], x[3], thdd, aldd))\n\n k1 = f(x, _u)\n k2 = f(x + 0.5 * self.dt * k1, _u)\n k3 = f(x + 0.5 * self.dt * k2, _u)\n k4 = f(x + self.dt * k3, _u)\n\n xn = x + self.dt / 6. * (k1 + 2. * k2 + 2. 
* k3 + k4)\n\n xn = np.clip(xn, -self._xmax, self._xmax)\n return xn\n\n def features(self, x):\n return x\n\n def features_jacobian(self, x):\n _J = jacobian(self.features, 0)\n _j = self.features(x) - _J(x) @ x\n return _J, _j\n\n def noise(self, x=None, u=None):\n _u = np.clip(u, -self._umax, self._umax)\n _x = np.clip(x, -self._xmax, self._xmax)\n return self._sigma\n\n def cost(self, x, u, a):\n _J, _j = self.features_jacobian(getval(x))\n _x = _J(getval(x)) @ x + _j\n return a * (_x - self._g).T @ np.diag(self._gw) @ (_x - self._g) + u.T @ np.diag(self._uw) @ u\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, u):\n # state-action dependent noise\n _sigma = self.noise(self.state, u)\n # evolve deterministic dynamics\n self.state = self.dynamics(self.state, u)\n # add noise\n self.state = self.np_random.multivariate_normal(mean=self.state, cov=_sigma)\n return self.state, [], False, {}\n\n def reset(self):\n _mu_0, _sigma_0 = self.init()\n self.state = self.np_random.multivariate_normal(mean=_mu_0, cov=_sigma_0)\n return self.state\n\n\nclass QubeTOWithCartesianCost(QubeTO):\n\n def __init__(self):\n super(QubeTOWithCartesianCost, self).__init__()\n\n # g = [th, cs_al, sn_al, dth, dal]\n self._g = np.array([0., -1., 0., 0., 0.])\n self._gw = np.array([1e-1, 1e0, 0., 1e-2, 1e-3])\n\n def features(self, x):\n return np.array([x[0],\n np.cos(x[1]), np.sin(x[1]),\n x[2], x[3]])\n", "sub_path": "trajopt/envs/quanser/qube/qube.py", "file_name": "qube.py", "file_ext": "py", "file_size_in_byte": 8080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "trajopt.envs.quanser.qube.base.QubeBase", "line_number": 12, "usage_type": "name"}, {"api_name": "trajopt.envs.quanser.qube.base.QubeDynamics", "line_number": 15, "usage_type": "call"}, {"api_name": "autograd.numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 59, "usage_type": "name"}, {"api_name": "autograd.numpy.pi", "line_number": 59, "usage_type": "attribute"}, {"api_name": "autograd.numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 60, "usage_type": "name"}, {"api_name": "autograd.numpy.pi", "line_number": 60, "usage_type": "attribute"}, {"api_name": "autograd.numpy.random.randn", "line_number": 66, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "autograd.numpy", "line_number": 66, "usage_type": "name"}, {"api_name": "autograd.numpy.clip", "line_number": 80, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 80, "usage_type": "name"}, {"api_name": "autograd.numpy.random.randn", "line_number": 83, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "autograd.numpy", "line_number": 83, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 91, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 96, "usage_type": "call"}, {"api_name": "autograd.numpy.cos", "line_number": 101, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 101, "usage_type": "name"}, {"api_name": "autograd.numpy.sin", "line_number": 101, "usage_type": "call"}, {"api_name": "autograd.numpy.sin", "line_number": 102, "usage_type": "call"}, 
{"api_name": "autograd.numpy", "line_number": 102, "usage_type": "name"}, {"api_name": "autograd.numpy.sin", "line_number": 103, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 103, "usage_type": "name"}, {"api_name": "autograd.numpy.cos", "line_number": 103, "usage_type": "call"}, {"api_name": "autograd.numpy.cos", "line_number": 104, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 104, "usage_type": "name"}, {"api_name": "gym.Env", "line_number": 113, "usage_type": "attribute"}, {"api_name": "autograd.numpy.eye", "line_number": 121, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 121, "usage_type": "name"}, {"api_name": "trajopt.envs.quanser.qube.base.QubeDynamics", "line_number": 123, "usage_type": "call"}, {"api_name": "autograd.numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 126, "usage_type": "name"}, {"api_name": "autograd.numpy.pi", "line_number": 126, "usage_type": "attribute"}, {"api_name": "autograd.numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 127, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 130, "usage_type": "name"}, {"api_name": "autograd.numpy.inf", "line_number": 130, "usage_type": "attribute"}, {"api_name": "gym.spaces.Box", "line_number": 131, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 131, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 134, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 136, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 136, "usage_type": "name"}, {"api_name": "trajopt.envs.quanser.qube.base.ActionLimiter", "line_number": 140, "usage_type": "call"}, {"api_name": "autograd.numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 149, "usage_type": "name"}, {"api_name": "autograd.numpy.pi", "line_number": 149, "usage_type": "attribute"}, {"api_name": "autograd.numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 150, "usage_type": "name"}, {"api_name": "autograd.numpy.pi", "line_number": 150, "usage_type": "attribute"}, {"api_name": "autograd.numpy.eye", "line_number": 152, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 152, "usage_type": "name"}, {"api_name": "autograd.numpy.hstack", "line_number": 179, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 179, "usage_type": "name"}, {"api_name": "autograd.numpy.clip", "line_number": 188, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 188, "usage_type": "name"}, {"api_name": "autograd.jacobian", "line_number": 195, "usage_type": "call"}, {"api_name": "autograd.numpy.clip", "line_number": 200, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 200, "usage_type": "name"}, {"api_name": "autograd.numpy.clip", "line_number": 201, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 201, "usage_type": "name"}, {"api_name": "autograd.tracer.getval", "line_number": 205, "usage_type": "call"}, {"api_name": "autograd.tracer.getval", "line_number": 206, "usage_type": "call"}, {"api_name": "autograd.numpy.diag", "line_number": 207, "usage_type": "call"}, {"api_name": "autograd.numpy", 
"line_number": 207, "usage_type": "name"}, {"api_name": "gym.utils.seeding.np_random", "line_number": 210, "usage_type": "call"}, {"api_name": "gym.utils.seeding", "line_number": 210, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 234, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 234, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 235, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 235, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 238, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 238, "usage_type": "name"}, {"api_name": "autograd.numpy.cos", "line_number": 239, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 239, "usage_type": "name"}, {"api_name": "autograd.numpy.sin", "line_number": 239, "usage_type": "call"}]} +{"seq_id": "113578427", "text": "# ## Step 2 - Climate App\n\n# Now that you have completed your initial analysis, design a Flask API based on the queries that you have just developed.\n\n# * Use FLASK to create your routes.\n\n#Import of Dependencys\nimport numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom flask import Flask, jsonify\n\nimport datetime as dt\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\n# Database Setup\nengine = create_engine(\"sqlite:///Instructions/Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n# Flask Setup\napp = Flask(__name__)\n\n# Flask Routes\n\n# * Home page.\n\n# * List all routes that are available.\n\n@app.route(\"/\")\ndef route_available():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/start_date
\"\n f\"/api/v1.0/start_date/end_date
\"\n f\"*****FORMAT ALL DATES: yyyy-mm-dd*****\"\n )\n# `/api/v1.0/precipitation`\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n \"\"\"Return a Json of Precipitation Dates and actual precipitation\"\"\"\n # Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n # Return the JSON representation of your dictionary.\n all_precipitation = []\n for date, prcp in results:\n precipitation_dict = {}\n precipitation_dict[\"date\"] = date\n precipitation_dict[\"prcp\"] = prcp\n all_precipitation.append(precipitation_dict)\n\n return jsonify(all_precipitation)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n \"\"\"Return a list of JSON list of stations from the dataset.\"\"\"\n # Return a JSON list of stations from the dataset.\n results = session.query(Station.name, Station.station).all()\n\n # Convert list of tuples into normal list\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n# `/api/v1.0/tobs`\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n \"\"\"Return a list of JSON list of stations from the dataset.\"\"\"\n # Query for the dates and temperature observations from a year from the last data point.\n\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date\n\n current_date = dt.datetime.strptime(last_date,'%Y-%m-%d').date()\n prior_year = dt.datetime.strptime(last_date, '%Y-%m-%d').date() + relativedelta(years=-1)\n\n results = session.query(session.query(Measurement.date, Measurement.prcp)).filter(Measurement.date >= prior_year).all()\n\n # Convert list of tuples into normal list\n all_tobs = list(np.ravel(results))\n # Return a JSON list of Temperature Observations (tobs) for the previous year.\n return jsonify(all_tobs)\n\n\n\n## * Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.\n\n# * When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date.\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n \"\"\"Return a list of JSON list of stations from the dataset.\"\"\"\n start_date = '2011-02-28'\n end_date = '2011-03-05'\n results_start =session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).group_by(Measurement.date).all()\n \n all_tobs = []\n for DATE, TMIN, TAVG, TMAX in results_start:\n tobs_dict={}\n tobs_dict[\"DATE\"]= DATE\n tobs_dict[\"TMIN\"]= TMIN\n tobs_dict[\"TAVG\"]= TAVG\n tobs_dict[\"TMAX\"]= TMAX\n all_tobs.append(tobs_dict)\n return jsonify(all_tobs)\n\n# * When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.\n\n@app.route(\"/api/v1.0//\")\ndef date(start,end):\n results_start_end =session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <=end).group_by(Measurement.date).all()\n \n all_tobs = []\n for DATE,TMIN, TAVG, TMAX in results_start_end:\n tobs_dict={}\n tobs_dict[\"DATE\"]= DATE\n tobs_dict[\"TMIN\"]= TMIN\n tobs_dict[\"TAVG\"]= TAVG\n tobs_dict[\"TMAX\"]= TMAX\n all_tobs.append(tobs_dict)\n return jsonify(all_tobs)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "hw.py", "file_name": "hw.py", "file_ext": 
"py", "file_size_in_byte": 4944, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.automap.automap_base", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 66, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 103, "usage_type": "call"}, {"api_name": "sqlalchemy.func.min", "line_number": 116, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 116, "usage_type": "name"}, {"api_name": "sqlalchemy.func.avg", "line_number": 116, "usage_type": "call"}, {"api_name": "sqlalchemy.func.max", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 127, "usage_type": "call"}, {"api_name": "sqlalchemy.func.min", "line_number": 133, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 133, "usage_type": "name"}, {"api_name": "sqlalchemy.func.avg", "line_number": 133, "usage_type": "call"}, {"api_name": "sqlalchemy.func.max", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "420985628", "text": "\nimport os\nimport time\nimport resource\nimport copy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import ListedColormap, BoundaryNorm\nfrom tensorflow.keras.callbacks import Callback\nfrom tensorflow.keras import backend\nfrom tensorflow.keras import Model\n\nfrom lib.classes.dataset_classes.SubjectDataset import SubjectDataset\n\nclass BabyTrainingCallback(Callback):\n '''\n BabyTrainingCallback: Custom callback for recording data during training. Built for version-3 code base\n\n Types of figures to create:\n • png and eps for every figure\n • Filter response for every week\n •Necessary data:\n •Full dataset\n •Filter response for each week\n • Dual figure of coefficient vs time and predicted label vs target label\n •Necessary data:\n •Filters and bias\n •Loss\n •Target label\n •Predicted label\n •Feature names and their indices relative to filters\n\n Saving cycle:\n • Checkpoint the model every ? epochs, with its epoch number and loss value stored\n • Save figures at the end of training only\n\n Wishlist:\n •Automatically pull a zoom of this best responses? 
That's too much to ask I think\n '''\n\n def __init__(self, master_config):\n self.data_config = master_config.Core_Config.Data_Config\n self.save_config = master_config.Core_Config.Save_Config\n self.model_config = master_config.Core_Config.Model_Config\n\n self.child_name = master_config.child_name\n\n self.iteration_parameters = master_config.iteration_parameters\n\n self.subject = SubjectDataset(master_config=master_config)\n\n self.epoch_count = 0\n self.recent_epoch_logs = 0\n self.epoch_start_time = 0\n\n def on_train_begin(self, logs):\n print(\"Training begin!\", flush=True)\n\n def on_epoch_begin(self, epoch, logs):\n\n self.epoch_start_time = time.time()\n\n def on_epoch_end(self, epoch, logs):\n\n print(\"Log keys: {}\\n\".format(logs.keys()), flush=True)\n\n self.recent_epoch_logs = self._create_log(logs)\n if self.epoch_count % self.save_config.Output.checkpoint_trigger == 0:\n self._checkpoint()\n\n self.epoch_count += 1\n\n total_epoch_time = time.time() - self.epoch_start_time\n print(\"Total epoch time: {} secs\".format(total_epoch_time), flush=True)\n\n def on_train_end(self, logs):\n\n print(\"Training finished!\", flush=True)\n\n #print(\"Input: {}\\n\\n\".format(self.recent_epoch_logs[\"input\"]), flush=True)\n #print(\"Input 0: {}\\n\\n\".format(self.recent_epoch_logs[\"split_input_0\"]), flush=True)\n #print(\"Input 1: {}\\n\\n\".format(self.recent_epoch_logs[\"split_input_1\"]), flush=True)\n\n self._checkpoint()\n\n ### Save out both types of figures we're currently using\n print(\"Saving out figures...\", flush=True)\n\n ### Save dual fig as png and eps\n self._create_dual_figs()\n\n ### Save filter responses as png and eps\n self._collect_filter_response_figs()\n\n ### Save trajectories as png and eps\n #self._collect_trajectory_figs()\n\n print(\"Done saving figures!\", flush=True)\n\n def on_test_batch_end(self, batch, logs):\n\n self.recent_epoch_logs = self._create_log(logs)\n\n self.on_train_end(logs)\n\n def _checkpoint(self):\n print(\"Checkpointing...\", flush=True)\n self._save_model()\n self._create_statistics_file()\n print(\"Finished checkpointing!\", flush=True)\n\n def _save_model(self):\n print(\"Saving model...\", flush=True)\n save_folder_path = \"results/\" + self.save_config.Output.batch_name + \"/\" + self.child_name\n save_folder_path += \"/model_checkpoints/\"\n\n if not os.path.exists(save_folder_path):\n os.mkdir(save_folder_path)\n\n self.model.save_weights(save_folder_path + \"model_weights_epoch_{}.h5\".format(self.epoch_count))\n print(\"Model saved!\", flush=True)\n\n def _create_statistics_file(self):\n \n ### Create statistics representative of training\n ### This includes printing total loss and all separate regularization losses\n print(\"Saving statistics...\", flush=True)\n save_file_path = \"results/\" + self.save_config.Output.batch_name + \"/\" + self.child_name\n save_file_path += \"/statistics.txt\"\n\n total_loss = self.recent_epoch_logs[\"loss\"]\n convolutional_l2_loss = self.recent_epoch_logs[\"convolutional_l2_loss\"]\n convolutional_activity_loss = self.recent_epoch_logs[\"convolutional_activity_loss\"]\n convolutional_dot_loss = self.recent_epoch_logs[\"convolutional_dot_loss\"]\n core_loss = self.recent_epoch_logs[\"core_loss\"]\n #core_loss = total_loss - convolutional_l2_loss - convolutional_activity_loss - convolutional_dot_loss\n\n with open(save_file_path, \"w\") as f:\n f.write(\"Experiment parameters: {}\\n\".format(self.iteration_parameters))\n f.write(\"Epoch count: {}\\n\".format(self.epoch_count))\n 
f.write(\"Total loss: {}\\n\".format(round(total_loss, 5)))\n f.write(\"Core loss: {}\\n\".format(round(core_loss, 5)))\n f.write(\"L2 loss: {}\\n\".format(round(convolutional_l2_loss, 5)))\n f.write(\"Activity loss: {}\\n\".format(round(convolutional_activity_loss, 5)))\n f.write(\"Dot loss: {}\\n\".format(round(convolutional_dot_loss, 5)))\n\n print(\"Statistics saved!\", flush=True)\n\n def _get_all_feature_xyz(self):\n feature_names = copy.deepcopy(self.data_config.feature_names)\n raw_names = [name.replace(\"_x\", \"\") for name in feature_names]\n raw_names = [name.replace(\"_y\", \"\") for name in raw_names]\n raw_names = [name.replace(\"_z\", \"\") for name in raw_names]\n\n unique_names = list(set(raw_names))\n\n feature_xyz_dict = {}\n for unique_name in unique_names:\n\n xyz_dict = self._get_single_feature_xyz(unique_name, feature_names)\n if xyz_dict:\n feature_xyz_dict[unique_name] = copy.deepcopy(xyz_dict)\n\n return feature_xyz_dict\n\n def _get_single_feature_xyz(self, unique_name, feature_names):\n \n x_index = -1\n y_index = -1\n z_index = -1\n for i, feature_name in enumerate(feature_names):\n if unique_name in feature_name:\n if \"_x\" in feature_name:\n x_index = i\n elif \"_y\" in feature_name:\n y_index = i\n elif \"_z\" in feature_name:\n z_index = i\n\n xyz_dict = {}\n if x_index != -1:\n xyz_dict[\"x\"] = x_index\n\n if y_index != -1:\n xyz_dict[\"y\"] = y_index\n\n if z_index != -1:\n xyz_dict[\"z\"] = z_index\n\n if xyz_dict:\n return xyz_dict\n else:\n return\n\n\n def _collect_trajectory_figs(self):\n\n print(\"Saving trajectory figs...\", flush=True)\n \n ### Get dictionary of all features and their xyz components\n feature_xyz_dict = self._get_all_feature_xyz()\n\n ### If it's null, return\n if not feature_xyz_dict:\n return\n\n ### Loop through available xyz dicts\n for feature_name, xyz_dict in feature_xyz_dict.items():\n\n for i in range(self.model_config.Convolution.n_filters):\n\n for j in range(self.subject.get_n_weeks()):\n\n ### Check if xyz dict has x and z\n if \"x\" in xyz_dict.keys() and \"z\" in xyz_dict.keys():\n self._create_xvz_trajectory_fig(filter_index=i, week_index=j, feature_name=feature_name,\n x_index=xyz_dict[\"x\"], z_index=xyz_dict[\"z\"])\n\n ### Check if xyz dict has x and y\n if \"x\" in xyz_dict.keys() and \"z\" in xyz_dict.keys():\n self._create_xvy_trajectory_fig(filter_index=i, week_index=j, feature_name=feature_name,\n x_index=xyz_dict[\"x\"], y_index=xyz_dict[\"y\"])\n\n print(\"Done saving trajectory figs!\", flush=True)\n\n def _create_xvz_trajectory_fig(self, filter_index, week_index, x_index, z_index, feature_name):\n\n ### Generate features\n subject_generator = self.subject.get_full_generator()\n features, labels = next(subject_generator)\n\n ### Create figure\n fig, axs = plt.subplots(1)\n fig.set_figwidth(self.save_config.Callback.figwidth)\n fig.set_figheight(self.save_config.Callback.figheight)\n fig.tight_layout()\n fig.suptitle(\"Week \"+str(week_index+1)+\" Trajectory Response: X vs Z\")\n\n ### Get individual points\n x_points = features[0, week_index, :, x_index]\n z_points = features[0, week_index, :, z_index]\n\n ### Create combined points and segments\n points = np.array([x_points, z_points]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n ### Get convolutional output from logs\n conv_output = self.recent_epoch_logs[\"convolutional_output\"]\n\n ### Define colormap function\n norm = plt.Normalize(conv_output[0,week_index,:,filter_index].min(), 
conv_output[0,week_index,:,filter_index].max())\n lc = LineCollection(segments, cmap='viridis', norm=norm)\n # Set the values used for colormapping\n lc.set_array(conv_output[0,week_index,:,filter_index])\n lc.set_linewidth(2)\n line = axs.add_collection(lc)\n fig.colorbar(line, ax=axs)\n\n ### Set margins\n margin = .3\n half_x_points = x_points.max() - (x_points.min()+x_points.max())/2\n axs.set_xlim(x_points.min()-half_x_points*margin, x_points.max()+half_x_points*margin)\n half_z_points = z_points.max() - (z_points.min()+z_points.max())/2\n axs.set_ylim(z_points.min()-half_z_points*margin, z_points.max() + half_z_points*margin)\n\n ### Set labels\n axs.set_xlabel(\"Z Position\")\n axs.set_ylabel(\"X Position\")\n\n ### Save filter response fig as png and eps\n filter_response_folder_path = \"results/\" + self.save_config.Output.batch_name + \"/\" + self.child_name + \"/trajectory-responses/\"\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"{}/\".format(feature_name)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"x-vs-z/\".format(filter_index)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"filter-{}/\".format(filter_index)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"week-{}/\".format(week_index+1)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n single_response_png_path = filter_response_folder_path + \"week-{}.png\".format(week_index+1)\n fig.savefig(single_response_png_path, dpi=200, format=\"png\", bbox_inches=\"tight\")\n single_response_eps_path = filter_response_folder_path + \"week-{}.eps\".format(week_index+1)\n fig.savefig(single_response_eps_path, dpi=200, format=\"eps\", bbox_inches=\"tight\")\n\n plt.close(fig)\n\n def _create_xvy_trajectory_fig(self, filter_index, week_index, x_index, y_index, feature_name):\n\n ### Generate features\n subject_generator = self.subject.get_full_generator()\n features, labels = next(subject_generator)\n\n ### Create figure\n fig, axs = plt.subplots(1)\n fig.set_figwidth(self.save_config.Callback.figwidth)\n fig.set_figheight(self.save_config.Callback.figheight)\n fig.tight_layout()\n fig.suptitle(\"Week \"+str(week_index+1)+\" Trajectory Response: X vs Y\")\n\n ### Get individual points\n x_points = features[0, week_index, :, x_index]\n y_points = features[0, week_index, :, y_index]\n\n ### Create combined points and segments\n points = np.array([x_points, y_points]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n ### Get convolutional output from logs\n conv_output = self.recent_epoch_logs[\"convolutional_output\"]\n\n ### Define colormap function\n norm = plt.Normalize(conv_output[0,week_index,:,filter_index].min(), conv_output[0,week_index,:,filter_index].max())\n lc = LineCollection(segments, cmap='viridis', norm=norm)\n # Set the values used for colormapping\n lc.set_array(conv_output[0,week_index,:,filter_index])\n lc.set_linewidth(2)\n line = axs.add_collection(lc)\n fig.colorbar(line, ax=axs)\n\n ### Set margins\n margin = .3\n half_x_points = x_points.max() - (x_points.min()+x_points.max())/2\n axs.set_xlim(x_points.min()-half_x_points*margin, x_points.max()+half_x_points*margin)\n half_y_points = y_points.max() - 
(y_points.min()+y_points.max())/2\n axs.set_ylim(y_points.min()-half_y_points*margin, y_points.max()+half_y_points*margin)\n\n ### Set labels\n axs.set_xlabel(\"Y Position\")\n axs.set_ylabel(\"X Position\")\n\n ### Save filter response fig as png and eps\n filter_response_folder_path = \"results/\" + self.save_config.Output.batch_name + \"/\" + self.child_name + \"/trajectory-responses/\"\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"{}/\".format(feature_name)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"x-vs-y/\".format(filter_index)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"filter-{}/\".format(filter_index)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"week-{}/\".format(week_index+1)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n single_response_png_path = filter_response_folder_path + \"week-{}.png\".format(week_index+1)\n fig.savefig(single_response_png_path, dpi=200, format=\"png\", bbox_inches=\"tight\")\n single_response_eps_path = filter_response_folder_path + \"week-{}.eps\".format(week_index+1)\n fig.savefig(single_response_eps_path, dpi=200, format=\"eps\", bbox_inches=\"tight\")\n\n plt.close(fig)\n\n def _collect_filter_response_figs(self):\n\n print(\"Saving filter responses...\", flush=True)\n \n for i in range(self.model_config.Convolution.n_filters):\n\n for j in range(self.subject.get_n_weeks()):\n\n self._create_filter_response_fig(filter_index=i, week_index=j)\n\n print(\"Done saving filter responses!\", flush=True)\n\n def _create_filter_response_fig(self, filter_index, week_index):\n\n ### Generate features\n subject_generator = self.subject.get_full_generator()\n features, labels = next(subject_generator)\n\n ### Get convolutional output from logs\n conv_output = self.recent_epoch_logs[\"convolutional_output\"]\n\n ### Create figure\n fig, axs = plt.subplots(1)\n fig.set_figwidth(self.save_config.Callback.figwidth)\n fig.set_figheight(self.save_config.Callback.figheight)\n fig.tight_layout()\n fig.suptitle(\"Week \"+str(week_index+1)+\" Filter Response\")\n\n ### Get colormap cycle\n color_map_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n ### Create scaling variables for figure\n big_max = np.max(features[0,week_index,:,0])\n if np.max(features[0,week_index,:,1]) > big_max:\n big_max = np.max(features[0,week_index,:,1])\n elif np.max(features[0,week_index,:,2]) > big_max:\n big_max = np.max(features[0,week_index,:,2])\n\n ### Adjust axis settings\n for i in range(features.shape[3]):\n axs.plot(features[0,week_index,:,i], color=color_map_cycle[i])\n\n ### Start plotting on the axes\n filter_linestyle = \"--\"\n filter_height = 1.5\n filter_mag = .5\n axs.plot(conv_output[0,week_index,:,filter_index]*big_max*filter_mag+big_max*filter_height, color=color_map_cycle[3])\n axs.plot(np.full(shape=conv_output[0,week_index,:,filter_index].shape, fill_value=0+big_max*filter_height),\n linestyle=filter_linestyle,\n color=color_map_cycle[3])\n axs.plot(np.full(shape=conv_output[0,week_index,:,filter_index].shape, fill_value=big_max*filter_mag + big_max*filter_height),\n linestyle=filter_linestyle,\n color=color_map_cycle[3])\n\n ### Set up axis limits\n 
axs.set_ylim(top=big_max*filter_mag+big_max*2)\n axs.set_ylabel(\"centimeters\")\n this_yticks = axs.get_yticks()\n axs.set_yticklabels(np.around(this_yticks*100, decimals=0))\n axs.set_xlabel(\"time (sec)\")\n\n ### left_x and right_x represent data bounds in seconds\n left_x = 0\n right_x = 300\n\n ### Create padding\n padding_distance = .013 * ((right_x/60*3000)-(left_x/60*3000))\n axs.set_xlim(left=(left_x/60*3000)-padding_distance, right=(right_x/60*3000)+padding_distance)\n\n ### Set the legend\n fig = self._set_legend_filter_response_fig(fig, self.data_config.feature_names, left_x=left_x, right_x=right_x)\n\n ###### Make correct folder\n ### Save filter response fig as png and eps\n filter_response_folder_path = \"results/\" + self.save_config.Output.batch_name + \"/\" + self.child_name + \"/filter-responses/\"\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"filter-{}/\".format(filter_index)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n filter_response_folder_path += \"week-{}/\".format(week_index+1)\n if not os.path.exists(filter_response_folder_path):\n os.mkdir(filter_response_folder_path)\n\n single_response_png_path = filter_response_folder_path + \"week-{}.png\".format(week_index+1)\n fig.savefig(single_response_png_path, dpi=200, format=\"png\", bbox_inches=\"tight\")\n single_response_eps_path = filter_response_folder_path + \"week-{}.eps\".format(week_index+1)\n fig.savefig(single_response_eps_path, dpi=200, format=\"eps\", bbox_inches=\"tight\")\n\n plt.close(fig)\n\n def _set_legend_filter_response_fig(self, figure, feature_names, left_x=0, right_x=300):\n \n ### Get colormap cycle\n color_map_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n ax_list = figure.axes\n \n handles = []\n for i in range(len(ax_list)):\n handles[:] = []\n for j in range(len(feature_names)):\n line = mlines.Line2D([], [], color=color_map_cycle[j], markersize=15, label=feature_names[j])\n handles.append(line)\n ax_list[i].legend(handles=handles, loc=\"upper right\", prop={\"size\":10})\n ### Set x tick locations\n minx = left_x/60*3000\n maxx = right_x/60*3000\n ax_list[i].set_xticks(np.arange(minx, maxx+1, (maxx-minx)/10))\n ### Set x tick labels\n maxx_seconds = right_x\n num_labels = np.arange(left_x, right_x+1, (right_x-left_x)/10)\n num_labels = np.around(num_labels, decimals=2)\n ax_list[i].set_xticklabels(map(str, num_labels))\n \n return figure\n\n def _create_dual_figs(self):\n\n print(\"Saving dual figs...\", flush=True)\n\n ### Get feature names for figure labels\n feature_names = self.data_config.feature_names\n\n ### Get loss value, rounded to 5 decimal places\n loss = round(self.recent_epoch_logs[\"loss\"], 5)\n\n ### Get convolutional filter and bias\n conv_filter = self.model.get_weights()[0]\n bias = self.model.get_weights()[1]\n\n ### Get n_filters\n n_filters = conv_filter.shape[2]\n\n fig_list = []\n\n for i in range(n_filters):\n\n ### Create blank figure\n fig, axes = plt.subplots(1, 2)\n fig.set_figwidth(self.save_config.Callback.figwidth)\n fig.set_figheight(self.save_config.Callback.figheight)\n fig.tight_layout()\n plt.subplots_adjust(wspace=.2)\n\n ### Insert text into figure\n fig.text(x=.1, y=1.2, s=\"Epoch: \"+str(self.epoch_count), fontsize=13)\n fig.text(x=.1, y=1.05, s=\"Loss: \"+str(np.round(loss, decimals=5)), fontsize=13)\n fig.text(x=.25, y=1.05, s=\"Bias: \"+str(np.round(bias[0], decimals=3)), fontsize=13)\n\n ### 
Deal with axes\n if len(axes.shape) == 1:\n axes = np.expand_dims(axes, axis=0)\n\n ### Generate features\n subject_generator = self.subject.get_full_generator()\n features, labels = next(subject_generator)\n\n ### Get predicted labels\n predicted_labels = self.recent_epoch_logs[\"predicted_labels\"]\n\n # Loop for every filter\n axes = self._modify_axes(axes, i, conv_filter, predicted_labels, labels)\n\n # Handle this method\n height_loc_list = [1.6, 1.45]\n fig = self._set_legend_dual_fig(fig, height_loc_list, feature_names)\n fig_list.append(fig)\n \n dual_fig_folder_path = \"results/\" + self.save_config.Output.batch_name + \"/\" + self.child_name + \"/dual-figs/\"\n if not os.path.exists(dual_fig_folder_path):\n os.mkdir(dual_fig_folder_path)\n\n for i in range(len(fig_list)):\n current_filter_dual_fig = \"\"\n current_filter_dual_fig += dual_fig_folder_path\n current_filter_dual_fig += \"filter-{}/\".format(i)\n if not os.path.exists(current_filter_dual_fig):\n os.mkdir(current_filter_dual_fig)\n\n dual_fig_png_path = current_filter_dual_fig + \"dual-fig.png\"\n dual_fig_eps_path = current_filter_dual_fig + \"dual-fig.eps\"\n fig_list[i].savefig(dual_fig_png_path, dpi=200, format=\"png\", bbox_inches=\"tight\")\n fig_list[i].savefig(dual_fig_eps_path, dpi=200, format=\"eps\", bbox_inches=\"tight\")\n\n plt.close(fig_list[i])\n\n print(\"Done saving dual figs!\", flush=True)\n\n def _modify_axes(self, axes, filter_index, conv_filter, predicted_labels, labels):\n\n #### Below this is mostly old code, with some replaced variable names. Haven't touched it, because it's a mess\n\n mask_week_vector = self.subject.valid_mask\n color_map_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n min_label = 0-(conv_filter[:,0,0].shape[0]/2)\n max_label = 0+(conv_filter[:,0,0].shape[0]/2)\n num_labels = np.arange(min_label, max_label, 1)\n\n # Plot the filter axis\n for j in range(conv_filter.shape[1]):\n axes[0, 0].plot(conv_filter[:,j,filter_index], color=color_map_cycle[j])\n \n ########################\n ### Set x tick locations\n minx = 0\n maxx = conv_filter[:,0,0].shape[0]\n axes[0, 0].set_xticks(np.arange(minx, maxx+1, maxx/10))\n \n ### Set x tick labels\n maxx_seconds = (maxx/2) / 3000 * 60\n num_labels = np.arange(minx-maxx_seconds, maxx_seconds+1, maxx_seconds*2/10)\n num_labels = np.around(num_labels, decimals=2)\n axes[0, 0].set_xticklabels(map(str, num_labels))\n ########################\n \n # Plot the rate axis\n mask_predicted_labels = np.ma.masked_where(mask_week_vector == 0, predicted_labels[0,:,filter_index])\n \n axes[0, 1].plot(mask_predicted_labels, color=color_map_cycle[0])\n axes[0, 1].plot(labels[0,:,filter_index], color=color_map_cycle[1])\n \n num_labels = np.arange(-1, mask_predicted_labels.shape[0]+1, 2)\n axes[0, 1].set_xticklabels(map(str, num_labels))\n\n return axes\n\n\n def _set_legend_dual_fig(self, figure, height_loc_list, feature_names):\n\n color_map_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n rate_names = [\"Predicted Labels\", \"Actual Labels\"]\n \n ax_list = figure.axes\n \n handles = []\n for i in range(len(feature_names)):\n line = mlines.Line2D([], [], color=color_map_cycle[i], markersize=15, label=feature_names[i])\n handles.append(line)\n ax_list[0].legend(handles=handles, loc=\"upper right\", bbox_to_anchor=(1, height_loc_list[0]), prop={\"size\":10})\n ax_list[0].set_xlabel(\"Time (sec)\")\n ax_list[0].set_ylabel(\"Coefficient\")\n \n handles[:] = []\n for i in range(len(rate_names)):\n line = mlines.Line2D([], [], 
color=color_map_cycle[i], markersize=15, label=rate_names[i])\n handles.append(line)\n ax_list[1].legend(handles=handles, loc=\"upper right\", bbox_to_anchor=(1, height_loc_list[1]), prop={\"size\": 10})\n ax_list[1].set_xlabel(\"Week\")\n ax_list[1].set_ylabel(\"Rate\")\n \n return figure\n ### Old code ends here\n\n def _create_log(self, logs):\n\n logs[\"core_loss\"] = logs[\"loss\"]\n logs[\"core_loss\"] -= logs[\"convolutional_activity_loss\"]\n logs[\"core_loss\"] -= logs[\"convolutional_dot_loss\"]\n logs[\"core_loss\"] -= logs[\"convolutional_l2_loss\"]\n\n return logs\n\n\n\n\n\n", "sub_path": "lib/classes/callback_classes/BabyTrainingCallback_legacy.py", "file_name": "BabyTrainingCallback_legacy.py", "file_ext": "py", "file_size_in_byte": 26032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tensorflow.keras.callbacks.Callback", "line_number": 18, "usage_type": "name"}, {"api_name": "lib.classes.dataset_classes.SubjectDataset.SubjectDataset", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 121, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 153, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path", "line_number": 275, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path", "line_number": 283, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, 
{"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path", "line_number": 347, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path", "line_number": 355, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 363, "usage_type": "call"}, {"api_name": "os.path", "line_number": 363, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 395, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 402, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 402, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 409, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path", "line_number": 448, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 469, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 469, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 
477, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 477, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 483, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 514, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 514, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 518, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 518, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 522, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 545, "usage_type": "call"}, {"api_name": "os.path", "line_number": 545, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 552, "usage_type": "call"}, {"api_name": "os.path", "line_number": 552, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 553, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 560, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 560, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 569, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 573, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 588, "usage_type": "call"}, {"api_name": "numpy.ma.masked_where", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 593, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 598, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 606, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 606, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 622, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 622, "usage_type": "name"}]} +{"seq_id": "31155369", "text": "import requests\r\nimport pandas as pd\r\nimport os\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\n\r\ndir_path = os.path.dirname(os.path.realpath(__file__))\r\ndata_path = dir_path + '/Data/'\r\nif not(os.path.exists(data_path)):\r\n os.mkdir(data_path)\r\ncsv_file_path = dir_path + '/Data/COVID-19-DailyData.csv'\r\n\r\n\r\ndef is_older_than_today(filename):\r\n modifiedTime = os.path.getmtime(filename)\r\n modifiedDate = datetime.date.fromtimestamp(modifiedTime)\r\n todaysDate = datetime.date.today()\r\n return modifiedDate < todaysDate\r\n\r\n\r\n# scrapes the website if need be\r\nurl = 'https://opendata.ecdc.europa.eu/covid19/casedistribution/csv'\r\nif not(os.path.exists(csv_file_path)) or is_older_than_today(csv_file_path):\r\n r = requests.get(url, allow_redirects=True)\r\n with open(csv_file_path, 'wb') as csv:\r\n csv.write(r.content)\r\n\r\n# Dataframe 
manipulation\r\ndf = pd.read_csv(csv_file_path, engine='python')\r\nsum_df = df.groupby(['countriesAndTerritories', 'popData2018'])[\r\n ['cases', 'deaths']].apply(sum)\r\nsum_df.reset_index(inplace=True)\r\nG7Countries = ['Canada', 'United_States_of_America',\r\n 'France', 'Italy', 'Japan', 'United_Kingdom', 'Germany']\r\nsum_df = sum_df.loc[sum_df['countriesAndTerritories'].isin(G7Countries)]\r\nsum_df['DeathRate'] = (sum_df['deaths'] / sum_df['cases']) * 100\r\nG7Countries_df = sum_df.nlargest(7, 'DeathRate')\r\nprint(G7Countries_df)\r\n\r\n\r\n# plotting\r\nplt.plot(G7Countries_df['countriesAndTerritories'],\r\n G7Countries_df['DeathRate'], marker='o', color='red')\r\nfor x, y in zip(G7Countries_df['countriesAndTerritories'], G7Countries_df['DeathRate']):\r\n plt.annotate(str(round(y, 2)) + \"%\", xy=(x, y))\r\nplt.title(\"G7 Countries COVID-19 death rates\")\r\nplt.xlabel(\"Country\")\r\nplt.ylabel(\"Death Rate (%)\")\r\nmng = plt.get_current_fig_manager()\r\nmng.full_screen_toggle()\r\nplt.xticks(rotation=30)\r\nplt.grid()\r\nplt.tight_layout()\r\nsavefigname = dir_path + \"\\\\\" + str(datetime.date.today()) + \".png\"\r\nplt.savefig(savefigname)\r\nplt.show()\r\n", "sub_path": "COVID-19-G7-Country-DeathRates.py", "file_name": "COVID-19-G7-Country-DeathRates.py", "file_ext": "py", "file_size_in_byte": 2005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.getmtime", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "datetime.date.fromtimestamp", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 16, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "218519527", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# test.py\n# @Author : ()\n# @Link : \n# @Date : 2/13/2019, 1:47:06 PM\n\n\n#coding=utf-8\n# from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import QApplication, QDialog,QWidget, QFileDialog, QPushButton, QLineEdit, QGridLayout\nimport sys, os\n \nclass MyLoadTskList(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n self.initTaskList()\n def initTaskList(self):\n global connectserver\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.model = QStandardItemModel()\n self.ui.btsure.clicked.connect(self.test)\n \n tsklist = [u'北京', u'南京', u'海南', u'青岛', u'西安']\n #model = QStandardItemModel()\n for task in tsklist:\n item = QStandardItem(QString(task))\n item.setCheckState(False)\n item.setCheckable(True)\n self.model.appendRow(item)\n self.ui.listView.setModel(self.model)\n def test(self):\n #获取选中的item的index\n print (\"hello this is LoadTskList\")\n lsd = []\n for i in range(self.model.rowCount()):\n if self.model.item(i).checkState():\n index = i + 1\n lsd.append(index)\n print (lsd)\n \napp = QApplication(sys.argv)\ntsk = MyLoadTskList()\ntsk.show()\napp.exec_()\n", "sub_path": "study/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog.__init__", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "559065249", "text": "import psutil\r\nfrom tkinter import messagebox\r\nimport tkinter\r\n\r\n# hide the main tkinter window to just show the message box\r\ntkinter.Tk().withdraw()\r\n\r\nwarning_limit = 20\r\n\r\ndisk_free = round(psutil.disk_usage('/').free/1024/1024/1024,2) \r\ndisk_total = round(psutil.disk_usage('/').total/1024/1024/1024,2)\r\ndisk_used = round(psutil.disk_usage('/').used/1024/1024/1024,2)\r\n\r\ndisk_percent = psutil.disk_usage('/').percent\r\nif disk_percent < warning_limit:\r\n messagebox.showwarning(title=\"Disk Usage Warning!\",\r\n message=f\"\"\"Percentage of disk used ({disk_percent}) > warning set ({warning_limit})\r\n Free: {disk_free} GB\r\n 
Total: {disk_total} GB\r\n Used: {disk_used} GB\"\"\")\r\n", "sub_path": "PyDisck_space_reader.py", "file_name": "PyDisck_space_reader.py", "file_ext": "py", "file_size_in_byte": 703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tkinter.Tk", "line_number": 6, "usage_type": "call"}, {"api_name": "psutil.disk_usage", "line_number": 10, "usage_type": "call"}, {"api_name": "psutil.disk_usage", "line_number": 11, "usage_type": "call"}, {"api_name": "psutil.disk_usage", "line_number": 12, "usage_type": "call"}, {"api_name": "psutil.disk_usage", "line_number": 14, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showwarning", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "111326261", "text": "from ortools.linear_solver import pywraplp\n\nfrom players.models import Player\n\n\nclass Roster:\n POSITION_ORDER = {\n \"QB\": 0,\n \"RB\": 1,\n \"WR\": 2,\n \"TE\": 3,\n \"K\": 4,\n \"D\": 5\n }\n\n def __init__(self):\n self.players = []\n\n def add_player(self, player):\n self.players.append(player)\n\n def spent(self):\n return sum(map(lambda x: x.salary, self.players))\n\n def projected(self):\n return sum(map(lambda x: x.fantasy_points_per_game, self.players))\n\n def position_order(self, player):\n return self.POSITION_ORDER[player.position]\n\n def sorted_players(self):\n return sorted(self.players, key=self.position_order)\n\n def __repr__(self):\n s = '\\n'.join(str(x) for x in self.sorted_players())\n s += \"\\n\\nProjected Score: %s\" % self.projected()\n s += \"\\tCost: $%s\" % self.spent()\n return s\n\n\nSALARY_CAP = 60000\n\nPOSITION_LIMITS = [\n [\"QB\", 1, 1],\n [\"RB\", 2, 2],\n [\"WR\", 3, 3],\n [\"TE\", 1, 1],\n [\"K\", 1, 1],\n [\"D\", 1, 1]\n]\n\nROSTER_SIZE = 9\n\n\ndef solve(locked_players):\n\n solver = pywraplp.Solver('FD', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n all_players = [player for player in Player.objects.exclude(injury__contains='O').exclude(injury__contains='IR').exclude(injury__contains='NA')]\n\n variables = []\n\n for player in all_players:\n if player.id in locked_players:\n variables.append(solver.IntVar(1, 1, player.first_name + player.last_name + player.team.name))\n elif player.injury == 'P':\n variables.append(solver.IntVar(0, .9, player.first_name + player.last_name + player.team.name))\n elif player.injury == 'Q':\n variables.append(solver.IntVar(0, .5, player.first_name + player.last_name + player.team.name))\n else:\n variables.append(solver.IntVar(0, 1, player.first_name + player.last_name + player.team.name))\n\n objective = solver.Objective()\n objective.SetMaximization()\n\n for i, player in enumerate(all_players):\n objective.SetCoefficient(variables[i], player.fantasy_points_per_game)\n\n salary_cap = solver.Constraint(0, SALARY_CAP)\n for i, player in enumerate(all_players):\n salary_cap.SetCoefficient(variables[i], player.salary)\n\n for position, min_limit, max_limit in POSITION_LIMITS:\n position_cap = solver.Constraint(min_limit, max_limit)\n\n for i, player in enumerate(all_players):\n if position == player.position:\n position_cap.SetCoefficient(variables[i], 1)\n\n size_cap = solver.Constraint(ROSTER_SIZE, ROSTER_SIZE)\n for variable in variables:\n size_cap.SetCoefficient(variable, 1)\n\n solution = solver.Solve()\n\n roster = Roster()\n\n for i, player in enumerate(all_players):\n if variables[i].solution_value() == 1:\n roster.add_player(player)\n\n return 
roster.sorted_players()\n", "sub_path": "common/optimize.py", "file_name": "optimize.py", "file_ext": "py", "file_size_in_byte": 2931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ortools.linear_solver.pywraplp.Solver", "line_number": 57, "usage_type": "call"}, {"api_name": "ortools.linear_solver.pywraplp", "line_number": 57, "usage_type": "name"}, {"api_name": "players.models.Player.objects.exclude", "line_number": 59, "usage_type": "call"}, {"api_name": "players.models.Player.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "players.models.Player", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "172354380", "text": "import pytest\n\nfrom modules.commands.data_tools.templates.filter_templates import GenericTextFilter, \\\n GenericNumbersFilter\n\n\n@pytest.fixture(scope='module')\ndef generic_text_filter():\n \"\"\" Setup of generic text filter object \"\"\"\n\n text_filter = GenericTextFilter()\n yield text_filter\n\n del text_filter\n\n\n@pytest.fixture(scope='module')\ndef generic_numbers_filter():\n \"\"\" Setup of generic numbers filter object \"\"\"\n\n num_filter = GenericNumbersFilter()\n yield num_filter\n\n del num_filter\n\n\n@pytest.fixture(scope='module')\ndef fake_database_results():\n \"\"\" Setup of fake database results \"\"\"\n\n results = [\n\n {\"title\": \"The Shawshank Redemption\", \"year\": 1994, \"runtime\": \"142 min\",\n \"genre\": \"Drama\",\n \"director\": \"Frank Darabont\",\n \"writer\": \"Stephen King (short story \\\"Rita Hayworth and Shawshank \"\n \"Redemption\\\"), Frank Darabont (screenplay)\",\n \"actors\": \"Tim Robbins, Morgan Freeman, Bob Gunton, William Sadler\",\n \"language\": \"English\",\n \"awards\": \"Nominated for 7 Oscars. Another 21 wins & 35 nominations.\",\n \"imdbRating\": 9.3, \"imdbVotes\": 2217195,\n \"box_office\": None},\n\n {\"title\": \"The Godfather\", \"year\": 1972, \"runtime\": \"175 min\",\n \"genre\": \"Crime, Drama\",\n \"director\": \"Francis Ford Coppola\",\n \"writer\": \"Mario Puzo (screenplay by), Francis Ford Coppola (screenplay by), Mario Puzo (based on the novel by)\",\n \"actors\": \"Marlon Brando, Al Pacino, James Caan, Richard S. Castellano\",\n \"language\": \"English, Italian, Latin\",\n \"awards\": \"Won 3 Oscars. Another 26 wins & 30 nominations.\",\n \"imdbRating\": 9.2, \"imdbVotes\": 1516505, \"box_office\": None},\n\n {\"title\": \"The Dark Knight\", \"year\": 2008, \"runtime\": \"152 min\",\n \"genre\": \"Action, Crime, Drama, Thriller\", \"director\": \"Christopher Nolan\",\n \"writer\": \"Jonathan Nolan (screenplay), Christopher Nolan (screenplay), \"\n \"Christopher Nolan (story), David S. Goyer (story), Bob Kane (characters)\",\n \"actors\": \"Christian Bale, Heath Ledger, Aaron Eckhart, Michael Caine\",\n \"language\": \"English, Mandarin\",\n \"awards\": \"Won 2 Oscars. 
Another 153 wins & 159 nominations.\",\n \"imdbRating\": 9.0, \"imdbVotes\": 2184673, \"BoxOffice\": 533316061}\n ]\n yield results\n\n del results\n\n\n@pytest.mark.parametrize('column, args, results',\n [\n ('title', ['The'], [True, True, True]),\n ('title', ['Shawshank'], [True, False, False]),\n ('genre', ['Drama'], [True, True, True]),\n ('genre', ['Crime'], [False, True, True]),\n ('director', ['Christopher Nolan'], [False, False, True])\n ])\ndef test_generic_text_filter(fake_database_results, generic_text_filter,\n column, args, results):\n \"\"\" Test filter by different columns and values \"\"\"\n\n generic_text_filter.column_name = column\n filter_func = generic_text_filter.get_filter_function(*args)\n\n assert results[0] == filter_func(fake_database_results[0])\n assert results[1] == filter_func(fake_database_results[1])\n assert results[2] == filter_func(fake_database_results[2])\n\n\n@pytest.mark.parametrize('column, args, results',\n [\n ('year', ['gte', '1972'], [True, True, True]),\n ('year', ['gt', '1972'], [True, False, True]),\n ('year', ['e', '2008'], [False, False, True]),\n ('year', ['lt', '1995'], [True, True, False]),\n ('year', ['lte', '1994'], [True, True, False]),\n ])\ndef test_generic_num_filter(fake_database_results, generic_numbers_filter,\n column, args, results):\n \"\"\" Test filter by different columns and values \"\"\"\n\n generic_numbers_filter.column_name = column\n filter_func = generic_numbers_filter.get_filter_function(*args)\n\n assert results[0] == filter_func(fake_database_results[0])\n assert results[1] == filter_func(fake_database_results[1])\n assert results[2] == filter_func(fake_database_results[2])\n", "sub_path": "tests/test_commands/test_data_tools/test_templates/test_filter_templates.py", "file_name": "test_filter_templates.py", "file_ext": "py", "file_size_in_byte": 4337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "modules.commands.data_tools.templates.filter_templates.GenericTextFilter", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 7, "usage_type": "call"}, {"api_name": "modules.commands.data_tools.templates.filter_templates.GenericNumbersFilter", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 87, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "4465806", "text": "import argparse\nimport glob\nfrom queue import Queue\n\nfrom feature_detection.feature_detector import FeatureDetector, find_keypoints_and_descriptors\n\nTHREADS_NUMBER = 8\n\nap = argparse.ArgumentParser()\nap.add_argument('-qf', '--query_folder', required=True, help='Path to a folder with query images')\nap.add_argument('-tf', '--train_folder', required=True, help='Path to a folder with train images')\nap.add_argument(\"-df\", \"--destination_folder\", required=True, help=\"Path to a folder in which save the result\")\nargs = vars(ap.parse_args())\n\n# find the keypoints and descriptors for all query images\nquery_images = []\nfor image_path in glob.glob(args['query_folder'] + '/*'):\n keypoints, descriptors = 
find_keypoints_and_descriptors(image_path)\n query_images.append(dict(keypoints=keypoints, descriptors=descriptors))\n\ntrain_image_paths = Queue()\nfor image_path in glob.glob(args['train_folder'] + '/*'):\n train_image_paths.put(image_path)\n\nfor n in range(0, THREADS_NUMBER):\n thread = FeatureDetector(train_image_paths, query_images, args['destination_folder'])\n thread.start()\n\ntrain_image_paths.join()\n", "sub_path": "feature_detection/feature_matching.py", "file_name": "feature_matching.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 17, "usage_type": "call"}, {"api_name": "feature_detection.feature_detector.find_keypoints_and_descriptors", "line_number": 18, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 21, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 22, "usage_type": "call"}, {"api_name": "feature_detection.feature_detector.FeatureDetector", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "456117584", "text": "# Copyright (c) 2016-2022 Association of Universities for Research in Astronomy, Inc. (AURA)\n# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause\n\nimport dataclasses\nimport functools\nfrom itertools import count\nfrom multiprocessing import Process\n\nfrom scheduler.services import logger_factory\nfrom .process import ProcessTask, Result\n\njob_counter = count()\n\nlogger = logger_factory.create_logger(__name__)\n\n\n@dataclasses.dataclass(order=True)\nclass Job:\n process: ProcessTask = dataclasses.field(compare=False)\n sequence: int = dataclasses.field(compare=False,\n default_factory=functools.partial(next, job_counter))\n\n def __repr__(self):\n return f\"Job-{self.sequence}\"\n\n\n# TODO: An abstract class might be needed here, but right now RealtimeRunner can be done\n# with just the same Runner as Standard.\nclass StandardRunner:\n \"\"\"\n Main runner to handle Process objects and their associated tasks.\n \"\"\"\n\n def __init__(self, size):\n self.max_jobs = size\n self.jobs = []\n self.callbacks = []\n\n def add_done_callback(self, callback: callable) -> None:\n \"\"\"\n Adds a callback that will be invoked when a job is finished. 
Useful\n to control the scheduling of new jobs.\n \"\"\"\n self.callbacks.append(callback)\n\n async def terminated_job(self, job: Job, ptask: ProcessTask) -> None:\n \"\"\"\n Called when a job has finished.\n\n Runs any added callbacks in order to notify that a new slot is free\n for scheduling.\n \"\"\"\n res = ptask.result\n if res != Result.TERMINATED:\n # Terminated jobs had been evicted earlier (see maybe_evict) and we\n # don't need to do anything else about them.\n # The others need a bit more of work\n if res == Result.TIMEOUT:\n logger.warning(f\" - Task {job} timed out!\")\n else:\n logger.info(f\" - Task {job} is done\")\n\n try:\n del self.jobs[self.jobs.index(job)]\n except ValueError:\n logger.warning(f\" - Job {job} was not in the heap any longer!\")\n\n # Notify that we're ready to queue something new\n for callback in self.callbacks:\n callback()\n\n def _run_job(self, proc: Process, timeout: int) -> Job:\n \"\"\"\n Prepares a job and starts its associated process.\n\n Only internal use.\n \"\"\"\n ptask = ProcessTask(proc)\n job = Job(ptask)\n proc.name = f'Job-{job.sequence}'\n ptask.add_done_callback(functools.partial(self.terminated_job, job))\n ptask.start(timeout=timeout)\n\n return job\n\n def evict(self) -> None:\n \"\"\"\n Kill the latest job.\n \"\"\"\n try:\n job = self.jobs[-1]\n job.process.terminate()\n del self.jobs[-1]\n logger.info(f\" - Task {job} was evicted\")\n except ValueError:\n logger.warning(f\" - No jobs to evict!\")\n\n def terminate(self, task: ProcessTask) -> None:\n \"\"\"\n Terminates a task from the queue.\n \"\"\"\n try:\n job = self.jobs[self.jobs.index(task)]\n job.process.terminate()\n del self.jobs[self.jobs.index(task)]\n except ValueError:\n logger.warning(f\" - Task {task} was not in the heap any longer!\")\n\n def schedule(self, process: Process, timeout: int) -> bool:\n \"\"\"\n Attempts scheduling a new job.\n\n Returns True if the task was successfully scheduled,\n False otherwise.\n \"\"\"\n if len(self.jobs) == self.max_jobs:\n self.evict()\n if len(self.jobs) < self.max_jobs:\n self.jobs.append(self._run_job(process, timeout))\n print(self.jobs)\n return True\n else:\n return False\n\n def terminate_all(self) -> None:\n \"\"\"\n Ends all running processes.\n \"\"\"\n for job in self.jobs:\n job.process.terminate()\n\n self.jobs = []\n", "sub_path": "scheduler/process_manager/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 4111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "itertools.count", "line_number": 12, "usage_type": "call"}, {"api_name": "scheduler.services.logger_factory.create_logger", "line_number": 14, "usage_type": "call"}, {"api_name": "scheduler.services.logger_factory", "line_number": 14, "usage_type": "name"}, {"api_name": "process.ProcessTask", "line_number": 19, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 19, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 21, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 17, "usage_type": "call"}, {"api_name": "process.ProcessTask", "line_number": 46, "usage_type": "name"}, {"api_name": "process.Result.TERMINATED", "line_number": 54, "usage_type": "attribute"}, {"api_name": "process.Result", "line_number": 54, "usage_type": "name"}, {"api_name": "process.Result.TIMEOUT", "line_number": 58, "usage_type": 
"attribute"}, {"api_name": "process.Result", "line_number": 58, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 72, "usage_type": "name"}, {"api_name": "process.ProcessTask", "line_number": 78, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 81, "usage_type": "call"}, {"api_name": "process.ProcessTask", "line_number": 98, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 109, "usage_type": "name"}]} +{"seq_id": "451204317", "text": "# -*- coding:UTF-8 -*-\n#!/usr/bin/env python\n\n#python內建函式庫\nimport os\nimport logging\nimport datetime\nimport time\nimport csv\n\n#GAE的函式庫\nimport webapp2\nfrom google.appengine.api import users\nfrom google.appengine.ext import blobstore, db, webapp\nfrom google.appengine.ext.webapp import blobstore_handlers, template\n\n#自創函式庫\nimport methods\nfrom models import *\nfrom backend_process import *\nfrom instant_messaging import *\nfrom cron import *\nfrom filterdir.customfilters import *\n\n#--------------------------------------------------------------------\n#對前端框架的模板語法註冊新功能\nwebapp.template.register_template_library('filterdir.customfilters')\n#--------------------------------------------------------------------\n#('/', Home_Handler)\nclass Home_Handler(webapp2.RequestHandler):\n def get(self):\n values = {\n 'user': users.get_current_user(),\n 'startup_web':\"home\",\n } \n path = os.path.join(os.path.dirname(__file__), 'html','home.html')\n self.response.out.write(template.render(path, values))\n\n\n#('/clouddrive', Clouddrive_Handler)\nclass Clouddrive_Handler(webapp2.RequestHandler):\n def get(self):\n #獲取使用者對於資料庫空間使用情形\n stored_data_details = methods.Stored_Data_Details()\n #獲取目前頻寬狀態\n Bandwidth_log_Quota = methods.load_log(\"Bandwidth_log\",\"Quota\")\n values = {\n 'user': users.get_current_user(),\n 'users': users,\n 'stored_data_details':stored_data_details,\n 'upload_url': blobstore.create_upload_url('/upload'),\n 'wrappers': Wrapper.all().order('-date'),\n 'startup_web':\"clouddrive\",\n 'Bandwidth_log':Bandwidth_log_Quota,\n }\n \n path = os.path.join(os.path.dirname(__file__), 'html','clouddrive.html')\n self.response.out.write(template.render(path, values))\n\n#('/dashboard', Charts_Handler)\nclass Dashboard_Handler(webapp2.RequestHandler):\n def get(self):\n #獲取使用者對於資料庫空間使用情形\n stored_data_details = methods.Stored_Data_Details()\n #獲取log\n Bandwidth_log_Quota = methods.load_log(\"Bandwidth_log\",\"Quota\")\n Upload_log_Quota = Upload_log.all().order('date')#由舊到新\n Download_log_Quota = Download_log.all().order('date')#由舊到新\n\n logging.info(\"====================================\")\n logging.info(datetime.datetime.now().date())\n t = time.time()\n # 透過 datetime\n logging.info(datetime.datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S'))\n values = {\n 'user': users.get_current_user(),\n 'users': users,\n 'stored_data_details':stored_data_details,\n 'startup_web':\"dashboard\",\n 'Bandwidth_log':Bandwidth_log_Quota,\n 'Upload_log':Upload_log_Quota,\n 'Download_log':Download_log_Quota,\n } \n path = os.path.join(os.path.dirname(__file__), 'html','dashboard.html')\n self.response.out.write(template.render(path, values))\n\n#('/instant_messaging', Instant_Messaging_Handler)\nclass Instant_Messaging_Handler(webapp2.RequestHandler):\n def get(self):\n values = {\n 'user': users.get_current_user(),\n 'startup_web':\"instant_messaging\",\n } \n path = os.path.join(os.path.dirname(__file__), 'html','instant_messaging.html')\n 
self.response.out.write(template.render(path, values))\n\n#('/paint', Paint_Handler)\nclass Paint_Handler(webapp2.RequestHandler):\n def get(self):\n values = {\n 'user': users.get_current_user(),\n 'startup_web':\"paint\",\n }\n path = os.path.join(os.path.dirname(__file__), 'html','paint.html')\n self.response.out.write(template.render(path, values))\n\nclass Test_Handler(webapp2.RequestHandler):\n def get(self):\n pass\n\n#網址啟動\napp = webapp2.WSGIApplication([\n #前端輸出\n ('/', Home_Handler),\n ('/clouddrive', Clouddrive_Handler),\n ('/dashboard', Dashboard_Handler),\n ('/account/([^/]+)?', Account_Handler),\n ('/test1', Test_Handler),\n ('/instant_messaging', Instant_Messaging_Handler),\n ('/paint', Paint_Handler),\n #後端處理\n ('/upload', Upload_Handler),\n ('/serve/([^/]+)?', Serve_Handler),\n ('/delete', Delete_Handler),\n ('/csv', CSV_Handler),\n #即時通功能\n ('/post_msg', ReceiveHandler),\n ('/get_token', GetTokenHandler),\n ('/del_token', ReleaseTokenHandler),\n ('/open', OpenHandler),\n #排程\n ('/quotas_reset', Quotas_Reset_Handler),\n], debug=True)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "google.appengine.ext.webapp.template.register_template_library", "line_number": 27, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 27, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 27, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 30, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 33, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 37, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 37, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 41, "usage_type": "attribute"}, {"api_name": "methods.Stored_Data_Details", "line_number": 44, "usage_type": "call"}, {"api_name": "methods.load_log", "line_number": 46, "usage_type": "call"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 48, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 48, "usage_type": "name"}, {"api_name": "google.appengine.api.users", "line_number": 49, "usage_type": "name"}, {"api_name": "google.appengine.ext.blobstore.create_upload_url", "line_number": 51, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore", "line_number": 51, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 57, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 58, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 58, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 61, "usage_type": "attribute"}, {"api_name": "methods.Stored_Data_Details", "line_number": 64, 
"usage_type": "call"}, {"api_name": "methods.load_log", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 76, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 76, "usage_type": "name"}, {"api_name": "google.appengine.api.users", "line_number": 77, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 84, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 85, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 85, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 88, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 91, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 94, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 95, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 95, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 98, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 101, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 101, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 104, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 105, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 105, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 107, "usage_type": "attribute"}, {"api_name": "webapp2.WSGIApplication", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "23677249", "text": "from rest_framework.serializers import ModelSerializer\nfrom transfer.models import Transfer\nfrom user.api_rest_user.serielizer import UserSerializer\n\n\nclass TransferSerializer(ModelSerializer):\n\n user = UserSerializer()\n\n class Meta:\n model = Transfer\n fields = [\n 'user', 'pagador_nome', 'pagador_banco', 'pagador_agencia',\n 'pagador_conta', 'beneficiario_nome', 'beneficiario_banco',\n 'beneficiario_agencia', 'beneficiario_conta', 'valor'\n ]\n", "sub_path": "transfer/api_rest_transfer/serielizer.py", "file_name": "serielizer.py", "file_ext": "py", 
"file_size_in_byte": 504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "name"}, {"api_name": "user.api_rest_user.serielizer", "line_number": 8, "usage_type": "name"}, {"api_name": "user.api_rest_user.serielizer.UserSerializer", "line_number": 8, "usage_type": "call"}, {"api_name": "transfer.models.Transfer", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "489287302", "text": "\"\"\"\r\n\r\n\r\nMy friend John and I are members of the \"Fat to Fit Club (FFC)\".\r\nJohn is worried because each month a list with the weights of members is published and each month he is the last on the list which means he is the heaviest.\r\n\r\nI am the one who establishes the list so I told him:\r\n\"Don't worry any more, I will modify the order of the list\".\r\n It was decided to attribute a \"weight\" to numbers. The weight of a number will be from now on the sum of its digits.\r\n\r\nFor example 99 will have \"weight\" 18, 100 will have \"weight\" 1 so in the list 100 will come before 99.\r\n Given a string with the weights of FFC members in normal order can you give this string ordered by \"weights\" of these numbers?\r\n\r\nExample:\r\n\r\n\"56 65 74 100 99 68 86 180 90\" ordered by numbers weights becomes: \"100 180 90 56 65 74 68 86 99\"\r\n\r\nWhen two numbers have the same \"weight\",\r\n let us class them as if they were strings and not numbers: 100 is before 180 because its \"weight\" (1) is less than the one of 180 (9)\r\n and 180 is before 90 since, having the same \"weight\" (9) it comes before as a string.\r\n\r\nAll numbers in the list are positive numbers and the list can be empty.\r\n\r\n\r\n\"\"\"\r\n\r\nfrom collections import OrderedDict\r\n\r\ndef order_weight(strng):\r\n if not strng:\r\n return ''\r\n weight_list = strng.split(' ')\r\n ordered_weight_list = OrderedDict()\r\n weight_rank_list = []\r\n for index, weight in enumerate(weight_list):\r\n num_list = [int(n) for n in weight]\r\n ordered_weight_list[index] = sum(num_list)\r\n\r\n ordered_weight_list = sorted(ordered_weight_list.items(), key=lambda t: t[1])\r\n\r\n for key, value in ordered_weight_list:\r\n weight_rank_list.append(str(weight_list[key]))\r\n\r\n return ' '.join(weight_rank_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n print(order_weight(\"56 65 74 100 99 68 86 180 90\"))\r\n", "sub_path": "src/codewars/python/5kyu/order_weight.py", "file_name": "order_weight.py", "file_ext": "py", "file_size_in_byte": 1843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.OrderedDict", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "233585301", "text": "from . 
import config\n\nimport logging\nlogger = logging.getLogger()\n\n\nsup_tags = set()\ngroup_tags = {}\n\ndef get(group):\n return list(group_tags.get(group, sup_tags))\n\ndef remove(group):\n group_tags.pop(group, None)\n\ndef get_all():\n return group_tags.copy()\n\ndef set_all():\n gtags = {}\n stags = []\n\n cfg = config.get_config()\n for section in cfg.sections():\n for name, value in cfg.items(section, raw=True):\n if name != 'tags':\n continue\n if ':' in section:\n gtags[section.rsplit(':')[1]] = parse(value)\n elif section == 'supervisord':\n stags = parse(value)\n\n # apply the supervisor tags to all the groups, sort\n for k in gtags:\n v = set(gtags[k])\n v.update(stags)\n gtags[k] = sorted(v)\n\n # replace tags\n global sup_tags, group_tags\n sup_tags = sorted(set(stags))\n group_tags = gtags\n\ndef parse(s):\n return [ t.strip() for t in s.split(',') ]\n", "sub_path": "supcast/tags.py", "file_name": "tags.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "124200736", "text": "# written by: Ioannis Paraskevakos\n# tested by:\n# debugged by:\nfrom pymongo import MongoClient\nfrom hashtable import hashtable\nfrom statehash import StateHash\n\nclass Validator:\n\n '''\n This class is responsible to receive the raw data, find which tweets are actually valid or trusted \n information based on the criteria the system has and then save them inside the MongoDB.\n \n ** Attributes **\n client : This is the actual connection with the MongoDB\n RawTweets : This is a list that has all the tweets that were grabbed\n more resently from Twitter\n loc_tweets : A list of the Geo Located tweets after the locator method is executed\n good_tweets : A list of the tweets that the first filter let to pass\n final_tweets : A list of the tweets that the filter filter let to pass\n keywords2 : A list with the words used as the filtering terms for the first filter.\n It is read from keywords2.txt file\n keywords3 : A list with the words used as the filtering terms for the first filter.\n It is read from keywords3.txt file\n\n ** Methods **\n __init__ : The constructor of the class must be present for\n python classes.\n\n locator : This method checks all tweets and returns only those that are geotagged or uses \n the location of the user to decide the coordinates of a tweet.\n \n data_filter2 : This method searches through all the Tweets for each word in the keyword\n list. If a keyword exists in the tweet's text and the tweet has not been\n added to the final list by a previous keyword success it is added to the\n tweets that will be returned.\n\n data_filter3 : This function searches through all the Tweets for each word in the keyword\n list. 
If a keyword exists in the tweet's text and the tweet has not been\n discarded from the final list by a previous keyword success it is discarded\n from the tweets that will be returned.\n\n updater : This method just updates the database with the new filtered and located data.\n '''\n\n def __init__(self):\n '''\n Class Constractor\n '''\n\n self.client = MongoClient('mongodb://localhost:27017')\n self.RawTweets = self.client.twiter_data.twitter_coll.find()\n self.loc_tweets = list()\n self.good_tweets = list()\n self.final_tweets = list()\n\n fpK2 = open('keywords2.txt','r')\n fpK3 = open('keywords3.txt','r')\n self.keywords2 = [line.strip() for line in fpK2]\n self.keywords3 = [line.strip() for line in fpK3]\n fpK2.close()\n fpK3.close()\n\n\n\n def locator(self):\n '''\n This method checks all tweets and returns only those that are geotagged or uses \n the location of the user to decide the coordinates of a tweet.\n \n Arguments:\n self.RawTweets : A list of tweets that will be selected by their location\n \n Output:\n self.loc_tweets : Tweets with coordinates. Each returned tweet has an ID, coordinates,\n the original tweet's language, the place (City,State), country_code,\n user information, timestamp in miliseconds.\n '''\n\n for tweet in self.RawTweets:\n # for all the tweets in the database if a tweet is geotagged keep the necessary information\n # and append it to the located tweets\n if tweet['geo'] != None and tweet['place'] != None:\n finalGeoLat = tweet['geo']['coordinates'][0]\n finalGeoLog = tweet['geo']['coordinates'][1]\n temp_tweet = {'_id':tweet['_id'],'coordinates':[finalGeoLat,finalGeoLog],\n 'lang':tweet['lang'],'text':tweet['text'],\n 'place':tweet['place']['full_name'],\n 'country':tweet['place']['country_code'],\n 'user':tweet['user'],'timestamp_ms':tweet['timestamp_ms']}\n #append tweet\n self.loc_tweets.append(temp_tweet)\n else:\n # Else seach for all the places that coordinates exist in the hashtable\n for place in hashtable:\n # If the user's location is found in the hash table, Geotag the tweet and\n # after changing the state to the abbreviation ppend it to the located\n # tweets\n if tweet['user']['location']==place['name']:\n finalGeoLat = place['lat']\n finalGeoLog = place['long']\n placed = False\n index = 0\n while not placed and index<50:\n #print index\n state = StateHash[index]\n \n # Take the state of the location. If the state is shown with more than two letters\n # chane it with the abbreviation.\n comma = place['name'].find(',')\n tempstate = place['name'][comma+2:]\n if tempstate.__len__() > 3 and tempstate in state['name']:\n part = place['name'].partition(',')\n new_place = part[0]\n new_place=new_place+', '+state['_id']\n placed = True\n if not placed:\n new_place = place['name']\n index=index+1\n \n temp_tweet = {'_id':tweet['_id'],'coordinates':[finalGeoLat,finalGeoLog],\n 'lang':tweet['lang'],'text':tweet['text'],\n 'place':new_place,\n 'country':'US',\n 'user':tweet['user'],'timestamp_ms':tweet['timestamp_ms']}\n \n #append tweet\n self.loc_tweets.append(temp_tweet)\n\n\n def data_filter2 (self):\n '''\n This method searches through all the Tweets for each word in the keyword\n list. 
If a keyword exists in the tweet's text and the tweet has not been\n added to the final list by a previous keyword success it is added to the\n tweets that will be returned.\n \n Arguments:\n self.keywords2: A list of keywords that will be used for the selection\n \n self.loc_tweets: The list with the tweets that need to be filtered.\n \n Output:\n self.good_tweets: A list with all the tweets that contained at least one \n keyword\n '''\n # For every tweet\n for tweet in self.loc_tweets:\n #Get the text of the tweet\n text = tweet['text']\n #For every keyword\n for keyword in self.keywords2:\n # If the keyword exists and the tweet has not been selected insert it in\n # the good tweets list\n if keyword in text and tweet not in self.good_tweets:\n self.good_tweets.append(tweet)\n \n \n def data_filter3(self):\n '''\n This method searches through all the Tweets for each word in the keyword\n list. If a keyword exists in the tweet's text and the tweet has not been\n discarded from the final list by a previous keyword success it is discarded\n from the tweets that will be returned.\n \n Arguments:\n self.keywords3: A list of keywords that will be used for the selection\n \n Tweets: The list with the tweets that need to be filtered.\n \n Output:\n self.good_tweets: A list with all the tweets that contained at least one \n keyword\n '''\n\n self.final_tweets = self.good_tweets\n \n # For every tweet\n for tweet in self.good_tweets:\n # Get the text of the tweet\n text = tweet['text']\n # For every keyword\n for keyword in self.keywords3:\n # If the keyword exists and the tweet has not been discarded, remove it\n # from the final tweets\n if keyword in text and tweet in self.final_tweets:\n self.final_tweets.remove(tweet)\n \n def updater(self):\n '''\n This method just updates the database with the new filtered and located data.\n '''\n\n #In case you only need the located tweets change the self.final_tweets to self.loc_tweets\n for tweet in self.loc_tweets:\n self.client.twiter_data.clean_tweets.update({'_id':tweet['_id']},tweet,True) #Target\n \n #client.twiter_data.twitter_coll.drop()\n\n\nif __name__ == \"__main__\":\n '''\n Just run the Class methods.\n '''\n \n Session = Validator()\n Session.locator()\n #Session.data_filter2()\n #Session.data_filter3()\n Session.updater()\n\n", "sub_path": "validator/Validator.py", "file_name": "Validator.py", "file_ext": "py", "file_size_in_byte": 8136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pymongo.MongoClient", "line_number": 51, "usage_type": "call"}, {"api_name": "hashtable.hashtable", "line_number": 95, "usage_type": "name"}, {"api_name": "statehash.StateHash", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "559124728", "text": "import csv\nimport os\nimport math\nimport numpy as np\nimport locale\nlocale.setlocale(locale.LC_ALL,'en_US.UTF-8')\n\nfrom datetime import datetime, timedelta\n\nDATE_BGN = 'Dec 27, 2013'\n#DATE_BGN = 'Jan 01, 2018'\nDATE_END = 'Jan 05, 2019'\n\nDATE_BGN_OBJ = datetime.strptime(DATE_BGN, '%b %d, %Y')\nDATE_BGN_STR = DATE_BGN_OBJ.strftime('%Y%m%d')\n\nDATE_END_OBJ = datetime.strptime(DATE_END, '%b %d, %Y')\nDATE_END_STR = DATE_END_OBJ.strftime('%Y%m%d')\n\nrows_wr = list()\ndate_prev = DATE_END_OBJ\nwhile date_prev >= DATE_BGN_OBJ:\n date_prev_str = date_prev.strftime('%b %d, %Y')\n print(date_prev_str)\n row = dict()\n row['Date'] = date_prev_str\n row['Gini1'] = 0\n row['Gini2'] = 0\n row['GiniCheck'] = 0\n row['VolCheck'] = 0\n 
row['VolTotal1'] = 0\n row['VolTotal2'] = 0\n rows_wr.append(row)\n date_prev += timedelta(-1)\n\ndef get_idx(rows, date_str):\n mylen = len(list(row for row in rows if row['Date'] == date_str))\n if mylen <=0:#NOT EXIST\n return -1\n elif mylen == 1:\n j = next(j for j, row in enumerate(rows) if row['Date'] == date_str)\n #print(\"j=\"+str(j))\n return j\n else:\n raise(\"get_idx(): mylen > 1\")\n\ncsv.register_dialect(\n 'mydialect',\n delimiter = ',',\n quotechar = '\"',\n doublequote = True,\n skipinitialspace = True,\n lineterminator = '\\r\\n',\n quoting = csv.QUOTE_MINIMAL)\n\nf = open(\"./work/fluidity_total.csv\",'r')\nreader = csv.DictReader(f, dialect='mydialect')\n#Date,Volume,MarketCap\nrows_rd = list()\nfor row in reader:\n rows_rd.append(row)\n\ndef add_column_matched(rows_wr, rows_rd, keys, keyt, keynew):\n for i,row in enumerate(rows_wr):\n j = get_idx(rows_rd, row[keys])\n if j < 0: \n rows_wr[i][keynew] = 0\n else: \n rows_wr[i][keynew] = locale.atof(rows_rd[j][keyt]) if rows_rd[j][keyt] != '-' else 0\n\nadd_column_matched(rows_wr, rows_rd, 'Date', 'Volume', 'VolTotal1')\n\n\nheaders = ('Date', 'Gini1', 'Gini2', 'GiniCheck', 'VolCheck', 'VolTotal1', 'VolTotal2')\npath = \"./history\" \nfiles= os.listdir(path) \nfor f in files: \n #if file == 'bitcoin.csv': continue\n #if f == 'paxex.csv': break #DEBUG\n print(f)\n cname = f.rstrip('csv').rstrip('\\.') \n headers += (cname,)\n\n fobj = open(path+\"/\"+f,'r')\n reader = csv.DictReader(fobj, dialect='mydialect')\n \n #load coin xxx history data to rows(list of dict)\n #'High','Volume','MarketCap','HighInBTC']\n rows_rd = list()\n for row in reader:\n rows_rd.append(row)\n\n for i,row in enumerate(rows_wr):\n j = get_idx(rows_rd, row['Date'])\n if j < 0:#NOT FOUND \n v = 0\n else:\n v = locale.atof(rows_rd[j]['Volume']) if rows_rd[j]['Volume'] != '-' else 0\n rows_wr[i]['VolTotal2'] += v\n rows_wr[i][cname] = v\n\nfname = './debug/fluidity_gini_'+DATE_BGN_STR+'_'+DATE_END_STR+'.csv'\nwith open(fname,'w') as f:\n writer = csv.DictWriter(f, dialect='mydialect',fieldnames=headers)\n writer.writeheader()\n writer.writerows(rows_wr)\n\nf = open(fname,'r')\nreader = csv.DictReader(f, dialect='mydialect')\n#Date,Volume,MarketCap\nrows_rd = list()\nfor row in reader:\n rows_rd.append(row)\n\ndef gini_coef1(wealths):\n cum_wealths = np.cumsum(sorted(np.append(wealths, 0)))\n sum_wealths = cum_wealths[-1]\n xarray = np.array(range(0, len(cum_wealths))) / np.float(len(cum_wealths)-1)\n yarray = cum_wealths / sum_wealths\n B = np.trapz(yarray, x=xarray)\n A = 0.5 - B\n return A / (A+B)\n\n#def gini_coef1(wealths):\n# Y = sorted(wealths)\n# N = len(Y)\n# g = 0\n# for k, item in enumerate(Y):\n# g += (2*k-N-1)*item\n# return g/((N-1)*sum(Y))\n\ndef gini_coef2(wealths):\n Y = sorted(wealths)\n sumY = sum(Y)\n y = list()\n for item in Y:\n y.append(item/sumY)\n N = len(y)\n mu = 0\n for k, item in enumerate(y):\n mu += (k+1)*item\n return 2*mu/N-(N+1)/N\n\n\nfor i,row in enumerate(rows_wr):\n j = get_idx(rows_rd, row['Date'])\n if j < 0: continue #NOT FOUND \n YY = list()\n for key, value in rows_rd[j].items():\n if key in ('Date','Gini1','Gini2','GiniCheck','VolCheck','VolTotal1','VolTotal2'): continue \n if value == 0: continue\n YY.append(float(value))\n if sum(YY) == 0:\n print('Date:'+row['Date']+' j='+str(j))\n continue\n #Y = sorted(YY)\n #sumY = sum(Y)\n #y = list()\n #for item in Y:\n # y.append(item/sumY)\n #N = len(y)\n #mu = 0\n #for k, item in enumerate(y):\n # mu += (k+1)*item\n #rows_wr[i]['Gini2'] = 2*mu/N-(N+1)/N\n\n 
rows_wr[i]['Gini1'] = gini_coef1(YY)\n rows_wr[i]['Gini2'] = gini_coef2(YY)\n rows_wr[i]['GiniCheck'] = rows_wr[i]['Gini1'] - rows_wr[i]['Gini2']\n\n rows_wr[i]['VolCheck'] = rows_wr[i]['VolTotal1'] - rows_wr[i]['VolTotal2']\n\n \nfname = './debug/fluidity_gini_'+DATE_BGN_STR+'_'+DATE_END_STR+'.csv'\nwith open(fname,'w') as f:\n writer = csv.DictWriter(f, dialect='mydialect',fieldnames=headers)\n writer.writeheader()\n writer.writerows(rows_wr)\n\ntuples = list()\nfor row in rows_wr:\n tuples.append((row['Date'],row['Gini1'],row['Gini2'],row['GiniCheck'],row['VolCheck'],row['VolTotal1'],row['VolTotal2']))\n\nfname = './work/fluidity_gini_'+DATE_BGN_STR+'_'+DATE_END_STR+'.csv'\nwith open(fname,'w') as f:\n headers = ('Date', 'Gini1', 'Gini2', 'GiniCheck', 'VolCheck', 'VolTotal1', 'VolTotal2')\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(tuples)\n", "sub_path": "python/cmc/cmc_parse_fluidity_gini.py", "file_name": "cmc_parse_fluidity_gini.py", "file_ext": "py", "file_size_in_byte": 5444, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "locale.setlocale", "line_number": 6, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 6, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 34, "usage_type": "call"}, {"api_name": "csv.register_dialect", "line_number": 47, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 57, "usage_type": "call"}, {"api_name": "locale.atof", "line_number": 69, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 76, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 85, "usage_type": "call"}, {"api_name": "locale.atof", "line_number": 98, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 104, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.trapz", "line_number": 120, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 176, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "529821968", "text": "from threading import *\nimport time\n\nimport redis\nfrom tenacity import *\nimport wx\n\nEVT_REDIS_CONN_ID = wx.NewId()\n\ndef EVT_REDIS_CONN(win, func):\n win.Connect(-1, -1, EVT_REDIS_CONN_ID, func)\n\nclass RedisConnectEvent(wx.PyEvent):\n def __init__(self, connection):\n super().__init__()\n self.SetEventType(EVT_REDIS_CONN_ID)\n self.connection = connection\n\nclass RedisConnectThread(Thread):\n def __init__(self, config, notify):\n super().__init__()\n self.notify = notify\n\n self.conn = redis.Redis(\n host=config['host'],\n port=config['port'],\n db=config['database']\n )\n\n self.start()\n\n @retry(wait=wait_fixed(2))\n def run(self):\n # Attempt to 
establish the connection.\n self.conn.ping()\n # Post an event to notify that the connection was successful.\n wx.PostEvent(self.notify, RedisConnectEvent(self.conn))", "sub_path": "threads.py", "file_name": "threads.py", "file_ext": "py", "file_size_in_byte": 932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "wx.NewId", "line_number": 8, "usage_type": "call"}, {"api_name": "wx.PyEvent", "line_number": 13, "usage_type": "attribute"}, {"api_name": "redis.Redis", "line_number": 24, "usage_type": "call"}, {"api_name": "wx.PostEvent", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "278079684", "text": "import logging\nimport serial\nfrom PySide2.QtCore import QIODevice\nfrom PySide2.QtSerialPort import QSerialPort, QSerialPortInfo\n\nfrom .common import PortInfo\n\n\nclass QtSerial:\n def __init__(self):\n self.serial = None\n self.pyserial = None\n self.port_info = None\n\n @staticmethod\n def get_devices():\n info_list = QSerialPortInfo()\n ports = info_list.availablePorts()\n ports_info = []\n for port in ports:\n port_info = PortInfo(port, port.vendorIdentifier(), port.productIdentifier())\n ports_info.append(port_info)\n\n return ports_info\n\n def connect(self, port_obj, timeout):\n # didn't work\n # self.serial = QSerialPort(port_obj)\n # self.serial.setBaudRate(QSerialPort.Baud115200)\n # return self.serial.open(QIODevice.ReadWrite):\n\n logging.info('Connecting to port:', port_obj.systemLocation())\n try:\n self.pyserial = serial.Serial(port_obj.systemLocation(), baudrate=115200, timeout=timeout)\n except (FileNotFoundError, OSError) as exc:\n logging.debug('Connecting error', exc_info=True)\n return False\n\n return bool(self.pyserial)\n\n def close(self):\n if self.pyserial:\n self.pyserial.close()\n\n def clear(self):\n if not self.pyserial:\n return\n\n self.pyserial.reset_input_buffer()\n self.pyserial.reset_output_buffer()\n\n def read(self, size: int) -> bytes:\n # didn't work\n # data = bytes()\n # data = self.serial.readData(size)\n # return data\n\n return self.pyserial.read(size)\n\n def write(self, data: bytes):\n # didn't work\n # self.serial.writeData(data, len(data))\n\n self.pyserial.write(data)\n\n", "sub_path": "connect/interfaces/qt_serial.py", "file_name": "qt_serial.py", "file_ext": "py", "file_size_in_byte": 1783, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "PySide2.QtSerialPort.QSerialPortInfo", "line_number": 17, "usage_type": "call"}, {"api_name": "common.PortInfo", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "372242029", "text": "from bs4 import BeautifulSoup\nimport requests\nimport re\n\npage = requests.get(\"http://www.soccervista.com/bet.php\",timeout=10)\ntry:\n page.raise_for_status()\nexcept Exception as error:\n print('There was a problem getting cash1 data: %s' % error)\nvista = BeautifulSoup(page.text, 'html.parser')\nif type(vista) == BeautifulSoup:\n if \"blocked\" in page.text:\n print (\"we've been blocked\")\n## tr:nth-child(even)\n games = vista.select(\"tr:nth-child(even)\")\n games_num = len(games)\n for each in games:\n print(each.getText()+\"\\n\")\nelse:\n print(\"not bs4 obj\")\n", "sub_path": "soccervista.py", "file_name": "soccervista.py", "file_ext": 
"py", "file_size_in_byte": 587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "191924391", "text": "r\"\"\"\n\nRepeatedly acquire and self-label confident points -- permanently added into the training set.\n\n2 variants: 1) in conjunction with active learning\n 2) w/o active learning\n\"\"\"\n\nfrom alr.utils import manual_seed, eval_fwd_exp, timeop\nfrom alr.acquisition import BALD\nfrom alr import MCDropout\nfrom alr.data.datasets import Dataset\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.data import UnlabelledDataset, DataManager\nfrom alr.training import Trainer\nfrom alr.training.repeated_acquisition_utils import (\n get_confident_indices,\n RelabelledDataset,\n)\n\nimport torch\nimport torch.utils.data as torchdata\nimport pickle\nfrom torch.nn import functional as F\nfrom pathlib import Path\n\n\ndef main(use_al, b, threshold, log_every):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n # --- constants ---\n BATCH_SIZE = 64\n EPOCHS = 200\n REPS = 6\n ITERS = 24\n VAL_SIZE = 5_000\n MIN_TRAIN_LEN = 12_500\n\n # --- setup ---\n train, pool, test = Dataset.MNIST.get_fixed()\n val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))\n pool = UnlabelledDataset(pool)\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n bald = BALD(eval_fwd_exp(model), device=device, batch_size=1024, **kwargs)\n dm = DataManager(train, pool, bald)\n val_loader = torchdata.DataLoader(\n val,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n test_loader = torchdata.DataLoader(\n test,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n\n accs = []\n template = f\"{'al' if use_al else 'no_al'}_b={b}_thresh={threshold}\"\n pl_metrics = Path(\"pl_metrics\") / template\n metrics = Path(\"metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n\n for r in range(1, REPS + 1):\n print(f\"- Repeat {r} of {REPS} -\")\n dm.reset()\n accs_r = {}\n for i in range(1, ITERS + 1):\n print(f\"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===\")\n print(\n f\"\\ttrain: {dm.n_labelled}; \"\n f\"pool: {dm.n_unlabelled}; \"\n f\"val: {len(val)}; \"\n f\"test: {len(test)}\"\n )\n model.reset_weights()\n\n # -- stage 1: train --\n trainer = Trainer(\n model, F.nll_loss, \"Adam\", patience=3, reload_best=True, device=device\n )\n train_loader = torchdata.DataLoader(\n dm.labelled,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n dm.labelled, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n history = trainer.fit(train_loader, val_loader, epochs=EPOCHS)\n\n test_metrics = trainer.evaluate(test_loader)\n accs_r[dm.n_labelled] = test_metrics[\"acc\"]\n\n print(\n f\"\\t[test] loss, acc: ({test_metrics['loss']:.4f}, {test_metrics['acc']:.4f}); time: {t}\"\n )\n\n with open(\n metrics / f\"repeat_{r}_dsize_{dm.n_labelled}_metrics.pkl\", \"wb\"\n ) as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n }\n pickle.dump(payload, fp)\n\n if (i - 1) % log_every == 0:\n torch.save(\n model.state_dict(),\n saved_models / 
f\"repeat_{r}_dsize_{dm.n_labelled}_weights.pth\",\n )\n\n # skip if this is the last iteration\n if i == ITERS:\n continue\n\n # -- stage 2: acquire more data into the training set --\n\n # -- stage 2.1: acquire using AL acquisition function --\n if use_al:\n dm.acquire(b)\n\n # -- stage 2.2: acquire using pseudo-labels --\n pool.debug = True # to get the true labels from the unlabelled pool (for evaluation purposes)\n idxs, plabs = get_confident_indices(\n model=model,\n dataset=dm.unlabelled,\n threshold=threshold,\n root=(pl_metrics / f\"repeat_{r}\"),\n step=i,\n device=device,\n **kwargs,\n )\n pool.debug = False\n\n if idxs.shape[0]:\n n_unlabelled_before = dm.n_unlabelled\n # remove these from the unlabelled pool\n truth = pool.label(idxs)\n # sanity check\n assert dm.n_unlabelled == (n_unlabelled_before - idxs.shape[0])\n\n # replace true labels with pseudo-labels\n relabelled_dataset = RelabelledDataset(truth, plabs)\n assert len(relabelled_dataset) == idxs.shape[0]\n\n # add to the labelled pool\n n_labelled_before = dm.n_labelled\n dm.append_to_labelled(relabelled_dataset)\n assert dm.n_labelled == (n_labelled_before + idxs.shape[0])\n else:\n print(\n f\"\\tSelf-labelling didn't happen because none of the pseudo-labels are confident enough.\"\n )\n accs.append(accs_r)\n with open(f\"{template}_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n\n\nif __name__ == \"__main__\":\n main(use_al=True, b=10, threshold=0.9, log_every=2)\n main(use_al=False, b=10, threshold=0.9, log_every=2)\n", "sub_path": "experiments/thesis/vanilla_repeated_acquisition/mnist/permanent/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "alr.utils.manual_seed", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "alr.data.datasets.Dataset.MNIST.get_fixed", "line_number": 42, "usage_type": "call"}, {"api_name": "alr.data.datasets.Dataset.MNIST", "line_number": 42, "usage_type": "attribute"}, {"api_name": "alr.data.datasets.Dataset", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.utils.data.random_split", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 43, "usage_type": "name"}, {"api_name": "alr.data.UnlabelledDataset", "line_number": 44, "usage_type": "call"}, {"api_name": "alr.MCDropout", "line_number": 45, "usage_type": "call"}, {"api_name": "alr.data.datasets.Dataset.MNIST", "line_number": 45, "usage_type": "attribute"}, {"api_name": "alr.data.datasets.Dataset", "line_number": 45, "usage_type": "name"}, {"api_name": "alr.acquisition.BALD", "line_number": 46, "usage_type": "call"}, {"api_name": "alr.utils.eval_fwd_exp", "line_number": 46, "usage_type": "call"}, {"api_name": "alr.data.DataManager", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 54, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 63, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 
64, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 65, "usage_type": "call"}, {"api_name": "alr.training.Trainer", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 87, "usage_type": "name"}, {"api_name": "alr.training.samplers.RandomFixedLengthSampler", "line_number": 90, "usage_type": "call"}, {"api_name": "alr.utils.timeop", "line_number": 95, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 115, "usage_type": "call"}, {"api_name": "alr.training.repeated_acquisition_utils.get_confident_indices", "line_number": 132, "usage_type": "call"}, {"api_name": "alr.training.repeated_acquisition_utils.RelabelledDataset", "line_number": 151, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "310123407", "text": "from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nimport os\r\n\r\nclass AutoDM:\r\n \r\n def __init__(self):\r\n script_path = os.path.abspath(__file__) \r\n path_list = script_path.split(os.sep)\r\n script_directory = path_list[0:len(path_list)-1]\r\n rel_path1 = \"demofile.txt\"\r\n rel_path2 = \"chromedriver.exe\"\r\n path2 = \"/\".join(script_directory) + \"/\" + rel_path2\r\n path1 = \"/\".join(script_directory) + \"/\" + rel_path1\r\n i = 0\r\n arr = []\r\n f = open(path1, \"r\")\r\n line = f.readline()\r\n arr.append(line)\r\n for line in f:\r\n arr.append(line)\r\n self.bot = webdriver.Chrome(path2)\r\n bot = self.bot\r\n bot.get(\"https://www.instagram.com/\")\r\n time.sleep(1)\r\n username = bot.find_element_by_name(\"username\")\r\n username.click()\r\n username.send_keys('username/email')\r\n password = bot.find_element_by_name(\"password\")\r\n password.click()\r\n password.send_keys(\"password\")\r\n bot.find_element_by_xpath(\"//*[@id='react-root']/section/main/article/div[2]/div[1]/div/form/div[4]/button/div\").click()\r\n time.sleep(3)\r\n bot.find_element_by_class_name(\"xWeGp\").click()\r\n time.sleep(1)\r\n bot.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[3]/button[2]\").click()\r\n time.sleep(1)\r\n # dm button is the button to search once you are in the DM, step1\r\n dmButton = bot.find_element_by_xpath(\"//*[@id='react-root']/section/div/div[2]/div/div/div[1]/div[1]/div/div[3]/button\")\r\n dmButton.click()\r\n # search is the search box when looking to DM a person, step2\r\n search = bot.find_element_by_name('queryBox')\r\n search.send_keys(arr[i])\r\n time.sleep(2)\r\n # target is the person you are clicking on, the actual string is in the array, step3\r\n target = bot.find_element_by_css_selector(\"body > div.RnEpo.Yx5HN > div > div > div.Igw0E.IwRSH.eGOV_.vwCYk.i0EQd > div.Igw0E.IwRSH.eGOV_.vwCYk._3wFWr > div > div > div.Igw0E.rBNOH.YBx95.ybXk5._4EzTm.soMvl > button > span\")\r\n target.click()\r\n # nextButton is to go into the DM box to start typing, step4\r\n nextButton = bot.find_element_by_xpath(\"/html/body/div[4]/div/div/div[1]/div/div[2]/div/button\")\r\n nextButton.click()\r\n time.sleep(2)\r\n # messageButton is so you can write a message, step5\r\n messageButton = 
bot.find_element_by_xpath(\"//*[@id='react-root']/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea\")\r\n messageButton.click()\r\n messageButton.send_keys(\"Insert message\")\r\n # send button is the button that sends the message inside the DM, step6\r\n sendButton = bot.find_element_by_xpath(\"//*[@id='react-root']/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button\")\r\n sendButton.click()\r\n i = i + 1\r\n while i < len(arr):\r\n step1 = bot.find_element_by_xpath(\"//*[@id='react-root']/section/div/div[2]/div/div/div[1]/div[1]/div/div[3]/button\")\r\n step1.click()\r\n step2 = bot.find_element_by_name(\"queryBox\")\r\n step2.send_keys(arr[i])\r\n step2.click()\r\n time.sleep(2)\r\n step3 = bot.find_element_by_css_selector(\"body > div.RnEpo.Yx5HN > div > div > div.Igw0E.IwRSH.eGOV_.vwCYk.i0EQd > div.Igw0E.IwRSH.eGOV_.vwCYk._3wFWr > div > div > div.Igw0E.rBNOH.YBx95.ybXk5._4EzTm.soMvl > button > span\")\r\n step3.click()\r\n step4 = bot.find_element_by_xpath(\"/html/body/div[4]/div/div/div[1]/div/div[2]/div/button\")\r\n step4.click()\r\n time.sleep(2)\r\n step5 = bot.find_element_by_xpath(\"//*[@id='react-root']/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea\")\r\n step5.click()\r\n step5.send_keys(\"Insert Message\")\r\n step6 = bot.find_element_by_xpath(\"//*[@id='react-root']/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button\")\r\n step6.click()\r\n i = i + 1\r\n \r\nexample = AutoDM()\r\n", "sub_path": "DMBot.py", "file_name": "DMBot.py", "file_ext": "py", "file_size_in_byte": 4192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 23, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "525870598", "text": "#!/bin/python3\n\n#############################\n# Collections.OrderedDict() #\n#############################\n\nfrom collections import OrderedDict\n\nif __name__ == '__main__':\n d = OrderedDict()\n for _ in range(int(input())):\n key, price = (input().rsplit(maxsplit=1))\n d[key] = d[key] + int(price) if key in d else int(price)\n for key in d:\n print(\"{} {}\".format(key, d[key]))", "sub_path": "Python/Collections/py-collections-ordereddict.py", "file_name": "py-collections-ordereddict.py", "file_ext": "py", "file_size_in_byte": 402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.OrderedDict", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "479867675", "text": "from django.db.models.expressions import (\n Col as _Col,\n)\n\n\nclass 
CompositePrimaryKeyColumn(_Col):\n def as_sql(self, compiler, connection):\n sqls = []\n params = []\n target: _CPK = self.target\n for field in target.fields:\n col = field.col(self.alias)\n sql, param = col.as_sql(compiler, connection)\n sqls.append(sql)\n params.extend(param)\n final_sql = '(' + ','.join(sqls) + ')'\n return final_sql, params\n\n\nfrom .fields import ( # noqa: E402\n CompositePrimaryKey as _CPK,\n)\n", "sub_path": "composite_pk/expressions.py", "file_name": "expressions.py", "file_ext": "py", "file_size_in_byte": 569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.models.expressions.Col", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "273893001", "text": "import os\nimport sys\nimport logging\nimport servicemanager\ntry:\n servicemanager.LogInfoMsg(os.getcwd())\n servicemanager.LogInfoMsg(__file__)\n servicemanager.LogInfoMsg(sys.argv[0])\nexcept:\n pass\nclass ServiceEventLogHandler(logging.Handler):\n \"\"\"Dispatches logging events to the win32 services event log.\n Requires pywin32. \n \"\"\"\n import servicemanager\n def emit(self, record):\n \"\"\"Emit a record.\n If a formatter is specified, it is used to format the record.\n This record is then written to the win32 services event log,\n with the type set to the appropriate type based on the level.\n \"\"\"\n try:\n servicemgr = self.servicemanager\n level = record.levelno\n msg = self.format(record)\n if level >= logging.ERROR:\n servicemgr.LogErrorMsg(msg)\n elif level >= logging.WARNING:\n servicemgr.LogWarningMsg(msg)\n elif level >= logging.INFO:\n servicemgr.LogInfoMsg(msg)\n elif level >= logging.DEBUG:\n pass\n else:\n pass\n except:\n self.handleError(record)\n def handleError(self, record):\n \"\"\"\n Handle errors which occur during an emit() call.\n sys.stderr does nowwhere, so redirect this into the event log, too.\n \"\"\"\n if raiseExceptions:\n try:\n import io as StringIO\n except ImportError:\n import io\n import traceback\n ei = sys.exc_info()\n msg = io.StringIO()\n traceback.print_exception(ei[0], ei[1], ei[2], None, msg)\n msg.seek(0)\n self.servicemanager.LogErrorMsg(msg)\n del ei\nclass ServiceEventLogHandlerWrapper(object):\n \"\"\"Pretend that the ServiceEventLogHandler is a file-like object,\n so we can use it while we don't use the proper logging module.\"\"\"\n def __init__(self, service_name, level=logging.INFO):\n self.log = ServiceEventLogHandler()\n self.name = service_name\n self.level = level\n self.data = \"\"\n def write(self, data):\n self.data += data\n if '\\n' not in data:\n return\n if not self.data.strip():\n return\n record = logging.LogRecord(self.name, self.level, \"\", \"\",\n self.data, None, None)\n self.log.emit(record)\n self.data = \"\"\nimport win32api\ntry:\n win32api.GetConsoleTitle()\nexcept win32api.error:\n if hasattr(sys, \"frozen\"):\n sys.stdout = ServiceEventLogHandlerWrapper(\"pop3proxy\")\n sys.stderr = ServiceEventLogHandlerWrapper(\"pop3proxy\",\n logging.ERROR)\n else:\n import win32traceutil\nif not hasattr(sys, \"frozen\"):\n this_filename = __file__\n sb_dir = os.path.dirname(os.path.dirname(this_filename))\n sb_scripts_dir = os.path.join(sb_dir,\"scripts\")\n sys.path.insert(0, sb_dir)\n sys.path.insert(-1, sb_scripts_dir)\n if os.path.exists(os.path.join(sb_dir, \"SpamBayesData\")):\n os.chdir(os.path.join(sb_dir, \"SpamBayesData\"))\n else:\n os.chdir(sb_dir)\n from win32com.shell import shell, shellcon\n sys32path = shell.SHGetFolderPath(0, 
shellcon.CSIDL_SYSTEM, 0, 0)\n for path in sys.path[:-1]:\n if path == sys32path:\n sys.path.remove(path)\n assert path not in sys.path, \\\n \"Please remove multiple copies of windows\\system32 in path\"\n sys.path.append(path) # put it at the *end*\n del sys32path\n del shell\n del shellcon\n del path\nimport traceback\nimport threading\nimport io\nimport sb_server\nimport win32serviceutil, win32service\nimport pywintypes, win32con, winerror\nfrom ntsecuritycon import *\nclass Service(win32serviceutil.ServiceFramework):\n _svc_name_ = \"pop3proxy\"\n _svc_display_name_ = \"SpamBayes Service\"\n _svc_deps_ = ['tcpip'] # We depend on the tcpip service.\n def __init__(self, args):\n win32serviceutil.ServiceFramework.__init__(self, args)\n self.event_stopped = threading.Event()\n self.event_stopping = threading.Event()\n self.thread = None\n def SvcStop(self):\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n self.event_stopping.set()\n sb_server.stop()\n def SvcDoRun(self):\n import servicemanager\n try:\n sb_server.prepare(can_stop=False)\n except sb_server.AlreadyRunningException:\n msg = \"The SpamBayes proxy service could not be started, as \"\\\n \"another SpamBayes server is already running on this machine\"\n servicemanager.LogErrorMsg(msg)\n errCode = winerror.ERROR_SERVICE_SPECIFIC_ERROR\n self.ReportServiceStatus(win32service.SERVICE_STOPPED,\n win32ExitCode=errCode, svcExitCode=1)\n return\n assert not sb_server.state.launchUI, \"Service can't launch a UI\"\n thread = threading.Thread(target=self.ServerThread)\n thread.start()\n from spambayes.Options import optionsPathname\n extra = \" as user '%s', using config file '%s'\" \\\n % (win32api.GetUserName(),\n optionsPathname)\n servicemanager.LogMsg(\n servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED,\n (self._svc_name_, extra)\n )\n try:\n self.event_stopping.wait()\n for i in range(60):\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n self.event_stopped.wait(1)\n if self.event_stopped.isSet():\n break\n print(\"The service is still shutting down...\")\n else:\n print(\"The worker failed to stop - aborting it anyway\")\n except KeyboardInterrupt:\n pass\n s = sb_server.state\n status = \" after %d sessions (%d ham, %d spam, %d unsure)\" % \\\n (s.totalSessions, s.numHams, s.numSpams, s.numUnsure)\n servicemanager.LogMsg(\n servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STOPPED,\n (self._svc_name_, status)\n )\n def ServerThread(self):\n try:\n try:\n sb_server.start()\n except SystemExit:\n print(\"pop3proxy service shutting down due to user request\")\n except:\n ob = io.StringIO()\n traceback.print_exc(file=ob)\n message = \"The pop3proxy service failed with an \" \\\n \"unexpected error\\r\\n\\r\\n\" + ob.getvalue()\n print(message)\n import servicemanager\n servicemanager.LogErrorMsg(message)\n finally:\n self.event_stopping.set()\n self.event_stopped.set()\nif __name__=='__main__':\n if \"install\" in sys.argv:\n from spambayes.Options import optionsPathname\n if not os.path.exists(optionsPathname):\n data_directory = os.path.join(os.path.dirname(sys.argv[0]),\n \"..\", \"SpamBayesData\")\n data_directory = os.path.abspath(data_directory)\n if not os.path.exists(data_directory):\n print(\"Creating data directory at\", data_directory)\n os.makedirs(data_directory)\n win32serviceutil.HandleCommandLine(Service)\n", "sub_path": "SpamBayes/rev3250-3267/right-branch-3267/windows/pop3proxy_service.py", "file_name": "pop3proxy_service.py", "file_ext": 
"py", "file_size_in_byte": 7552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "servicemanager.LogInfoMsg", "line_number": 6, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 6, "usage_type": "call"}, {"api_name": "servicemanager.LogInfoMsg", "line_number": 7, "usage_type": "call"}, {"api_name": "servicemanager.LogInfoMsg", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.Handler", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 30, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 49, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 50, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 51, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 58, "usage_type": "attribute"}, {"api_name": "logging.LogRecord", "line_number": 69, "usage_type": "call"}, {"api_name": "win32api.GetConsoleTitle", "line_number": 75, "usage_type": "call"}, {"api_name": "win32api.error", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 79, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 88, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 92, "usage_type": "call"}, {"api_name": "win32com.shell.shell.SHGetFolderPath", "line_number": 94, "usage_type": "call"}, {"api_name": "win32com.shell.shell", "line_number": 94, "usage_type": "name"}, {"api_name": "win32com.shell.shellcon.CSIDL_SYSTEM", "line_number": 94, "usage_type": "attribute"}, {"api_name": "win32com.shell.shellcon", "line_number": 94, "usage_type": "name"}, {"api_name": "sys.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sys.path.remove", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 100, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 100, "usage_type": "attribute"}, 
{"api_name": "win32com.shell.shell", "line_number": 102, "usage_type": "name"}, {"api_name": "win32com.shell.shellcon", "line_number": 103, "usage_type": "name"}, {"api_name": "win32serviceutil.ServiceFramework", "line_number": 112, "usage_type": "attribute"}, {"api_name": "win32serviceutil.ServiceFramework.__init__", "line_number": 117, "usage_type": "call"}, {"api_name": "win32serviceutil.ServiceFramework", "line_number": 117, "usage_type": "attribute"}, {"api_name": "threading.Event", "line_number": 118, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 119, "usage_type": "call"}, {"api_name": "win32service.SERVICE_STOP_PENDING", "line_number": 122, "usage_type": "attribute"}, {"api_name": "sb_server.stop", "line_number": 124, "usage_type": "call"}, {"api_name": "sb_server.prepare", "line_number": 128, "usage_type": "call"}, {"api_name": "sb_server.AlreadyRunningException", "line_number": 129, "usage_type": "attribute"}, {"api_name": "servicemanager.LogErrorMsg", "line_number": 132, "usage_type": "call"}, {"api_name": "winerror.ERROR_SERVICE_SPECIFIC_ERROR", "line_number": 133, "usage_type": "attribute"}, {"api_name": "win32service.SERVICE_STOPPED", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sb_server.state", "line_number": 137, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 138, "usage_type": "call"}, {"api_name": "win32api.GetUserName", "line_number": 142, "usage_type": "call"}, {"api_name": "spambayes.Options.optionsPathname", "line_number": 143, "usage_type": "name"}, {"api_name": "servicemanager.LogMsg", "line_number": 144, "usage_type": "call"}, {"api_name": "servicemanager.EVENTLOG_INFORMATION_TYPE", "line_number": 145, "usage_type": "attribute"}, {"api_name": "servicemanager.PYS_SERVICE_STARTED", "line_number": 146, "usage_type": "attribute"}, {"api_name": "win32service.SERVICE_STOP_PENDING", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sb_server.state", "line_number": 161, "usage_type": "attribute"}, {"api_name": "servicemanager.LogMsg", "line_number": 164, "usage_type": "call"}, {"api_name": "servicemanager.EVENTLOG_INFORMATION_TYPE", "line_number": 165, "usage_type": "attribute"}, {"api_name": "servicemanager.PYS_SERVICE_STOPPED", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sb_server.start", "line_number": 172, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 176, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 177, "usage_type": "call"}, {"api_name": "servicemanager.LogErrorMsg", "line_number": 182, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 189, "usage_type": "call"}, {"api_name": "spambayes.Options.optionsPathname", "line_number": 189, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 190, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": 
"os.makedirs", "line_number": 195, "usage_type": "call"}, {"api_name": "win32serviceutil.HandleCommandLine", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "169034721", "text": "# coding=utf-8\nimport os\nimport sys\nsys.path.append('../')\nimport logging\nfrom sklearn.externals import joblib\nfrom datetime import datetime, date\nimport pandas as pd\nimport numpy as np\nfrom xgb_wswp.config import data_path, get_train_info, train_frequency\nfrom power_forecast_common.common_misc import load_data_from_pkl, generate_folder\nfrom mlApproach.util import get_nwp_list\nfrom power_forecast_common.wswp_feature import WsWpFeature\nfrom power_forecast_common.wswp_error import write_wind_error, check_original_std, wswp_error_analysis\nfrom power_forecast_common.evaluation_misc import get_training_data\nfrom power_forecast_common.offline_common import filter_data\nfrom xgb_ws.xgb_ws_forecast import XgbWsForecast\nfrom xgb_ws.xgb_linear_ws_forecast import XgbLinearWsForecast\nfrom xgb_ws.xgb_ridge_ws_forecast import XgbRidgeWsForecast\nfrom xgb_ws.xgb_lasso_ws_forecast import XgbLassoWsForecast\nfrom xgb_ws.xgb_elasticnet_ws_forecast import XgbElasticNetWsForecast\nfrom xgb_ws.xgb_nn_ws_forecast import XgbNNWsForecast\nlogger = logging.getLogger(__name__)\n\n\ndef train_turbine_ws_model(master_id, lat, lon, turbine_data_path, feature_file_path, data_resampling=False,\n train_frequency=60, delta_hour=3):\n \"\"\"\n :param master_id:\n :param lat:\n :param lon:\n :param turbine_data_path:\n :param feature_file_path:\n :param train_frequency:\n :param delta_hour:\n :return:\n \"\"\"\n logger.info('------Training model for wtg {}------'.format(master_id))\n\n #model = XgbWsForecast(master_id, lat=lat, lon=lon, grid_params=None)\n #model = XgbLinearWsForecast(master_id, lat=lat, lon=lon, grid_params=None)\n #model = XgbRidgeWsForecast(master_id, lat=lat, lon=lon, grid_params=None)\n #model = XgbLassoWsForecast(master_id, lat=lat, lon=lon, grid_params=None)\n #model = XgbElasticNetWsForecast(master_id, lat=lat, lon=lon, grid_params=None)\n model = XgbNNWsForecast(master_id, lat=lat, lon=lon, grid_params=None)\n\n assert turbine_data_path[-3:] == \"pkl\", \"Unknown data file type!\"\n x_df, y_df = load_data_from_pkl(turbine_data_path)\n if \"Y.power_tb_revised\" in y_df.columns:\n y_df[\"Y.power_tb\"] = y_df[\"Y.power_tb_revised\"]\n y_df.drop(\"Y.power_tb_revised\", axis=1, inplace=True)\n\n if np.sum(np.isnan(y_df[\"Y.power_tb\"])) >= 0.9 * len(y_df):\n return None\n\n nwp_list = get_nwp_list(x_df.columns.values)\n model.set_nwp_list(nwp_list)\n # can change training type to change the number of training data\n x_df, y_df = get_training_data(x_df, y_df, 1)\n\n # feature engineering\n feature_ins = WsWpFeature(train_frequency, delta_hour, nwp_list)\n x_df, feature_dict = feature_ins.transform(x_df)\n\n grid_params = {'silent': [1], 'eta': [0.05], 'max_depth': range(3, 5), 'min_child_weight': [1, 3],\n 'subsample': [0.5], 'lambda': [1]}\n model.configuration(train_frequency=train_frequency, grid_params=grid_params, data_resampling=data_resampling,\n max_trees=500)\n x_df, nn_model = model.fit(x_df, y_df, feature_dict)\n feature_table = pd.concat([x_df, y_df], axis=1)\n feature_table.to_pickle(feature_file_path)\n\n wind_std_dict = check_original_std(x_df, y_df, nwp_list)\n wswp_error_analysis(model, y_df)\n model.update_error(wind_std_dict)\n return model, nn_model\n\n\"\"\"\ndef train_farm(farm_id, train_data_path, model_path, feature_path, data_resampling=False, turbine_index=[]):\n\n farm_ins = 
Farm(farm_id)\n wtg_list = farm_ins.wtg_list\n if len(turbine_index) == 0:\n turbine_index = list(range(len(wtg_list)))\n for n, wtg_ins in enumerate(wtg_list):\n turbine_id = wtg_ins.master_id\n if n not in turbine_index:\n continue\n lon = wtg_ins.lon\n lat = wtg_ins.lat\n turbine_file_path = os.path.join(train_data_path, \"turbine_{}.pkl\".format(turbine_id))\n model_file_path = os.path.join(model_path, \"turbine_{}.bin\".format(turbine_id))\n feature_file_path = os.path.join(feature_path, \"turbine_{}.pkl\".format(turbine_id))\n if not os.path.exists(turbine_file_path) or os.path.exists(model_file_path):\n continue\n\n print(\"training for turbine {}\".format(turbine_id))\n model = train_turbine_ws_model(turbine_id, lon=lon, lat=lat, feature_file_path=feature_file_path,\n turbine_data_path=turbine_file_path, data_resampling=data_resampling)\n if model is None:\n print(\"No trained model for turbine {}\".format(turbine_id))\n continue\n joblib.dump(model, model_file_path)\n wind_error_file = os.path.join(model_path, \"turbine_{}_train_wind_error.csv\".format(turbine_id))\n write_wind_error(model.get_train_error(), wind_error_file)\n\"\"\"\n\ndef train_farm_local(train_data_path, model_path, feature_path, turbine_info, data_resampling=False):\n\n for i in range(66):\n print(i)\n turbine_id = turbine_info.ix[i]['master_id']\n lat = turbine_info.ix[i]['lat']\n lon = turbine_info.ix[i]['lon']\n\n turbine_file_path = os.path.join(train_data_path, \"turbine_{}.pkl\".format(turbine_id))\n model_file_path = os.path.join(model_path, \"turbine_{}.bin\".format(turbine_id))\n feature_file_path = os.path.join(feature_path, \"turbine_{}.pkl\".format(turbine_id))\n\n if not os.path.exists(turbine_file_path) or os.path.exists(model_file_path):\n print('No File')\n\n print(\"training for turbine {}\".format(turbine_id))\n\n # add new code\n model, nn_model = train_turbine_ws_model(turbine_id, lon=lon, lat=lat, feature_file_path=feature_file_path,\n turbine_data_path=turbine_file_path, data_resampling=data_resampling)\n if model is None:\n print(\"No trained model for turbine {}\".format(turbine_id))\n\n joblib.dump(model, model_file_path)\n # add new code\n nn_model.save(model_file_path[:-4] + '.h5')\n\n wind_error_file = os.path.join(model_path, \"turbine_{}_train_wind_error.csv\".format(turbine_id))\n write_wind_error(model.get_train_error(), wind_error_file)\n\nif __name__ == '__main__':\n\n farm_id = \"WF00\"\n # train_start_date, train_end_date = get_train_info(farm_id)\n # for appointed training set\n train_start_date = '2017-10-04'\n train_end_date = '2018-10-17'\n train_start_date = date(*map(int, train_start_date.split('-')))\n train_end_date = date(*map(int, train_end_date.split('-')))\n\n data_resampling = True\n\n # baseline, linear, ridge, lasso, elasticnet, nn\n model = 'nn_new_sampling'\n model_type = 'model_revised_ws_shift_'+model+'_partial_training_resample'\n feature_type = \"train_data_{}\".format(model_type[6:])\n\n train_data_path = generate_folder(data_path, \"train_data_IBM_5\", farm_id, train_start_date, train_end_date, train_frequency)\n model_path = generate_folder(\"result\", model_type, farm_id, train_start_date, train_end_date, train_frequency)\n feature_path = generate_folder(\"result\", feature_type, farm_id, train_start_date, train_end_date, train_frequency)\n\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n if not os.path.exists(feature_path):\n os.makedirs(feature_path)\n\n log_file_path = os.path.join(model_path, 
\"train_{}.log\".format(datetime.now().strftime(\"%Y%m%d_%H%M%S\")))\n if os.path.exists(log_file_path):\n os.remove(log_file_path)\n logging.basicConfig(filename=log_file_path,\n level=logging.INFO,\n format='[%(asctime)s]-%(thread)d-%(levelname)s: %(message)s - %(filename)s:%(lineno)d')\n\n logging.info(\"{} {} {} {}\".format(farm_id, train_frequency, train_start_date, train_end_date))\n\n # read farm_info file\n farm_info_path = '../data/farm_'+farm_id+'/farm_'+farm_id+'_info.csv'\n turbine_info = pd.read_csv(farm_info_path)\n\n train_farm_local(train_data_path, model_path, feature_path, turbine_info, data_resampling)\n\n\n\n", "sub_path": "wind prediction/xgb_ws/train_ws_model.py", "file_name": "train_ws_model.py", "file_ext": "py", "file_size_in_byte": 7989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "xgb_ws.xgb_nn_ws_forecast.XgbNNWsForecast", "line_number": 45, "usage_type": "call"}, {"api_name": "power_forecast_common.common_misc.load_data_from_pkl", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 53, "usage_type": "call"}, {"api_name": "mlApproach.util.get_nwp_list", "line_number": 56, "usage_type": "call"}, {"api_name": "power_forecast_common.evaluation_misc.get_training_data", "line_number": 59, "usage_type": "call"}, {"api_name": "power_forecast_common.wswp_feature.WsWpFeature", "line_number": 62, "usage_type": "call"}, {"api_name": "xgb_wswp.config.train_frequency", "line_number": 62, "usage_type": "argument"}, {"api_name": "xgb_wswp.config.train_frequency", "line_number": 67, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 70, "usage_type": "call"}, {"api_name": "power_forecast_common.wswp_error.check_original_std", "line_number": 73, "usage_type": "call"}, {"api_name": "power_forecast_common.wswp_error.wswp_error_analysis", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 131, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "power_forecast_common.wswp_error.write_wind_error", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 146, "usage_type": "call"}, {"api_name": "power_forecast_common.common_misc.generate_folder", "line_number": 155, "usage_type": "call"}, {"api_name": "xgb_wswp.config.data_path", "line_number": 
155, "usage_type": "argument"}, {"api_name": "xgb_wswp.config.train_frequency", "line_number": 155, "usage_type": "argument"}, {"api_name": "power_forecast_common.common_misc.generate_folder", "line_number": 156, "usage_type": "call"}, {"api_name": "xgb_wswp.config.train_frequency", "line_number": 156, "usage_type": "argument"}, {"api_name": "power_forecast_common.common_misc.generate_folder", "line_number": 157, "usage_type": "call"}, {"api_name": "xgb_wswp.config.train_frequency", "line_number": 157, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 164, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 164, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 166, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 167, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 168, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 171, "usage_type": "call"}, {"api_name": "xgb_wswp.config.train_frequency", "line_number": 171, "usage_type": "argument"}, {"api_name": "pandas.read_csv", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "197081496", "text": "from .models import Product\nfrom .models import Definition\nfrom .models import Stack\nfrom .models import Run\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n \n# Serializers define the API representation. \nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = Product\n fields = ('url', 'id', 'name', 'description','version', 'arch', 'phase','props')\n \n \n# Serializers define the API representation.\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n \n class Meta:\n model = User\n fields = ('url', 'username', 'email', 'is_staff')\n \n# Serializers define the API representation.\nclass StackSerializer(serializers.HyperlinkedModelSerializer):\n \n class Meta:\n model = Stack\n fields = ('url', 'id', 'name','products')\n \n# Serializers define the API representation.\nclass RunSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = Run\n fields = ('url',\n 'id',\n 'definition',\n 'tester',\n 'run_start',\n 'run_stop',\n 'run_step',\n 'run_status',\n 'jenkins',\n 'report',\n 'cdf_original', \n 'cdf_teardown',\n 'test_info', \n 'run_info',\n 'cdf_config', \n 'run_uuid') \n \n# Serializers define the API representation. 
\nclass ScenarioSerializer(serializers.HyperlinkedModelSerializer):\n \n class Meta:\n model = Definition\n fields = ('url',\n 'name',\n 'description',\n 'version',\n 'test_type',\n 'ready_state', \n 'solution_repo',\n 'product_stack',\n 'created_date',\n 'modified_date',\n 'carbon_provision',\n 'carbon_orchestration',\n 'carbon_execution',\n 'carbon_report',\n 'carbon_cfg',\n 'solution_link',\n 'jira_link',\n 'defect_link',\n 'tcms_link',)\n \n \n \n# Serializers define the API representation. \nclass QueryForScenarioSerializer(serializers.HyperlinkedModelSerializer):\n \n class Meta:\n model = Definition\n fields = ('url',\n 'name',\n 'version'\n)", "sub_path": "dashboard/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 2577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Product", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 17, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 24, "usage_type": "name"}, {"api_name": "models.Stack", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Run", "line_number": 34, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 53, "usage_type": "name"}, {"api_name": "models.Definition", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 80, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 80, "usage_type": "name"}, {"api_name": "models.Definition", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "187219760", "text": "import os\nimport sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash\n\n\n\n# KyleAnthony 8\napp = Flask(__name__)\n\n################################################################\n########################APP ROUTES##############################\n################################################################\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html') #return index\n\n\n@app.route('/content', methods=['POST'])\ndef load():\n\ttry:\n\t\titem = request.form['item']\n\texcept:\n\t\treturn render_template('error.html') #invalid\n\n\tconn = sqlite3.connect(\"final_exam.sqlite\") \n\tcursor = conn.cursor() \n\tresult = cursor.execute('select Name, Carbs, Fat, Protein from FOOD where Name = ?', (item,))\n\titems = result.fetchall()\n\treturn render_template('content.html', items = items) #return food w/ info\n\n\napp.run()\n\n\n", "sub_path": "Final Exam/KyleAnthony.py", 
"file_name": "KyleAnthony.py", "file_ext": "py", "file_size_in_byte": 879, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "560000850", "text": "import cv2\nimport numpy as np\nimport sys\n\n# ESTE CODIGO LO EJECUTAREMOS DESDE TEMRINAL: python3.5 TERM-surf-detection-video.py SURF 8000\nalg = sys.argv[1]\n\ndef fd(algorithm):\n if algorithm == \"SIFT\":\n return cv2.xfeatures2d.SIFT_create()\n if algorithm == \"SURF\":\n return cv2.xfeatures2d.SURF_create(float(sys.argv[2]) if len(sys.argv) == 3 else 4000)\n\ncap = cv2.VideoCapture(0)\nwhile(True):\n ret, frame = cap.read()\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n fd_alg = fd(alg)\n keypoints, descriptor = fd_alg.detectAndCompute(gray_frame, None)\n cv2.drawKeypoints(image=frame, outImage=frame, keypoints=keypoints, flags=4, color=(51, 163, 236))\n cv2.imshow(\"Keypoints\", frame)\n if cv2.waitKey(1) & 0xff == ord(\"q\"):\n break\ncap.release()\ncv2.destroyAllWindows()", "sub_path": "Capitulo5/TERM-surf-detection-video.py", "file_name": "TERM-surf-detection-video.py", "file_ext": "py", "file_size_in_byte": 818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.xfeatures2d.SURF_create", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.drawKeypoints", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "490586305", "text": "# -*- coding: utf-8 -*-\n\n# from django.db import connection\n# from skuapp.table.t_sys_param import t_sys_param\n# from brick.table.t_config_online_amazon import t_config_online_amazon\n# from brick.table.t_config_amazon_shop_status import t_config_amazon_shop_status\n# from brick.table.t_large_small_corresponding_cate import t_large_small_corresponding_cate\n# from xadmin.views import BaseAdminPlugin\n# from django.template import loader\n# from django.template import RequestContext\n# from django.contrib.auth.models import Group\n# import logging\n# from brick.table.get_wish_product_order_updatetime import get_wish_product_order_updatetime\n# from django.db import connection\n# from datetime 
import datetime as timetime\n# from django.contrib import messages\n# from django_redis import get_redis_connection\n# from brick.classredis.classshopname import classshopname\n# from Project.settings import connRedis\n\nimport json\nfrom django.db.models import Q\nfrom django.template import loader\nfrom django.contrib.auth.models import User\nfrom xadmin.views import BaseAdminPlugin\nfrom skuapp.table.t_store_configuration_file import t_store_configuration_file\n\n\nclass t_online_info_amazon_store_secondplugin(BaseAdminPlugin):\n amazon_listing_secondplugin = False\n\n def init_request(self, *args, **kwargs):\n return bool(self.amazon_listing_secondplugin)\n\n def block_search_cata_nav(self, context, nodes):\n # messages.error(self.request,'search1-------%s' % timetime.now())\n # redis_coon = get_redis_connection(alias='product')\n\n if self.request.GET.get('shopname', '') != '':\n flag = self.request.GET.get('shopname', '')\n else:\n flag = ''\n if flag.find('AMZ-') == -1:\n flag = 'AMZ-' + flag.zfill(4)\n # lastupdatetime = ''\n # if flag != 'AMZ-':\n # get_wish_product_order_updatetime_obj = get_wish_product_order_updatetime(connection, flag)\n # up_obj = get_wish_product_order_updatetime_obj.get_updatetime('Product')\n # if up_obj:\n # lastupdatetime = up_obj[1] # 上次增量更新的时间\n\n # classshopname_obj = classshopname(redis_cnxn=redis_coon)\n # refreshstatus = classshopname_obj.get_api_status_by_shopname(flag)\n # if refreshstatus is None:\n # refreshstatus = ''\n\n # synurl = ''\n # if flag != 'Wish-0000' and refreshstatus == '':\n # synurl = '/syndata_by_wish_api_shopname/?shopname=%s' % flag\n\n buttonlist = []\n if self.request.user.is_superuser:\n objs = t_store_configuration_file.objects.filter(ShopName__startswith='AMZ-').values('ShopName')\n else:\n allobj = User.objects.filter(groups__id__in=[38])\n userID = []\n for each in allobj:\n userID.append(each.id)\n if (self.request.user.id in userID):\n objs = t_store_configuration_file.objects.filter(ShopName__startswith='AMZ-').values('ShopName')\n else:\n objs = t_store_configuration_file.objects.filter(\n Q(Seller=context['user'].first_name) | Q(Published=context['user'].first_name) | Q(\n Operators=context['user'].first_name)).values('ShopName_temp')\n for obj in objs:\n buttonlist.append(obj['ShopName'])\n buttonlist.sort()\n\n activeflag = self.request.GET.get('_p_is_fba', '')\n\n nowurl = self.request.get_full_path().replace('_p_is_fba=1', '').replace('_p_is_fba=0', '').replace('?&', '?').replace('&&', '&')\n if nowurl[-1:] in ['?', '&']:\n nowurl = nowurl[:-1]\n if nowurl.find('?') == -1:\n nowurl = nowurl + '?'\n else:\n nowurl = nowurl + '&'\n # messages.error(self.request, 'search2-------%s' % timetime.now())\n nodes.append(\n loader.render_to_string(\n 'amazon_products_listing_base_secondtemplate.html',\n {\n 'objs': json.dumps(buttonlist),\n 'flag': flag,\n 'nowurl': nowurl,\n 'activeflag': activeflag\n }\n )\n )\n", "sub_path": "skuapp/plugin/t_online_info_amazon_store_secondplugin.py", "file_name": "t_online_info_amazon_store_secondplugin.py", "file_ext": "py", "file_size_in_byte": 4195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "xadmin.views.BaseAdminPlugin", "line_number": 29, "usage_type": "name"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file.objects", 
"line_number": 63, "usage_type": "attribute"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file", "line_number": 63, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 65, "usage_type": "name"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file", "line_number": 70, "usage_type": "name"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "skuapp.table.t_store_configuration_file.t_store_configuration_file", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 73, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 90, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 90, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "106365738", "text": "from collections import namedtuple\nimport datetime\nimport os\n\nimport eloranking\nimport loldb\n\n\n# Because of how pickling works, it will be very, very annoying to change this type.\nclass Match():\n def __init__(self, players1=None, players2=None, score1=None, score2=None, when=None):\n self.players1 = players1\n self.players2 = players2\n self.score1 = score1\n self.score2 = score2\n self.when = when\n \n\nContext = namedtuple('Context',\n ['channel', 'sender', 'slack', 'bot_id', 'users', 'matches'])\n\n\ndef get_name(uid, users):\n matches = filter(lambda x: x['id'] == uid.upper(), users)\n if matches:\n return matches[0]['name']\n return None\n\n\ndef reply_with_message(text, context, fixed_width=False):\n context.slack.chat.post_message(channel=context.channel,\n text=text if not fixed_width else '```{}\\n```'.format(text),\n as_user=True)\n\n\ndef reply_with_file(file, context):\n reply = context.slack.files.upload(file, channels=context.channel)\n\n\ndef rank(context):\n elos = eloranking.get_rankings(context.matches)\n elos = sorted(elos.items(), key=lambda x: x[1], reverse=True)\n uids, scores = zip(*elos)\n ranks = range(1, len(elos) + 1)\n wins, losses = eloranking.get_ws_ls(context.matches, uids)\n\n n = lambda u: get_name(u, context.users)\n longest_name = max(map(len, map(n, uids)))\n rankFmt = lambda u,s,r,w,l: \"{r:>3}. 
{u:<{ln}} {s} {w:>2} - {l:>2}\".format(u=u,r=r,s=s,w=w,l=l,ln=longest_name)\n result_str = '\\n'.join([rankFmt(*x) for x in zip(*[map(n, uids), scores, ranks, wins, losses])])\n reply_with_message(result_str, context, fixed_width=True)\n\n\ndef stats(users, context):\n if users == []:\n users = [context.sender]\n\n uppercase_users = map(lambda x: x.upper(), users)\n user_names = map(lambda x: get_name(x, context.users), users)\n print(uppercase_users, user_names)\n fig_file = eloranking.get_stats_graph(context.matches,\n uppercase_users,\n user_names)\n print(fig_file)\n reply_with_file(fig_file, context)\n os.remove(fig_file)\n\n # Would like to do all users at once, but not currently supported.\n for u in users:\n u = u.upper()\n name = get_name(u, context.users)\n\n elos = eloranking.compile_histories(context.matches)[u]\n wins, losses = eloranking.get_ws_ls(context.matches, [u])\n\n message = \"Stats for {} since {}\\n\".format(name, elos[1][0].strftime(\"%b %-d %Y\"))\n message += \"-\" * len(message) + \"\\n\"\n message += \"Current score: {}\\n\".format(elos[0][-1])\n message += \"Win-Loss Record: {}-{}\\n\".format(wins[0], losses[0])\n last_game = loldb.getlastgame(u)\n n = lambda u: get_name(u, context.users)\n last_game = \"{} vs {}: {} - {}\".format(' and '.join(map(n, last_game.players1)),\n ' and '.join(map(n, last_game.players2)),\n last_game.score1,\n last_game.score2)\n message += \"Last match: {}\".format(last_game)\n\n reply_with_message(message, context, fixed_width=True)\n\n\ndef results(users, score1, score2, context):\n match = Match(players1=map(lambda x: x.upper(), users[0]),\n players2=map(lambda x: x.upper(), users[1]),\n score1=int(score1),\n score2=int(score2),\n when=datetime.datetime.now())\n game_id = loldb.addmatch(match)\n reply_with_message(\"Match {} submitted.\".format(game_id), context)\n rank(context)\n\n\ndef predict(users, context):\n n = lambda u: get_name(u, context.users)\n odds_statement = \"a {:.1f}% chance of beating \"\n singles_winner = \"{} has \"\n doubles_winner = \"{} and {} have \"\n singles_loser = \"{}.\"\n doubles_loser = \"{} and {}.\"\n\n def predict_fmt(winners, odds, losers):\n if len(winners) > 1:\n m = doubles_winner.format(*map(n, winners))\n else:\n m = singles_winner.format(n(winners[0]))\n m += odds_statement.format(odds)\n if len(losers) > 1:\n m += doubles_loser.format(*map(n, losers))\n else:\n m += singles_loser.format(n(losers[0]))\n return m\n\n team_a = map(lambda x: x.upper(), users[0])\n team_b = map(lambda x: x.upper(), users[1])\n prediction = eloranking.predict_winner(context.matches, team_a, team_b)\n reply_with_message(predict_fmt(*prediction), context)\n\n\ndef delete(game_id, context):\n loldb.deletematch(game_id)\n reply_with_message(\"Match deleted.\", context)\n\n", "sub_path": "core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 4733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.namedtuple", "line_number": 19, "usage_type": "call"}, {"api_name": "eloranking.get_rankings", "line_number": 41, "usage_type": "call"}, {"api_name": "eloranking.get_ws_ls", "line_number": 45, "usage_type": "call"}, {"api_name": "eloranking.get_stats_graph", "line_number": 61, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 66, "usage_type": "call"}, {"api_name": "eloranking.compile_histories", "line_number": 73, "usage_type": "call"}, {"api_name": "eloranking.get_ws_ls", "line_number": 74, "usage_type": 
"call"}, {"api_name": "loldb.getlastgame", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "attribute"}, {"api_name": "loldb.addmatch", "line_number": 97, "usage_type": "call"}, {"api_name": "eloranking.predict_winner", "line_number": 124, "usage_type": "call"}, {"api_name": "loldb.deletematch", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "91635678", "text": "import datetime\nimport time\nimport jwt\nfrom django.http import QueryDict\n\nfrom response import AESJsonResponse\nfrom utils import verify_sign, overwrite_request, RedisUtil\n\n\nclass RequestMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n # One-time configuration and initialization.\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n if request.method == 'PUT':\n request.PUT = QueryDict(request.body).dict()\n elif request.method == 'DELETE':\n request.DELETE = QueryDict(request.body).dict()\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n return response\n\n\nclass SignAuthMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n # One-time configuration and initialization.\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n method = request.method\n m = getattr(request, method, None)\n if m:\n uaid = m.get('uaid')\n if uaid:\n try:\n app = App.objects.get(uaid=uaid)\n request.app = app\n except App.DoesNotExist:\n pass\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n\n return response\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n app_name = request.resolver_match.app_name\n if app_name in [apps.DgzConfig.name]:\n if getattr(view_func, 'sign_exempt', False):\n return\n if request.method in getattr(view_func.view_class, 'sign_exempt_methods', []):\n return\n else:\n return\n params = dict(QueryDict(request.body).dict(), **request.GET.dict())\n timestamp = params.get('timestamp') # type:str\n if '/api/pay/' in request.path_info:\n key = request.app.pay_key\n else:\n key = settings.PRIVATE_KEY\n if not timestamp or not timestamp.isdigit():\n return AESJsonResponse(status=415, msg='timestamp required')\n if int(time.time()) - int(timestamp) > 3600:\n return AESJsonResponse(status=412, msg='请检查设备时间')\n if not verify_sign(params, key):\n return AESJsonResponse(status=412, msg='sign incorrect')\n overwrite_request(request)\n\n\nclass JwtAuthMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n # One-time configuration and initialization.\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n\n return response\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n app_name = request.resolver_match.app_name\n key = settings.SECRET_KEY\n token = request.META.get('HTTP_AUTHORIZATION')\n if app_name in ['dgz']:\n if getattr(view_func, 'jwt_exempt', False):\n return\n if request.method in getattr(view_func.view_class, 'jwt_exempt_methods', []) and not 
token:\n return\n else:\n return\n try:\n decoded = jwt.decode(token, key, algorithms='HS256')\n user_id = decoded.get('user_id')\n app = getattr(request, 'app', None)\n if app:\n user = get_object_or_404(User, pk=user_id, app=app)\n uaid = app.uaid\n else:\n user = get_object_or_404(User, pk=user_id)\n uaid = user.app.uaid\n request.user = user\n request.user.is_active = True\n request.user.is_authenticated = True\n # 统计实时在线人数\n now = int(datetime.datetime.now().timestamp())\n RedisUtil.conn.zadd(cache.ONLINE_USERS_KEYS.format(uaid=uaid), {user_id: now})\n if request.path == reverse('api:receive_task') and request.method == 'POST':\n # 统计实时在线做任务人数\n now = int(datetime.datetime.now().timestamp())\n RedisUtil.conn.zadd(cache.ONLINE_TASK_USERS_KEYS.format(uaid=uaid), {user_id: now})\n except jwt.PyJWTError as e:\n return AESJsonResponse(status=401, msg='登录已过期')\n", "sub_path": "middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 4757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.http.QueryDict", "line_number": 19, "usage_type": "call"}, {"api_name": "django.http.QueryDict", "line_number": 21, "usage_type": "call"}, {"api_name": "django.http.QueryDict", "line_number": 63, "usage_type": "call"}, {"api_name": "response.AESJsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 71, "usage_type": "call"}, {"api_name": "response.AESJsonResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.verify_sign", "line_number": 73, "usage_type": "call"}, {"api_name": "response.AESJsonResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.overwrite_request", "line_number": 75, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "attribute"}, {"api_name": "utils.RedisUtil.conn.zadd", "line_number": 119, "usage_type": "call"}, {"api_name": "utils.RedisUtil.conn", "line_number": 119, "usage_type": "attribute"}, {"api_name": "utils.RedisUtil", "line_number": 119, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "attribute"}, {"api_name": "utils.RedisUtil.conn.zadd", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.RedisUtil.conn", "line_number": 123, "usage_type": "attribute"}, {"api_name": "utils.RedisUtil", "line_number": 123, "usage_type": "name"}, {"api_name": "jwt.PyJWTError", "line_number": 124, "usage_type": "attribute"}, {"api_name": "response.AESJsonResponse", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "129274560", "text": "\n# coding=gbk\nimport csv\nimport os, time\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication, QLineEdit, QComboBox,QPushButton\nfrom PyQt5 import QtCore\nfrom Initalize20180421 import *\nfrom soap_all_commands_for_dsc import soap_reload_rule_engine,soap_add_decide_route,soap_add_list_cache,soap_reload_listcaches,soap_check_decide_route\nfrom file_transaction import *\nfrom SendEmail import *\nimport os\nif not os.path.exists(r'.\\\\file\\DB.csv'):\n\tINITIALIZE_DB()\n\nPEERINGPOLICY=csv2dict('PeeringPolicy.csv')\n\ndef 
NEW_EMAIL2PEER(SCENARIO,OP_A,REALM_A,IMSI_A,OP_B,REALM_B,IMSI_B):\n\tfor row in PEERINGPOLICY:\n\t\tif row['SCENARIO']==SCENARIO:\n\t\t\tSVR_PEER=row['SVR_PEER']\n\t\t\tHUB_PEER=row['HUB_PEER']\n\t\t\tSVR_NODE=row['SVR_NODE']\n\t\t\tHUB_NODE=row['HUB_NODE']\n\t\t\tTolist=row['EMAIL']\n\tSubject='New Election Request: '+OP_A+' - '+OP_B\n\temail_body='Dear Colleagues,\r\n\r\n\\\nGreetings from Syniverse!\r\n\\\nWe received a request to establish an LTE roaming relationship between '+OP_A+' and '+OP_B+'.\r\n'+\\\nOP_A+'\r\n'+\\\n'Realm :'+REALM_A+'\r\n'+\\\n'IMSI Prefix :'+IMSI_A+'\r\n'+\\\nOP_B+'\r\n'+\\\n'Realm :'+REALM_B+'\r\n'+\\\n'IMSI Prefix :'+IMSI_B+'\r\n'+\\\n'Peering Point :'+SVR_PEER+'-'+HUB_PEER+'\r\n'+\\\n'Syniverse Node:'+SVR_NODE+'\r\n'+\\\n'Peering Node:'+HUB_NODE+'\r\n\r\n'+\\\n'BR\r\nSyniverse DSS Team'\n\tsendemail(Tolist,'DSS_Route_Provision@syniverse.com',Subject,email_body)\n\n\n\t\ndef BACKUP_DB():\n\tfile_list=['DB.csv','HKG_LISTCACHE.csv']\n\tfor file in file_list:\n\t\t#BackupFile ('hello_world.py','.\\\\', '.\\\\backup')\n\t\tBackupFile(file,'.\\\\file\\\\', '.\\\\backup')\n\t\n#Read DB from CSV file and initialize variable\ndef csv2dict(filename):\n\tnew_dict = {}\n\twith open(filename, 'r') as f:\n\t\treader = csv.reader(f, delimiter=',')\n\t\tfieldnames = next(reader)\n\t\treader = csv.DictReader(f, fieldnames=fieldnames, delimiter=',')\n\t\tnew_dict = [row for row in reader]\n\treturn new_dict\n\nDB_sheet = csv2dict(\".\\\\file\\DB.csv\")\n\nDRAlist=['HKG','AMS','CHI','SNG','FRT','DAL']\nRegionList=['AP','EU','NA']\n\n#This function splits a string separated by ; or , into a LIST and strips spaces from each item\ndef SPLIT2LIST(items):\n\tLIST=[]\n\titems = items.lower()\n\titems = items.replace(\",\",\";\")\n\ttemp = items.split(\";\")\n\tfor item in temp:\n\t\tLIST.append(item.strip())\n\treturn(LIST)\n\n#Set the global shared variables SURL1,SURL2,DSC1,DSC2 according to Region\ndef Region2URL_DSC(Region):\n\t\tglobal SURL1,SURL2,DSC1,DSC2\n\t\tif Region ==\"AP\":\n\t\t\tSURL1=\"http://10.162.28.186:8080/DSC_SOAP/query?\"\n\t\t\tSURL2=\"http://10.163.28.131:8080/DSC_SOAP/query?\"\n\t\t\tDSC1=\"HKG\"\n\t\t\tDSC2=\"SNG\"\n\t\tif Region ==\"EU\":\n\t\t\tSURL1=\"http://10.160.28.32:8080/DSC_SOAP/query?\"\n\t\t\tSURL2=\"http://10.161.28.32:8080/DSC_SOAP/query?\"\n\t\t\tDSC1=\"AMS\"\n\t\t\tDSC2=\"FRT\"\t\t\n\t\tif Region ==\"NA\":\n\t\t\tSURL1=\"http://10.166.28.200:8080/DSC_SOAP/query?\"\n\t\t\tSURL2=\"http://10.164.28.189:8080/DSC_SOAP/query?\"\n\t\t\tDSC1=\"CHI\"\n\t\t\tDSC2=\"DAL\"\n\n#DSC output popup-window code\ndef MESSAGE_OUTPUT(Title,Output_Text):\n\tdialog=QDialog()\n\tdialog.resize(200,100)\n\tMSG = QLabel(Output_Text,dialog)\n\tMSG.move(50,20)\n\n\tdialog.setWindowTitle(Title)\n\tdialog.setWindowModality(Qt.ApplicationModal)\n\tdialog.exec_()\n\t\ndef BIOUTPUT(Title,Outputlist_1,Outputlist_2):\n\t\tdialog=QDialog()\n\t\tdialog.resize(1250,600)\n\t\tDSC_1 = QLabel(DSC1,dialog)\n\t\tDSC_1.move(50,20)\n\t\tDSC_2 = QLabel(DSC2,dialog)\n\t\tDSC_2.move(650,20)\n\t\n\t\tOUTPUT_1 = QTextEdit(dialog)\n\t\tfor row in Outputlist_1:\n\t\t\tOUTPUT_1.append(row)\n\t\tOUTPUT_1.move(50,50)\n\t\tOUTPUT_1.resize(550,500)\n\t\t\n\t\tOUTPUT_2 = QTextEdit(dialog)\n\t\tfor row in Outputlist_2:\n\t\t\tOUTPUT_2.append(row)\n\t\tOUTPUT_2.move(650,50)\n\t\tOUTPUT_2.resize(550,500)\n\n\t\tdialog.setWindowTitle(Title)\n\t\tdialog.setWindowModality(Qt.ApplicationModal)\n\t\tdialog.exec_()\n\t\t\ndef BIOUTPUT_UPDOWN(Title,Outputlist_1,Outputlist_2):\n\t\tdialog=QDialog()\n\t\tdialog.resize(1366,740)\n\t\tDSC_1 
= QLabel(DSC1,dialog)\n\t\tDSC_1.move(50,15)\n\t\tDSC_2 = QLabel(DSC2,dialog)\n\t\tDSC_2.move(50,350)\n\t\n\t\tOUTPUT_1 = QTextEdit(dialog)\n\t\tfor row in Outputlist_1:\n\t\t\tOUTPUT_1.append(row)\n\t\tOUTPUT_1.move(50,30)\n\t\tOUTPUT_1.resize(1300,300)\n\t\t\n\t\tOUTPUT_2 = QTextEdit(dialog)\n\t\tfor row in Outputlist_2:\n\t\t\tOUTPUT_2.append(row)\n\t\tOUTPUT_2.move(50,370)\n\t\tOUTPUT_2.resize(1300,300)\n\n\t\tdialog.setWindowTitle(Title)\n\t\tdialog.setWindowModality(Qt.ApplicationModal)\n\t\tdialog.exec_()\n\t\t\ndef SINGLE_OUTPUT(Title,Outputlist_1):\n\t\tdialog=QDialog()\n\t\tdialog.resize(1366,768)\n\t\tDSC_1 = QLabel(DSC1,dialog)\n\t\tDSC_1.move(50,20)\n\t\n\t\tOUTPUT_1 = QTextEdit(dialog)\n\t\tfor row in Outputlist_1:\n\t\t\tOUTPUT_1.append(row)\n\t\tOUTPUT_1.move(50,50)\n\t\tOUTPUT_1.resize(1360,750)\n\t\t\n\t\tdialog.setWindowTitle(Title)\n\t\tdialog.setWindowModality(Qt.ApplicationModal)\n\t\tdialog.exec_()\n\t\t\n#DSC 组合命令功能执行代码\ndef Reload_Region_LIST(region):\n\tRegion2URL_DSC(region)\n\tOutputlist_1=[]\n\tOutputlist_2=[]\t\n\tOutput1=soap_reload_listcaches(SURL1)\n\tOutput2=soap_reload_listcaches(SURL2)\n\tOutputlist_1.append(Output1)\n\tOutputlist_2.append(Output2)\n\tBIOUTPUT(\"Reload ListCaches\",Outputlist_1,Outputlist_2)\n\ndef Reload_Region_RULE(region):\n\tRegion2URL_DSC(region)\n\tOutputlist_1=[]\n\tOutputlist_2=[]\n\tOutput1=soap_reload_rule_engine(SURL1)\n\tOutput2=soap_reload_rule_engine(SURL2)\n\tOutputlist_1.append(Output1)\n\tOutputlist_2.append(Output2)\n\tBIOUTPUT(\"Reload Rule Engine\",Outputlist_1,Outputlist_2)\n\t\ndef CHECK_DECIDE_ROUTE2OP(region,source_realms,dest_realms):\n\tRegion2URL_DSC(region)\n\tsource_realm_list = []\n\tdest_realm_list = []\n\tsource_realm_list.insert(0,'*')\n\tOutputlist_1=[]\n\tOutputlist_2=[]\n\tsource_realm_list = SPLIT2LIST(source_realms)\n\tsource_realm_list.insert(0,'*')\n\tdest_realm_list = SPLIT2LIST(dest_realms)\n\t\n\tfor source_realm in source_realm_list:\n\t\tfor dest_realm in dest_realm_list:\n\t\t\tOutputlist_1.append(source_realm+\"->\"+dest_realm)\n\t\t\tOutput1 = soap_check_decide_route(SURL1,'*',source_realm,'*',dest_realm,'*','*')\n\t\t\tif Output1==None:\n\t\t\t\tOutput1=\"None\"\n\t\t\tOutputlist_1.append(Output1)\n\t\t\t\n\t\t\tOutputlist_2.append(source_realm+\"->\"+dest_realm)\n\t\t\tOutput2 = soap_check_decide_route(SURL2,'*',source_realm,'*',dest_realm,'*','*')\n\t\t\tif Output2==None:\n\t\t\t\tOutput2=\"None\"\n\t\t\tOutputlist_2.append(Output2)\n\t\t\t#@将来写成HTML代码带颜色\n\tBIOUTPUT_UPDOWN(\"Check Decide Route:*->OP and Source->OP\",Outputlist_1,Outputlist_2)\n\t\n#add realms to list on regional DSCs and sent output to popup window\ndef ADD_REALMS2LIST(Region,Realms,LIST_Name):\n\tif Region=='':\n\t\tMESSAGE_OUTPUT(\"Error\",\"Empty Region\")\n\telif Realms=='':\n\t\tMESSAGE_OUTPUT(\"Error\",\"Empty Realm\")\n\telif LIST_Name=='':\n\t\tMESSAGE_OUTPUT(\"Error\",\"Empty LIST\")\n\telse:\t\n\t\tRegion2URL_DSC(Region)\n\t\tOutputlist_1=[]\n\t\tOutputlist_2=[]\n\t\tRealm_LIST=[]\n\t\tRealms = Realms.lower()\n\t\tRealms = Realms.replace(\",\",\";\")\n\t\ttemp = Realms.split(\";\")\n\t\tfor realm in temp:\n\t\t\tRealm_LIST.append(realm.strip())\n\t\t\n\t\tfor realm in Realm_LIST:\n\t\t\tresponse=soap_add_list_cache(LIST_Name,realm,SURL1)\n\t\t\t#response=\"Success!!\"\n\t\t\tresponse=realm+\" \"+response\n\t\t\tOutputlist_1.append(response)\n\t\t\t\n\t\t\tresponse=soap_add_list_cache(LIST_Name,realm,SURL2)\n\t\t\t#response=\"Success!!\"\n\t\t\tresponse=realm+\" 
\"+response\n\t\t\tOutputlist_2.append(response)\n\t\t\t\n\t\tBIOUTPUT('Add ListCache',Outputlist_1,Outputlist_2)\n\t\t\ndef K2R(Region,Realms2,List1,Realms1,Name1,Name2):\n\t\n\tif Region=='':\n\t\tMESSAGE_OUTPUT(\"Error\",\"Empty Region\")\n\tif Realms2=='':\n\t\tMESSAGE_OUTPUT(\"Error\",\"Empty Realms2\")\n\tRegion2URL_DSC(Region)\n\tOutputlist_1=[]\n\tOutputlist_2=[]\n\t\n\tif Region =='NA':\n\t\t#Verison K2R route on NA DSC\n\t\tif Name1 =='Verizon Wireless' or Name2 =='Verizon Wireless':\n\t\t\tif Name1 =='Verizon Wireless':\n\t\t\t\tRealms = SPLITLIST(Reams2)\n\t\t\tif Name2 =='Verizon Wireless':\n\t\t\t\tRealms = SPLITLIST(Reams1)\n\t\t\tprint(Realms)\n\t\t\tfor realm in Realms:\n\t\t\t\tresponse=soap_add_list_cache('LIST_VERIZON_WIRELESS_K2R_RP_REALM',realm,SURL1)\n\t\t\t\t#response=\"Success!!\"\n\t\t\t\tresponse='LIST_VERIZON_WIRELESS_K2R_RP_REALM:+'+realm+\" \"+response\n\t\t\t\tOutputlist_1.append(response)\n\t\t\t\t\n\t\t\t\tresponse=soap_add_list_cache('LIST_VERIZON_WIRELESS_K2R_RP_REALM',realm,SURL2)\n\t\t\t\t#response=\"Success!!\"\n\t\t\t\tresponse='LIST_VERIZON_WIRELESS_K2R_RP_REALM:+'+realm+\" \"+response\n\t\t\t\tOutputlist_2.append(response)\n\t\t\t\t\n\t\t\tBIOUTPUT('Add ListCache LIST_VERIZON_WIRELESS_K2R_RP_REALM',Outputlist_1,Outputlist_2)\n\t\telse:\n\t\t\t#NA K2R create RF for all realms\n\t\t\tfor realm1 in Realms1:\n\t\t\t\tfor realm2 in Realms2:\n\t\t\t\t\tDESC='K2R:%request_filter% '+Name1+\"-\"+Name2\n\t\t\t\t\tresponse=soap_add_rule(dsc_url=SURL1,ruletype='REQUEST_FILTER',description=DESC,pop_name='NA PoP',origrealm=realm1,destrealm=realm2)\n\t\t\t\t\tresponse=Name1+':'+realm1+'>'+Name2+':'+realm2+'RF '+response\n\t\t\t\t\tOutputlist_1.append(response)\n\n\t\t\t\t\tDESC='K2R:%request_filter% '+Name1+\"-\"+Name2\n\t\t\t\t\tresponse=soap_add_rule(dsc_url=SURL2,ruletype='REQUEST_FILTER',description=DESC,pop_name='NA PoP',origrealm=realm1,destrealm=realm2)\n\t\t\t\t\tresponse=Name1+':'+realm1+'>'+Name2+':'+realm2+'RF '+response\n\t\t\t\t\tOutputlist_2.append(response)\n\n\t\t\tfor realm2 in Realms2:\n\t\t\t\tfor realm1 in Realms1:\n\t\t\t\t\tDESC='K2R:%request_filter% '+Name2+\"-\"+Name1\n\t\t\t\t\tresponse=soap_add_rule(dsc_url=SURL1,ruletype='REQUEST_FILTER',description=DESC,pop_name='NA PoP',origrealm=realm2,destrealm=realm1)\n\t\t\t\t\tresponse=Name1+':'+realm1+'>'+Name2+':'+realm2+'RF '+response\n\t\t\t\t\tOutputlist_1.append(response)\n\n\t\t\t\t\tDESC='K2R:%request_filter% '+Name2+\"-\"+Name1\n\t\t\t\t\tresponse=soap_add_rule(dsc_url=SURL2,ruletype='REQUEST_FILTER',description=DESC,pop_name='NA PoP',origrealm=realm2,destrealm=realm1)\n\t\t\t\t\tresponse=Name1+':'+realm1+'>'+Name2+':'+realm2+'RF '+response\n\t\t\t\t\tOutputlist_2.append(response)\t\t\t\t\t\n\t\t\t\n\t\t\tBIOUTPUT('Add K2R Filter on NA DSC',Outputlist_1,Outputlist_2)\n\n\telif Region == 'AP' or Region == 'EU':\n\t\tADD_REALMS2LIST(Region,Realms2,LIST1)\t\t\n\t\t\t\t\n\t\t\t\n\t\n#soap_check_decide_route(dsc_url,source_host,source_realm,dest_host,dest_realm,adjacent_source_peer,adjacent_source_realm)\n\n#open route window layout\nclass OPEN_ROUTE(QWidget):\n\tdef __init__(self,parent=None):\n\t\tsuper(OPEN_ROUTE,self).__init__(parent)\n\t\tself.Full_list=['']\n\t\t#DB_sheet = csv2dict(\".\\\\file\\DB.csv\")\n\t\tfor row in DB_sheet:\n\t\t\tself.Full_list.append(row[\"name\"])\n\t\t\t\n\t\tself.resize(1000,800)\n\t\tQE_length = 400\n\t\tQE_hight = 20\n\t\tY_start = 40\n\t\tY_step = 30\n\t\tX10 =1080\n\t\tX11 = 1160\n\t\tX12 = 1240\n\t\tX13 = 1240\n\n\n#COMMAND BUTTONS ON RIGHT\t\t\n#DB 
BUTTON\n\t\tY_start=70\t\n\t\tself.Lable_DB_DATE = QLabel(\"DB_Date\",self)\n\t\tself.Lable_DB_DATE .move (X10,Y_start-Y_step*1)\t\t\n\t\tself.DB_DATE = QLineEdit(self)\n\t\tself.DB_DATE.setText(GetFileDate('.\\\\file\\DB.csv'))\n\t\tself.DB_DATE.setGeometry(QtCore.QRect(X11, Y_start-Y_step*1, QE_length/3.9, QE_hight))\n\t\t\n\t\tself.UPD_DB = QPushButton('UPD_DB',self)\n\t\tself.UPD_DB.move(X10,Y_start+Y_step*0)\n\t\tself.UPD_DB.clicked.connect(lambda:INITIALIZE_DB())\n\t\t\n\t\tself.UPD_DB = QPushButton('BACKUP_DB',self)\n\t\tself.UPD_DB.move(X11,Y_start+Y_step*0)\n\t\tself.UPD_DB.clicked.connect(lambda:BACKUP_DB())\n\t\t\n\t\tself.UPD_DB = QPushButton('Restore_DB',self)\n\t\tself.UPD_DB.move(X12,Y_start+Y_step*0)\n\t\tself.UPD_DB.clicked.connect(lambda:RestoreFile(\".\\\\backup\",\".\\\\\"))\n#RULE ENGINE BUTTON\t\t\n\t\tself.Lable_DB_DATE = QLabel(\"RULE_Date\",self)\n\t\tself.Lable_DB_DATE .move (X10,Y_start+Y_step*1)\t\t\n\t\tself.DB_DATE = QLineEdit(self)\n\t\t#GetFileDate(filename)\n\t\t#self.DB_DATE.setText(GetFileDate(\"DB.csv\"))\n\t\tself.DB_DATE.setGeometry(QtCore.QRect(X11, Y_start+Y_step*1, QE_length/3.9, QE_hight))\n\t\t\n\t\tself.UPD_DB = QPushButton('UPD_RULE',self)\n\t\tself.UPD_DB.move(X10,Y_start+Y_step*2)\n\t\tself.UPD_DB.clicked.connect(self.UPDATE_DB)\n\t\t\n\t\tself.UPD_DB = QPushButton('BACKUP_RULE',self)\n\t\tself.UPD_DB.move(X11,Y_start+Y_step*2)\n\t\tself.UPD_DB.clicked.connect(self.UPDATE_DB)\n\t\t\n\t\tself.UPD_DB = QPushButton('Restore_RULE',self)\n\t\tself.UPD_DB.move(X12,Y_start+Y_step*2)\n\t\tself.UPD_DB.clicked.connect(lambda:RestoreFile(\".\\\\backup\",\".\\\\\"))\n#RELOAD BUTTON\n\n\t\tself.Lable_DB_DATE = QLabel(\"Reload ListCaches\",self)\n\t\tself.Lable_DB_DATE .move (X10,Y_start+Y_step*4)\t\n\t\tself.Lable_DB_DATE = QLabel(\"Reload Rule\",self)\n\t\tself.Lable_DB_DATE .move (X13,Y_start+Y_step*4)\t\n\t\t\t\t\t\n\t\tself.Re_AP_LIST = QPushButton('Re_AP_LIST',self)\n\t\tself.Re_AP_LIST.move(X10,Y_start+Y_step*5)\n\t\tself.Re_AP_LIST.clicked.connect(lambda: Reload_Region_LIST(\"AP\"))\n\t\t\n\t\tself.Re_EU_LIST = QPushButton('Re_EU_LIST',self)\n\t\tself.Re_EU_LIST.move(X10,Y_start+Y_step*6)\n\t\tself.Re_EU_LIST.clicked.connect(lambda: Reload_Region_LIST(\"EU\"))\n\t\t\n\t\tself.Re_NA_LIST = QPushButton('Re_NA_LIST',self)\n\t\tself.Re_NA_LIST.move(X10,Y_start+Y_step*7)\n\t\tself.Re_NA_LIST.clicked.connect(lambda: Reload_Region_LIST(\"NA\"))\n\t\t\n\t\tself.Re_AP_RULE = QPushButton('Re_AP_RULE',self)\n\t\tself.Re_AP_RULE.move(X13,Y_start+Y_step*5)\n\t\tself.Re_AP_RULE.clicked.connect(lambda: Reload_Region_RULE(\"AP\"))\n\t\t\n\t\tself.Re_EU_RULE = QPushButton('Re_EU_RULE',self)\n\t\tself.Re_EU_RULE.move(X13,Y_start+Y_step*6)\n\t\tself.Re_EU_RULE.clicked.connect(lambda: Reload_Region_RULE(\"EU\"))\n\t\t\n\t\tself.Re_NA_RULE = QPushButton('Re_NA_RULE',self)\n\t\tself.Re_NA_RULE.move(X13,Y_start+Y_step*7)\n\t\tself.Re_NA_RULE.clicked.connect(lambda: Reload_Region_RULE(\"NA\"))\n\n#send email\n\t\t\n\t\tself.Lable_DB_DATE = QLabel(\"Send Email to Peer\",self)\n\t\tself.Lable_DB_DATE .move (X10,Y_start+Y_step*9+10)\t\n\n\t\tself.New_Election = QPushButton('New_Election',self)\n\t\tself.New_Election.move(X10,Y_start+Y_step*10)\n\t\tself.New_Election.clicked.connect(lambda: NEW_EMAIL2PEER(self.Combo_SCENARIO.currentText(),self.OP_A.text(),self.realm_A.text(),self.IMSI_A.text(),self.OP_B.text(),self.realm_B.text(),self.IMSI_B.text()))\n\t\t\n\t\tself.Provisoned = 
QPushButton('Provisoned',self)\n\t\tself.Provisoned.move(X10,Y_start+Y_step*11)\n\t\tself.Provisoned.clicked.connect(lambda: PROVISONED_EMAIL2PEER(self.Combo_SCENARIO.currentText()))\n\n\t\t#PEERINGPOLICY=csv2dict('PeeringPolicy.csv')\n\t\tSCENARIO=[]\n\t\tfor row in PEERINGPOLICY:\n\t\t\tSCENARIO.append(row['SCENARIO'])\n\t\t\t\n\t\tself.Combo_SCENARIO = QComboBox(self)\n\t\tfor i in SCENARIO:\n\t\t\tself.Combo_SCENARIO.addItem(i)\n\t\tself.Combo_SCENARIO.move(X11,Y_start+Y_step*10)\n\t\tself.Combo_SCENARIO.setMaxVisibleItems (10)\n\t\tself.Combo_SCENARIO.currentIndexChanged.connect(self.update_POLICY)\n\t\t\n\t\tself.POLICY = QLineEdit('POLICY',self)\n\t\tself.POLICY.setGeometry(QtCore.QRect(X11, Y_start+Y_step*11, 175, QE_hight))\n\n\n## OPA and OPB\n\t\tY_start=40\t\n## code for OPA only start\n\t\tX1 = 10 #label1 start\n\t\tX2 = 100#text1 start\n\t\tX3 = 200#labe2 start\n\t\tX4 = 250#text2 start\n\t\tX5 = 320#button3 start\n\n\t\tself.Lable_TADIG_A = QLabel(\"TADIG_A\",self)\n\t\tself.Lable_TADIG_A.move (X4,Y_start-Y_step*1)\t\t\n\t\tself.TADIG_A = QLineEdit(self)\n\t\tself.TADIG_A.setText(\"\")\n\t\tself.TADIG_A.setGeometry(QtCore.QRect(X5, Y_start-Y_step*1, QE_length/2.2, QE_hight))\n\t\t\n\t\tself.Lable_SSID_A = QLabel(\"SSID_A\",self)\n\t\tself.Lable_SSID_A.move (X1,Y_start-Y_step)\t\t\n\t\tself.SSID_A = QLineEdit(self)\n\t\tself.SSID_A.setText(\"\")\n\t\tself.SSID_A.setGeometry(QtCore.QRect(X2, Y_start-Y_step, QE_length/4, QE_hight))\n\n\t\tself.Lable_OP_A = QLabel(\"OP_A\",self)\n\t\tself.Lable_OP_A.move (X1,Y_start)\t\t\n\t\tself.OP_A = QLineEdit(self)\n\t\tself.OP_A.setText(\"\")\n\t\tself.OP_A.setGeometry(QtCore.QRect(X2, Y_start, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_Country_A = QLabel(\"Country_A\",self)\n\t\tself.Lable_Country_A.move (X1,Y_start+Y_step*2)\t\t\n\t\tself.Country_A = QLineEdit(self)\n\t\tself.Country_A.setText(\"\")\n\t\tself.Country_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*2, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_Realm_A = QLabel(\"Realm_A\",self)\n\t\tself.Lable_Realm_A.move (X1,Y_start+Y_step*3)\t\t\t\t\n\t\tself.realm_A = QLineEdit(self)\n\t\tself.realm_A.setText(\"\")\n\t\tself.realm_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*3, QE_length, QE_hight))\n\n\t\tself.Lable_IMSI_A = QLabel(\"IMSI_A\",self)\n\t\tself.Lable_IMSI_A.move (X1,Y_start+Y_step*4)\t\t\t\t\n\t\tself.IMSI_A = QLineEdit(self)\n\t\tself.IMSI_A.setText(\"\")\n\t\tself.IMSI_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*4, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_LIST_A = QLabel(\"LIST_A\",self)\n\t\tself.Lable_LIST_A.move(X1,Y_start+Y_step*5)\t\t\n\t\tself.LIST_A = QLineEdit(self)\n\t\tself.LIST_A.setText(\"\")\n\t\tself.LIST_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*5, QE_length, QE_hight))\n\t\t\n\t\tself.Label_Owner_A = QLabel(\"Owner_A\",self)\n\t\tself.Label_Owner_A.move(X1,Y_start+Y_step*6)\t\t\n\t\tself.Owner_A = QLineEdit(self)\n\t\tself.Owner_A.setText(\"\")\n\t\tself.Owner_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*6, QE_length/3, QE_hight))\n\t\t\n\t\tself.Label_RMT_A = QLabel(\"RMT_A\",self)\n\t\tself.Label_RMT_A.move(X4,Y_start+Y_step*6)\t\t\n\t\tself.RMT_A = QLineEdit(self)\n\t\tself.RMT_A.setText(\"\")\n\t\tself.RMT_A.setGeometry(QtCore.QRect(X5,Y_start+Y_step*6, QE_length/2.2, QE_hight))\n\t\t\n\t\tself.Lable_DRA_A = QLabel(\"DRA_A\",self)\n\t\tself.Lable_DRA_A.move(X1,Y_start+Y_step*7)\t\t\n\t\tself.DRA_A = QLineEdit(self)\n\t\tself.DRA_A.setText(\"\")\n\t\tself.DRA_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*7, QE_length, 
QE_hight))\t\t\n\t\t\n\t\t#self.Lable_HUB_A = QLabel(\"HUB_A\",self)\n\t\t#self.Lable_HUB_A.move(X1,Y_start+Y_step*8)\t\t\n\t\t#self.HUB_A = QLineEdit(self)\n\t\t#self.HUB_A.setText(\"\")\n\t\t#self.HUB_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*8, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_HUB_PLOICY_A = QLabel(\"HUB_PLOICY_A\",self)\n\t\tself.Lable_HUB_PLOICY_A.move(X1,Y_start+Y_step*8)\t\t\n\t\tself.HUB_PLOICY_A = QTextEdit(self)\n\t\tself.HUB_PLOICY_A.setText(\"\")\n\t\tself.HUB_PLOICY_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*8, QE_length, QE_hight*4))\n\t\t\n\t\tself.Lable_TECH_COMMENT_A = QLabel(\"TECH_COMMENT_A\",self)\n\t\tself.Lable_TECH_COMMENT_A.move(X1,Y_start+Y_step*11)\t\t\n\t\tself.TECH_COMMENT_A = QTextEdit(self)\n\t\tself.TECH_COMMENT_A.LineWrapMode\n\t\tself.TECH_COMMENT_A.setText(\"\")\n\t\tself.TECH_COMMENT_A.setGeometry(QtCore.QRect(X2,Y_start+Y_step*11, QE_length, QE_hight*11))\t\t\t\t\n\n#BUILD COMBO LIST_A WITH DROP DOWN SELECTION\n\t\tCombo_LIST_A=self.Full_list\n\t\tself.Combo_Select_A = QComboBox(self)\n\t\tfor i in Combo_LIST_A:\n\t\t\tself.Combo_Select_A.addItem(i)\n\t\tself.Combo_Select_A.move(X2,Y_start+Y_step*1)\n\t\tself.Combo_Select_A.setMaxVisibleItems (10)\n\t\tself.Combo_Select_A.currentIndexChanged.connect(self.update_A)\n\t\tself.OP_A.returnPressed.connect(self.rebuild_A_list)\n\n#Add LISTCACHE FUNCTION_A\n\t\tself.Combo_Region_A = QComboBox(self)\n\t\tfor i in RegionList:\n\t\t\tself.Combo_Region_A.addItem(i)\n\t\t\n\t\tself.Combo_Region_A.move(X2,Y_start+Y_step*19)\n\t\tself.Combo_Region_A.setMaxVisibleItems (4)\n\t\tself.Lable_Region_A = QLabel(\"Normal Route\",self)\n\t\tself.Lable_Region_A.move(X1,Y_start+Y_step*19)\t\t\n\n\t\tself.B_Reams2A_LIST = QPushButton('B_Realms2A_List',self)\n\t\tself.B_Reams2A_LIST.move(X3,Y_start+Y_step*19)\n\t\tself.B_Reams2A_LIST.clicked.connect(lambda: ADD_REALMS2LIST(self.Combo_Region_A.currentText(),self.realm_B.displayText(),self.LIST_A.displayText()))\n\t\t\n#Check DECIDE ROUTE\tFUNCTION_A\n\t\tself.Check_TO_A_Route = QPushButton('Check to A route',self)\n\t\tself.Check_TO_A_Route.move(X5,Y_start+Y_step*19)\n\t\tself.Check_TO_A_Route.clicked.connect(lambda: CHECK_DECIDE_ROUTE2OP(self.Combo_Region_A.currentText(),self.realm_B.displayText(),self.realm_A.displayText()))\n\t\t\n\t\t\n#Add K2R FUNCTION_A\n\t\tself.Lable_Region_K2R_A = QLabel(\"K2R Route\",self)\n\t\tself.Lable_Region_K2R_A.move(X1,Y_start+Y_step*20)\t\t\n\n\t\tself.K2R_B_Reams2A_LIST = QPushButton('K2R_B_Realms2A_List/ADD RequestFilter/UPD vzw LIST',self)\n\t\tself.K2R_B_Reams2A_LIST.move(X3,Y_start+Y_step*20)\n\t\t#PARA:REGION A, REALMs B, LIST A, REALMs A, Name A, Name B\n\t\tself.K2R_B_Reams2A_LIST.clicked.connect(lambda: K2R(self.Combo_Region_A.currentText(),self.realm_B.displayText(),self.LIST_A.displayText(),self.realm_A.displayText(),self.OP_A.displayText(),self.OP_B.displayText()))\n\n\t\t\n\n## code for OPB only start\n\t\tdistance=550\n\t\tX1 = distance+X1 #label1 start\n\t\tX2 = distance+X2#text1 start\n\t\tX3 = distance+X3#labe2 start\n\t\tX4 = distance+X4#text2 start\n\t\tX5 = distance+X5#button3 start\n\n\t\tself.Lable_TADIG_B = QLabel(\"TADIG_B\",self)\n\t\tself.Lable_TADIG_B.move (X4,Y_start-Y_step*1)\t\t\n\t\tself.TADIG_B = QLineEdit(self)\n\t\tself.TADIG_B.setText(\"\")\n\t\tself.TADIG_B.setGeometry(QtCore.QRect(X5, Y_start-Y_step*1, QE_length/2.2, QE_hight))\n\t\t\n\t\tself.Lable_SSID_B = QLabel(\"SSID_B\",self)\n\t\tself.Lable_SSID_B.move (X1,Y_start-Y_step)\t\t\n\t\tself.SSID_B = 
QLineEdit(self)\n\t\tself.SSID_B.setText(\"\")\n\t\tself.SSID_B.setGeometry(QtCore.QRect(X2, Y_start-Y_step, QE_length/4, QE_hight))\n\n\t\tself.Lable_OP_B = QLabel(\"OP_B\",self)\n\t\tself.Lable_OP_B.move (X1,Y_start)\t\t\n\t\tself.OP_B = QLineEdit(self)\n\t\tself.OP_B.setText(\"\")\n\t\tself.OP_B.setGeometry(QtCore.QRect(X2, Y_start, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_Country_B = QLabel(\"Country_B\",self)\n\t\tself.Lable_Country_B.move (X1,Y_start+Y_step*2)\t\t\n\t\tself.Country_B = QLineEdit(self)\n\t\tself.Country_B.setText(\"\")\n\t\tself.Country_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*2, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_Realm_B = QLabel(\"Realm_B\",self)\n\t\tself.Lable_Realm_B.move (X1,Y_start+Y_step*3)\t\t\t\t\n\t\tself.realm_B = QLineEdit(self)\n\t\tself.realm_B.setText(\"\")\n\t\tself.realm_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*3, QE_length, QE_hight))\n\n\t\tself.Lable_IMSI_B = QLabel(\"IMSI_B\",self)\n\t\tself.Lable_IMSI_B.move (X1,Y_start+Y_step*4)\t\t\t\t\n\t\tself.IMSI_B = QLineEdit(self)\n\t\tself.IMSI_B.setText(\"\")\n\t\tself.IMSI_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*4, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_LIST_B = QLabel(\"LIST_B\",self)\n\t\tself.Lable_LIST_B.move(X1,Y_start+Y_step*5)\t\t\n\t\tself.LIST_B = QLineEdit(self)\n\t\tself.LIST_B.setText(\"\")\n\t\tself.LIST_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*5, QE_length, QE_hight))\n\t\t\n\t\tself.Owner_B = QLabel(\"Owner_B\",self)\n\t\tself.Owner_B.move(X1,Y_start+Y_step*6)\t\t\n\t\tself.Owner_B = QLineEdit(self)\n\t\tself.Owner_B.setText(\"owner B\")\n\t\tself.Owner_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*6, QE_length/4, QE_hight))\n\t\t\n\t\tself.Label_RMT_B = QLabel(\"RMT_B\",self)\n\t\tself.Label_RMT_B.move(X4,Y_start+Y_step*6)\t\t\n\t\tself.RMT_B = QLineEdit(self)\n\t\tself.RMT_B.setText(\"RMT B\")\n\t\tself.RMT_B.setGeometry(QtCore.QRect(X5,Y_start+Y_step*6, QE_length/2.2, QE_hight))\n\t\t\n\t\tself.Lable_DRA_B = QLabel(\"DRA_B\",self)\n\t\tself.Lable_DRA_B.move(X1,Y_start+Y_step*7)\t\t\n\t\tself.DRA_B = QLineEdit(self)\n\t\tself.DRA_B.setText(\"\")\n\t\tself.DRA_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*7, QE_length, QE_hight))\t\t\n\t\t\n\t\t#self.Lable_HUB_B = QLabel(\"HUB_B\",self)\n\t\t#self.Lable_HUB_B.move(X1,Y_start+Y_step*8)\t\t\n\t\t#self.HUB_B = QLineEdit(self)\n\t\t#self.HUB_B.setText(\"\")\n\t\t#self.HUB_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*8, QE_length, QE_hight))\n\t\t\n\t\tself.Lable_HUB_PLOICY_B = QLabel(\"HUB_PLOICY_B\",self)\n\t\tself.Lable_HUB_PLOICY_B.move(X1,Y_start+Y_step*8)\t\t\n\t\tself.HUB_PLOICY_B = QTextEdit(self)\n\t\tself.HUB_PLOICY_B.setText(\"\")\n\t\tself.HUB_PLOICY_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*8, QE_length, QE_hight*4))\n\t\t\n\t\tself.Lable_TECH_COMMENT_B = QLabel(\"TECH_COMMENT_B\",self)\n\t\tself.Lable_TECH_COMMENT_B.move(X1,Y_start+Y_step*11)\t\t\n\t\tself.TECH_COMMENT_B = QTextEdit(self)\n\t\tself.TECH_COMMENT_B.LineWrapMode\n\t\tself.TECH_COMMENT_B.setText(\"\")\n\t\tself.TECH_COMMENT_B.setGeometry(QtCore.QRect(X2,Y_start+Y_step*11, QE_length, QE_hight*11))\t\t\t\t\n\n#COMBO LIST_B WITH DROP DOWN SELECTION\n\t\tCombo_LIST_B=self.Full_list\n\t\tself.Combo_Select_B = QComboBox(self)\n\t\tfor i in Combo_LIST_B:\n\t\t\tself.Combo_Select_B.addItem(i)\n\t\tself.Combo_Select_B.move(X2,Y_start+Y_step*1)\n\t\tself.Combo_Select_B.setMaxVisibleItems (10)\n\t\tself.Combo_Select_B.currentIndexChanged.connect(self.update_B)\n\t\tself.OP_B.returnPressed.connect(self.rebuild_B_list)\n\n#Add LISTCACHE 
FUNCTION_B\n\t\tself.Combo_Region_B = QComboBox(self)\n\t\tfor i in RegionList:\n\t\t\tself.Combo_Region_B.addItem(i)\n\t\t\n\t\tself.Combo_Region_B.move(X2,Y_start+Y_step*19)\n\t\tself.Combo_Region_B.setMaxVisibleItems (4)\n\t\tself.Lable_Region_B = QLabel(\"Normal Route\",self)\n\t\tself.Lable_Region_B.move(X1,Y_start+Y_step*19)\t\t\n\n\t\tself.A_Reams2B_LIST = QPushButton('A_Realms2B_List',self)\n\t\tself.A_Reams2B_LIST.move(X3,Y_start+Y_step*19)\n\t\tself.A_Reams2B_LIST.clicked.connect(lambda: ADD_REALMS2LIST(self.Combo_Region_B.currentText(),self.realm_A.displayText(),self.LIST_B.displayText()))\n\t\t\n#Check DECIDE ROUTE\tFUNCTION_B\n\t\tself.Check_TO_B_Route = QPushButton('Check to B route',self)\n\t\tself.Check_TO_B_Route.move(X5,Y_start+Y_step*19)\n\t\tself.Check_TO_B_Route.clicked.connect(lambda: CHECK_DECIDE_ROUTE2OP(self.Combo_Region_B.currentText(),self.realm_A.displayText(),self.realm_B.displayText()))\n\t\t\n#Add K2R FUNCTION_B\n\t\tself.Lable_Region_K2R_B = QLabel(\"K2R Route\",self)\n\t\tself.Lable_Region_K2R_B.move(X1,Y_start+Y_step*20)\t\t\n\n\t\tself.K2R_B_Reams2B_LIST = QPushButton('K2R_A_Realms2B_List/ADD RequestFilter/UPD vzw LIST',self)\n\t\tself.K2R_B_Reams2B_LIST.move(X3,Y_start+Y_step*20)\n\t\tself.K2R_B_Reams2A_LIST.clicked.connect(lambda: K2R(self.Combo_Region_B.currentText(),self.realm_A.displayText(),self.LIST_B.displayText(),self.realm_B.displayText(),self.OP_B.displayText(),self.OP_A.displayText()))\n\n\tdef update_POLICY(self):\n\t\t#PEERINGPOLICY=csv2dict('PeeringPolicy.csv')\n\t\tfor row in PEERINGPOLICY:\n\t\t\tif row['SCENARIO']==self.Combo_SCENARIO.currentText():\n\t\t\t\tself.POLICY.setText(row['SVR_PEER']+'-'+row['HUB_PEER'])\n\t\t\t\n\n\tdef update_A(self,ii):\n\t\tOPA_Name = self.Combo_Select_A.currentText()\n\t\tself.OP_A.setText(OPA_Name)\n\n\t\tfor row in DB_sheet:\n\t\t\tif row[\"name\"] == OPA_Name:\n\t\t\t\tself.SSID_A.setText(row[\"ssid\"])\n\t\t\t\tself.IMSI_A.setText(row[\"imsi_prefix\"])\n\t\t\t\tself.Country_A.setText(row[\"country\"])\n\t\t\t\tself.realm_A.setText(row[\"realm_name\"])\n\t\t\t\tself.LIST_A.setText(row[\"LIST\"])\n\t\t\t\tself.Owner_A.setText(row[\"owner\"])\n\t\t\t\tself.RMT_A.setText(row[\"status\"])\n\t\t\t\tself.DRA_A.setText(row[\"dra\"])\n\t\t\t\t#self.HUB_A.setText(row[\"hub\"])\n\t\t\t\tself.HUB_PLOICY_A.setText(row[\"hub_policy\"])\n\t\t\t\tself.TECH_COMMENT_A.setText(row[\"technicalcomment\"])\n\t\t\t\tself.TADIG_A.setText(row[\"tagid\"])\n\t\t\t\tself.Combo_Region_A.currentIndex=1\t\t\n\tdef update_B(self,ii):\n\t\tOPB_Name = self.Combo_Select_B.currentText()\n\t\tself.OP_B.setText(OPB_Name)\n\n\t\tfor row in DB_sheet:\n\t\t\tif row[\"name\"] == OPB_Name:\n\t\t\t\tself.SSID_B.setText(row[\"ssid\"])\n\t\t\t\tself.IMSI_B.setText(row[\"imsi_prefix\"])\n\t\t\t\tself.Country_B.setText(row[\"country\"])\n\t\t\t\tself.realm_B.setText(row[\"realm_name\"])\n\t\t\t\tself.LIST_B.setText(row[\"LIST\"])\n\t\t\t\tself.Owner_B.setText(row[\"owner\"])\n\t\t\t\tself.RMT_B.setText(row[\"status\"])\n\t\t\t\tself.DRA_B.setText(row[\"dra\"])\n\t\t\t\t#self.HUB_B.setText(row[\"hub\"])\n\t\t\t\tself.HUB_PLOICY_B.setText(row[\"hub_policy\"])\n\t\t\t\tself.TECH_COMMENT_B.setText(row[\"technicalcomment\"])\n\t\t\t\tself.TADIG_B.setText(row[\"tagid\"])\n\t\t\t\tself.Combo_Region_B.currentIndex=1\n\n## code for OPB only end\n\n\n\n\t\t\n\tdef rebuild_A_list(self):\n\t\tlist=[]\n\t\tkey= self.OP_A.text()\n\t\tprint(key)\n\t\tfor OP in self.Full_list:\n\t\t\tif key.lower() in 
OP.lower():\n\t\t\t\tlist.append(OP)\n\t\tself.Combo_Select_A.clear()\n\t\tfor i in list:\n\t\t\tself.Combo_Select_A.addItem(i)\n\t\t\t\n\t\t\t\t\n\t\tprint(self.OP_A.text)\n\t\tprint(list)\n\n\tdef rebuild_B_list(self):\n\t\tlist=[]\n\t\tkey= self.OP_B.text()\n\t\tprint(key)\n\t\tfor OP in self.Full_list:\n\t\t\tif key.lower() in OP.lower():\n\t\t\t\tlist.append(OP)\n\t\tself.Combo_Select_B.clear()\n\t\tfor i in list:\n\t\t\tself.Combo_Select_B.addItem(i)\n\n\n\n\n\tdef Reload_RULE(self,region):\n\t\tprint(\"RELOAD RULE \"+region)\t\n\n\tdef UPDATE_DB(self):\n\t\tprint(\"DB updated\")\n\n\t\nclass OthersWidget(QDialog):\n\tdef __init__(self, parent=None):\n\t\tsuper(OthersWidget, self).__init__(parent)\n\t\tself.setStyleSheet(\"background: blue grey\")\n\t\tself.Lable_TADIG_A = QLabel(\"TADIG_TEST\",self)\n\n\nclass Main_TabWidget(QTabWidget):\n def __init__(self, parent=None):\n super(Main_TabWidget, self).__init__(parent)\n self.resize(1366, 768)\n self.mContent = OPEN_ROUTE()\n self.mIndex = OthersWidget()\n self.addTab(self.mContent, u\"Open Route\")\n self.addTab(self.mIndex, u\"Others\")\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n t = Main_TabWidget()\n t.show()\n app.exec_()\n input(\"ddd\")\n\n\n", "sub_path": "backup/DSSOSS20180422-4.py", "file_name": "DSSOSS20180422-4.py", "file_ext": "py", "file_size_in_byte": 27470, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 57, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 101, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 113, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 135, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 159, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_reload_listcaches", "line_number": 177, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_reload_listcaches", "line_number": 178, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_reload_rule_engine", "line_number": 187, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_reload_rule_engine", "line_number": 188, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_check_decide_route", "line_number": 207, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_check_decide_route", "line_number": 213, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_add_list_cache", "line_number": 240, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_add_list_cache", "line_number": 245, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_add_list_cache", "line_number": 271, "usage_type": "call"}, {"api_name": "soap_all_commands_for_dsc.soap_add_list_cache", "line_number": 276, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 318, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 340, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", 
"line_number": 342, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 344, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 344, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 346, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 350, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 354, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 358, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 360, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 363, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 363, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 365, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 369, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 373, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 378, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 380, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 383, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 387, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 391, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 395, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 399, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 403, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 409, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 412, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 416, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 425, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 432, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 433, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 433, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 445, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 447, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 449, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 449, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 451, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 453, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 455, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 455, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 457, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 459, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 461, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 461, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 463, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 465, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 467, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 467, "usage_type": 
"name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 469, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 471, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 473, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 473, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 475, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 477, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 479, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 479, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 481, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 483, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 485, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 485, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 487, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 489, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 491, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 491, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 493, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 495, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 497, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 497, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 499, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 501, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 503, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 503, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 511, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 515, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 515, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 517, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 522, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 522, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 526, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 535, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 541, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 544, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 549, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 555, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 558, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 573, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 575, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 577, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 577, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 579, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 581, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 583, "usage_type": "call"}, {"api_name": 
"PyQt5.QtCore", "line_number": 583, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 585, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 587, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 589, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 589, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 591, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 593, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 595, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 595, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 597, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 599, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 601, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 601, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 603, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 605, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 607, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 607, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 609, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 611, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 613, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 613, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 615, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 617, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 619, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 619, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 621, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 623, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 625, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 625, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 627, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 629, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 631, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 631, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 639, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 643, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 643, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 645, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 650, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 650, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 654, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 663, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 669, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 672, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 677, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 682, 
"usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 685, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 780, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 795, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 795, "usage_type": "attribute"}]} +{"seq_id": "32435327", "text": "import komand\nfrom .schema import CategorizationInput, CategorizationOutput\n# Custom imports below\nfrom komand.exceptions import PluginException\n\n\nclass Categorization(komand.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name='categorization',\n description='Return if domain has been flagged as malicious by the Cisco Security Labs team',\n input=CategorizationInput(),\n output=CategorizationOutput())\n\n def run(self, params={}):\n\n domains = params.get('domains')\n if (len(domains) == 1):\n domains = str(domains[0])\n \n try:\n remoteCategories = self.connection.investigate.categorization(domains, labels=True)\n except Exception as e:\n raise PluginException(preset=PluginException.Preset.UNKNOWN)\n\n categories = []\n for key, value in remoteCategories.items():\n categories.append({\"name\": key, \"status\": value.get('status'), \"security_categories\": value.get('security_categories'), \"content_categories\": value.get('content_categories')})\n\n return {\"categories\": categories}\n\n def test(self):\n return {\"categories\": []}\n", "sub_path": "cisco_umbrella_investigate/komand_cisco_umbrella_investigate/actions/categorization/action.py", "file_name": "action.py", "file_ext": "py", "file_size_in_byte": 1222, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "komand.Action", "line_number": 7, "usage_type": "attribute"}, {"api_name": "schema.CategorizationInput", "line_number": 12, "usage_type": "call"}, {"api_name": "schema.CategorizationOutput", "line_number": 13, "usage_type": "call"}, {"api_name": "komand.exceptions.PluginException", "line_number": 24, "usage_type": "call"}, {"api_name": "komand.exceptions.PluginException.Preset", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "230043198", "text": "# coding=utf8\n\"\"\" Upgrades Entry\n\nHandles running upgrade scripts\n\"\"\"\n\n__author__\t\t= \"Chris Nasr\"\n__copyright__\t= \"OuroborosCoding\"\n__version__\t\t= \"1.0.0\"\n__maintainer__\t= \"Chris Nasr\"\n__email__\t\t= \"chris@fuelforthefire.ca\"\n__created__\t\t= \"2019-03-30\"\n\n# Python imports\nimport importlib\nimport os\nimport platform\nimport sys\n\n# Pip imports\nfrom RestOC import Conf, Record_Base, Record_ReDB, REST, Services\n\n# Upgrade imports\nfrom . 
import UpgradeLog, run\n\n# If the version argument is missing\nif len(sys.argv) < 2:\n\tprint('Must specify the version to run:\\n\\tpython -m upgrades v1.0')\n\tsys.exit(1)\n\n# Store the version\nsVer = sys.argv[1].replace('.', '_')\n\n# Load the config\nConf.load('../config.json')\nsConfOverride = '../config.%s.json' % platform.node()\nif os.path.isfile(sConfOverride):\n\tConf.load_merge(sConfOverride)\n\n# Add the global prepend and primary host to rethinkdb\nRecord_Base.dbPrepend(Conf.get((\"rethinkdb\", \"prepend\"), ''))\nRecord_ReDB.addHost('primary', Conf.get((\"rethinkdb\", \"hosts\", \"primary\")))\n\n# Register all services\nServices.register(\n\t{k:None for k in Conf.get(('rest', 'services'))},\n\tREST.Config(Conf.get(\"rest\")),\n\tConf.get(('services', 'salt'))\n)\n\n# Try to import the version\ntry:\n\toVer = importlib.import_module('upgrades.%s' % sVer)\nexcept ImportError as e:\n\tprint('The given version \"%s\" is invalid.' % sVer)\n\tprint(e)\n\tsys.exit(1)\n\n# Load or create the version file\noLogFile = UpgradeLog('upgrades/%s/_upgrade.log' % sVer)\n\n# Run the version files\nrun(oVer.modules, oLogFile)\n", "sub_path": "services/upgrades/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "RestOC.Conf.load", "line_number": 35, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 35, "usage_type": "name"}, {"api_name": "platform.node", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "RestOC.Conf.load_merge", "line_number": 38, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 38, "usage_type": "name"}, {"api_name": "RestOC.Record_Base.dbPrepend", "line_number": 41, "usage_type": "call"}, {"api_name": "RestOC.Record_Base", "line_number": 41, "usage_type": "name"}, {"api_name": "RestOC.Conf.get", "line_number": 41, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 41, "usage_type": "name"}, {"api_name": "RestOC.Record_ReDB.addHost", "line_number": 42, "usage_type": "call"}, {"api_name": "RestOC.Record_ReDB", "line_number": 42, "usage_type": "name"}, {"api_name": "RestOC.Conf.get", "line_number": 42, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 42, "usage_type": "name"}, {"api_name": "RestOC.Services.register", "line_number": 45, "usage_type": "call"}, {"api_name": "RestOC.Services", "line_number": 45, "usage_type": "name"}, {"api_name": "RestOC.Conf.get", "line_number": 46, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 46, "usage_type": "name"}, {"api_name": "RestOC.REST.Config", "line_number": 47, "usage_type": "call"}, {"api_name": "RestOC.REST", "line_number": 47, "usage_type": "name"}, {"api_name": "RestOC.Conf.get", "line_number": 47, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 47, "usage_type": "name"}, {"api_name": "RestOC.Conf.get", "line_number": 48, "usage_type": "call"}, {"api_name": "RestOC.Conf", "line_number": 48, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 53, "usage_type": "call"}, {"api_name": 
"sys.exit", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "389767974", "text": "import csv\nimport json\nfrom amazon.api import AmazonAPI\nfrom pprint import pprint\n\ndef writeToJSONFile(path, fileName, data):\n filePathNameWExt = './' + path + '/' + fileName + '.json'\n with open(filePathNameWExt, 'w') as fp:\n json.dump(data, fp)\n\nkeys = []\nwith open('../rootkey.csv', 'r') as keysfile:\n spamreader = csv.reader(keysfile, delimiter=' ', quotechar='|')\n for row in spamreader:\n keys.extend(row)\n\nAMAZON_ACCESS_KEY = keys[0]\nAMAZON_SECRET_KEY = keys[1]\nAMAZON_ASSOC_TAG = keys[2]\n\namazon = AmazonAPI(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY, AMAZON_ASSOC_TAG)\n\n#to json\ndata = []\nwith open('ff-books.json') as data_file: \n ff_books = json.load(data_file)\n for book in ff_books:\n if(book['isbn']):\n try:\n isbn = book['isbn']\n item = amazon.lookup(ItemId=isbn, IdType='ISBN', SearchIndex='Books')\n item_link = ''\n if(type(item) is list):\n item_link = item[0].detail_page_url\n else:\n item_link = item.detail_page_url\n \n i = {}\n i['title'] = book['title']\n i['author'] = book['author']\n i['link'] = book['link']\n i['amazon'] = item_link\n data.append(i)\n except Exception:\n pass\n \nwriteToJSONFile('./','amazon',data)\n", "sub_path": "examples/search_book_by_isbn.py", "file_name": "search_book_by_isbn.py", "file_ext": "py", "file_size_in_byte": 1451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "json.dump", "line_number": 9, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 13, "usage_type": "call"}, {"api_name": "amazon.api", "line_number": 21, "usage_type": "name"}, {"api_name": "amazon.api.AmazonAPI", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "amazon.api.lookup", "line_number": 31, "usage_type": "call"}, {"api_name": "amazon.api", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "571442410", "text": "\"\"\"\nCreated on Mon Feb 17 12:32:42 2020\n@author: lizzy & richelle\n\"\"\"\nimport numpy as np\nimport math as m\nimport matplotlib.pyplot as plt\n\naircraft = \"A320\"\nCa = 0.547 # m\nla = 2.771 # m\nx1 = 0.153 # m\nx2 = 1.281 # m\nx3 = 2.681 # m\nxa = 0.28 # m\nha = 0.225 # m\ntsk = 1.1/1000 # m\ntsp = 2.9/1000 # m\ntst = 1.2/1000 # m\nhst = 15./1000 # m\nwst = 20./1000 # m\nnst = 17 # -\nd1 = 0.01103 # m\nd3 = 0.01642 # m\ntheta = m.radians(26) # rad\nP = 91.7*1000 # N\n\n\"airfoil plot\"\nRa=ha/2 #radius \nlength=m.pi*Ra**2\n\nAb=wst*tst+(hst-tst)*tst #m\nl=2*m.sqrt((Ca-Ra)**2+(Ra)**2)+m.pi*Ra\nb=l/nst\nk=b/Ra #radians\n\nz_coordinates=[]\ny_coordinates=[]\nz_spar=[]\ny_spar=[]\nA_st=(hst)*tst+tst*wst\n\n\"Stringer placement\"\nz_stringer=[] \ny_stringer=[]\nstringer_array=[]\nz_stringer.append(-Ra)\ny_stringer.append(0)\nstringer_array.append([A_st, 0,-Ra])\nh=0\nfor n in range(0,10001):\n z=-Ra*m.cos(n*b/(Ra))\n y=Ra*m.sin(n*b/(Ra))\n if z<=0. 
and z>-Ra+0.01:\n z_stringer.append(z)\n z_stringer.append(z)\n y_stringer.append(y)\n y_stringer.append(-y)\n stringer_array.append([A_st, y,z])\n stringer_array.append([A_st, -y,z])\n h=h+2\n elif z>0 or nst==1:\n break\nfor n in range(0,10001):\n last_room=0.5*m.pi*Ra-(h/2)*b\n z1=0\n z2=(n-2)*(Ca-Ra)/10000\n y1=Ra\n y2=-Ra/(Ca-Ra)*z2+Ra\n l_p=round(m.sqrt((z1-z2)**2+(y1-y2)**2),7)\n z=z2\n y=y2\n for i in range (0,nst):\n if abs(l_p-round((i*b-last_room),7))<=0.000022:\n z_stringer.append(z)\n z_stringer.append(z)\n y_stringer.append(y)\n y_stringer.append(-y)\n stringer_array.append([A_st, y,z])\n stringer_array.append([A_st, -y,z])\n elif z>(Ca-Ra) or nst==1:\n break\n \nstringers=np.array(stringer_array)\nprint(stringers) \n\"airfoil placement\"\nfor i in range(0,101):\n z=-Ra+Ra*i/(100)\n y=np.sqrt(Ra**2-z**2)\n z_coordinates.append(z)\n y_coordinates.append(y)\nfor i in range(-100,101):\n z=0\n y=i/100*Ra\n z_spar.append(z)\n y_spar.append(y)\nfor i in range(0,101):\n z=i*(Ca-Ra)/100\n y=-Ra/(Ca-Ra)*z+Ra\n z_coordinates.append(z)\n y_coordinates.append(y)\n\nfor i in range(0,101):\n z=(100-i)*(Ca-Ra)/100\n y=-Ra/(Ca-Ra)*z+Ra\n z_coordinates.append(z)\n y_coordinates.append(-y)\n\nfor i in range(0,101):\n z=-Ra+Ra*(100-i)/100\n y=np.sqrt(Ra**2-z**2)\n z_coordinates.append(z)\n y_coordinates.append(-y)\n\n\"Calculation Centroid\"\nA_sk1=m.pi*Ra*tsk\nA_sk2=m.sqrt((Ca-Ra)**2+Ra**2)*tsk\nA_sp=ha*tsp\n\nA_tot=A_sk1+2*A_sk2+A_sp+nst*A_st\nprint(\"Total area:\", A_tot, \"m^2\")\n\nAz_st=A_st*sum(z_stringer) #m^2\nAz_sk1=A_sk1*2*-Ra/(m.pi) #m^2\nAz_sk2=A_sk2*1/2*(Ca-Ra) #m^2\nAy_st=A_st*sum(y_stringer)\nAy_sk1=A_sk1*0\nAy_sk2=A_sk2*1/2*(Ra)\nAy_sk3=A_sk2*-1/2*(Ra)\nzc=(Az_st+Az_sk1+2*Az_sk2)/A_tot\nyc=(Ay_st+Ay_sk1+Ay_sk2+Ay_sk3)/A_tot #due to symmetry it should be 0\nprint(\"centroid\",zc,yc)\n\n\"airfoil plot\"\n\nplt.plot(z_coordinates,y_coordinates, color='black',label=\"Periphery\")\nplt.plot(z_spar,y_spar,color='green', label=\"Spar\")\nplt.xlim(-0.2,0.6)\nplt.ylim(-0.2,0.3)\nplt.scatter(z_stringer,y_stringer, s=30, color='red',label=\"Stringers\")\nplt.scatter(zc, yc, marker=\"x\", color='orange',label=\"Centroid\")\nplt.legend()\n\n\"Moment of Inertia results\"\n#Calculate moment of inertia of straght skin parts\nangle = m.atan(Ra/(Ca-Ra)) #radians\nlength_beam = m.sqrt(Ra**2+(Ca-Ra)**2) #m\nIyy_straight = ((tsk*length_beam**(3)*m.cos(angle)*m.cos(angle))/12+A_sk2*((Ca-Ra)/2-zc)**2) #2 beams\n#print(\"Moment of inertia of straight skin parts Iyy is:\", Iyy_straight, \"m^4\")\n\nIzz_straight = ((tsk*length_beam**(3)*m.sin(angle)*m.sin(angle))/12+A_sk2*(Ra/2)**2) #2 beams\n#print(\"Moment of inertia of straight skin parts Izz is:\", Izz_straight, \"m^4\")\n\n#Calculate moment of inertia of arc skin part\nIzz_arc = 1/2*m.pi*tsk*Ra**3 #\nIyy_arc = 1/2*m.pi*tsk*Ra**3-4/m.pi*Ra**3*tsk+A_sk1*(2*Ra/m.pi+zc)**2\n#print(\"Moment of inertia of arc skin part Iyy is:\", Iyy_arc, \"m^4\")\n#print(\"Moment of inertia of arc skin part Izz is:\", Izz_arc, \"m^4\")\n\n#Calculate moment of inertia of spar\nIzz_spar = (tsp*ha**3)/12\nIyy_spar = A_sp*(zc)**2\n\n#print(\"Moment of inertia of spar Iyy is:\", Iyy_spar,\"m^4\")\n#print(\"Moment of inertia of spar Izz is:\", Izz_spar, \"m^4\")\n\n#Calculate moment of inertia of stiffners\nvalues_Ady=[]\nfor i in y_stringer:\n dy=i**2\n Ady=A_st*dy\n values_Ady.append(Ady)\nIzz_st=sum(values_Ady)\n\nvalues_Adz=[]\nfor i in z_stringer:\n dz=(i-zc)**2\n Adz=A_st*dz\n values_Adz.append(Adz)\nIyy_st=sum(values_Adz)\n\n#print(\"Moment of inertia of stiffners Izz 
is:\", Izz_st, \"m^4\") \n#print(\"Moment of inertia of stiffners Iyy is:\", Iyy_st, \"m^4\")\n#Calculate total moment of inertia \nIyy_total = 2*Iyy_straight + Iyy_arc + Iyy_spar + Iyy_st\nIzz_total = 2*Izz_straight + Izz_arc + Izz_spar + Izz_st\n\nprint(\"Total moment of inertia Iyy =\", Iyy_total, \"m^4\")\nprint(\"Total moment of inertia Izz =\", Izz_total, \"m^4\") \n\n\"torsional constant\"\nJ1=4*(0.5*m.pi*Ra**2)**2/((m.pi*Ra/tsk))\nJ2=4*(Ra*(Ca-Ra))**2/((2*m.sqrt(Ra**2+(Ca-Ra)**2)/tsk))\n\nJ=J1+J2\nprint(\"Torsional constant\", J)", "sub_path": "crosssectional_properties5.py", "file_name": "crosssectional_properties5.py", "file_ext": "py", "file_size_in_byte": 5127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "math.radians", "line_number": 25, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 30, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 33, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 33, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 52, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 53, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 111, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 116, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 117, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 124, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "math.atan", "line_number": 146, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 147, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 148, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 151, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 155, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 156, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 192, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "236717692", "text": "from pyspark import SparkContext\nfrom pyspark.sql import 
SQLContext\nfrom pyspark.sql import functions\nimport pandas as pd\nimport os\nimport glob\nfrom pyspark.sql import Row\nimport spark\n\n\nsc = SparkContext(\"local\", \"First App\")\nsqlContext = SQLContext(sc)\n\nallFiles = ['waqi-covid19-airqualitydata-2020.csv', 'waqi-covid19-airqualitydata-2019Q1.csv',\n 'waqi-covid19-airqualitydata-2019Q2.csv', 'waqi-covid19-airqualitydata-2019Q3.csv',\n 'waqi-covid19-airqualitydata-2019Q4.csv']\n\n\nfor f in allFiles:\n\n combined = sc.textFile(f) \\\n .map(lambda line: line.split(\",\"))\\\n .filter(lambda line: len(line)>1)\\\n .filter(lambda line: line[3] == \"pm25\" or line[3] == \"temperature\") \\\n .filter(lambda line: line[2] == \"Austin\" ) \\\n .map(lambda line: (line[0],line[2], line[3], line[7]))\n\n combinedDf = sqlContext.createDataFrame(combined, ['date', 'city', 'env', 'median'])\n combinedDf.toPandas().to_csv('combined' + f[:-4] + '.csv')\n\n print(\"done processing \" + f)\n\n\nos.chdir(\"./\")\nextension ='csv'\ncombined_all_filenames = [i for i in glob.glob('combined*.{}'.format(extension))]\ncombined_combined_csv = pd.concat([pd.read_csv(f) for f in combined_all_filenames ])\ncombined_combined_csv.to_csv( \"all_combined_csv.csv\", index=False, encoding='utf-8-sig')\n\n\n", "sub_path": "climate/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 1315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pyspark.SparkContext", "line_number": 11, "usage_type": "call"}, {"api_name": "pyspark.sql.SQLContext", "line_number": 12, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 34, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "199133077", "text": "import torch\nfrom torchvision import datasets, transforms\n\nbatch_size=200\n\n# 准备MNIST数据集\n\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data2', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data2', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True)\n\n\nprint('MNIST 数据集已经准备完成 !!!')\nprint('==============MNIST 数据集信息============')\nprint('训练集====>',train_loader.dataset.data.shape)\nprint('测试集====>',test_loader.dataset.data.shape)", "sub_path": "MNISTDownload.py", "file_name": "MNISTDownload.py", "file_ext": "py", "file_size_in_byte": 895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.utils.data.DataLoader", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 9, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 9, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 10, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 10, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 11, "usage_type": "call"}, {"api_name": "torchvision.transforms", 
"line_number": 11, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "318451648", "text": "from datetime import date, datetime\nimport time, string, random, os, sys\nfrom flask import Flask, request, abort, send_file\nfrom CurriculumVitae import CurriculumVitae\nfrom Logger import log_from_renderer\nfrom I18n import *\nfrom Models import *\nimport Renders\nimport glob\nimport json\nimport timestring\nfrom flask_cors import CORS\nimport pika\n\ndef id_gen(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\ndef parse_date(str):\n return timestring.Date(str).date\n\ndef get_field_or_none(req, field_name):\n if field_name in req.keys():\n return req[field_name]\n return None\n\ndef get_date_field_or_none(req, field_name):\n field = get_field_or_none(req, field_name)\n if field is None:\n return None\n return datetime.strptime(field, '%Y-%m-%d').date()\n\ndef get_parse_string(cv_key, item):\n gen_cv_item = cv_key + '('\n\n for key, value in item.items():\n gen_cv_item = gen_cv_item + \"{0}=\".format(key)\n if type(value) is dict:\n for in_key, in_value in value.items():\n inside_item = get_parse_string(in_key, in_value)\n gen_cv_item = gen_cv_item + \"{0},\".format(inside_item)\n break\n elif key[-4:] == \"date\":\n gen_cv_item = gen_cv_item + \"parse_date('{0}'),\".format(value)\n elif type(value) is str:\n gen_cv_item = gen_cv_item + \"\\\"\\\"\\\"{0}\\\"\\\"\\\",\".format(value.replace(\"\\\"\",\"\\\\\\\"\"))\n else:\n gen_cv_item = gen_cv_item + \"'{0}',\".format(value)\n\n gen_cv_item = gen_cv_item + ')'\n\n return gen_cv_item\n\ndef parse_item(cv_key, item):\n try:\n cv_item = eval(get_parse_string(cv_key, item))\n except TypeError as err:\n abort(400, err)\n\n return cv_item\n\nrender_map = {}\ndef refresh_render_map():\n for filename in glob.glob('Templates/*.json'):\n with open(filename) as json_file:\n data = json.load(json_file)\n render_map[data['name']] = data\n\ndef render_from_cv_dict(req):\n refresh_render_map()\n cv = CurriculumVitae(req[\"path\"])\n ret = \"\"\n\n log_from_renderer(req[\"curriculum_vitae\"][\"CvHeaderItem\"][\"email\"], cv.cv_hash, \"GENERATING_CV_AST\")\n\n req_cv = req\n path = None\n if 'path' in req:\n path = req['path']\n params = {}\n render_key = \"awesome\"\n params['section_order'] = ['work', 'education', 'achievement', 'project', 'academic', 'language', 'skill']\n if 'curriculum_vitae' in req:\n req_cv = req['curriculum_vitae']\n if 'render_key' in req:\n render_key = req['render_key']\n params = render_map[render_key]['fixed_params']\n if 'params' in req:\n params.update(req['params'])\n if 'section_order' in req:\n params['section_order'] = req['section_order']\n\n if 'CvHeaderItem' not in req_cv:\n log_from_renderer(req[\"curriculum_vitae\"][\"CvHeaderItem\"][\"email\"], cv.cv_hash, \"MISSING_HEADER\")\n abort(400, \"Missing header\")\n\n for 
cv_key in req_cv.keys():\n req_key = req_cv[cv_key]\n\n items = []\n\n if cv_key == 'CvHeaderItem':\n items.append(req_key)\n else:\n items = req_key\n\n for item in items:\n cv_item = parse_item(cv_key, item)\n cv.add(cv_item)\n\n log_from_renderer(cv.header.email, cv.cv_hash, \"CV_AST_GENERATED\")\n\n baseFolder = render_map[render_key]['base_folder']\n\n if 'lang' not in params:\n params['lang'] = 'en_US'\n \n resources = get_resources(params['lang'][:2])\n\n path = Renders.CvRenderTexToPdf.render(cv, path=path, cvRender=Renders.CvRenderCheetahTemplate, baseFolder=baseFolder, command=render_map[render_key]['command'], params=params, resources=resources)\n return path\n", "sub_path": "renderer-api/Common.py", "file_name": "Common.py", "file_ext": "py", "file_size_in_byte": 3772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "string.ascii_uppercase", "line_number": 15, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 15, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 16, "usage_type": "call"}, {"api_name": "timestring.Date", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 57, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 63, "usage_type": "call"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "CurriculumVitae.CurriculumVitae", "line_number": 70, "usage_type": "call"}, {"api_name": "Logger.log_from_renderer", "line_number": 73, "usage_type": "call"}, {"api_name": "Logger.log_from_renderer", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 94, "usage_type": "call"}, {"api_name": "Logger.log_from_renderer", "line_number": 110, "usage_type": "call"}, {"api_name": "Renders.CvRenderTexToPdf.render", "line_number": 119, "usage_type": "call"}, {"api_name": "Renders.CvRenderTexToPdf", "line_number": 119, "usage_type": "attribute"}, {"api_name": "Renders.CvRenderCheetahTemplate", "line_number": 119, "usage_type": "attribute"}]} +{"seq_id": "644401215", "text": "import numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport nltk\r\nimport csv\r\nfrom tempfile import TemporaryFile\r\n\r\n# preprocessing data\r\n# read data from text file\r\ndata = open(\"shakespeare.txt\", \"r\").read()\r\n\r\n# lower case all character in data\r\ndata = data.lower()\r\n\r\n# get all unique letters and symbols\r\ntoken = nltk.word_tokenize(data)\r\n\r\n# # split line into array\r\n# data = data.split(\"\\n\")\r\n\r\n# get alphabet array\r\n# alphabet = 'abcdefghijklmnopqrstuvwxyz'\r\n# alphabet = list(set(alphabet))\r\n\r\n# Initialize\r\nspecialSymbol = []\r\ndictionary = []\r\n# dictionaryTemp = []\r\nindex = 0\r\n\r\n#\r\nfor line in data:\r\n if (line == ''):\r\n data[index] = '\\n'\r\n index += 1\r\n\r\n# assign all symbols\r\nspecialSymbol = [',', '.', '<', '>', '/', '?', ':', ';', \"'\", '\"', '{', '}', '[', ']', '(', ')', '\\n', '-', '_']\r\n\r\n# assign single word to dictionary\r\n# for i in data:\r\n# words = i.split(\" \")\r\n# for word in words:\r\n# if not word in dictionaryTemp:\r\n# dictionaryTemp.append(word)\r\nfor word in token:\r\n if not word in dictionary:\r\n dictionary.append(word)\r\ndictionary.append(\"\\n\")\r\n\r\n\r\n# handle special symbol\r\n# for word in 
dictionaryTemp:\r\n# count = 1\r\n# if word != \"\" and word not in specialSymbol:\r\n# if word[-1] in specialSymbol or word[0] in specialSymbol:\r\n# realWord = word[:len(word)-1]\r\n# if realWord[-1] in specialSymbol:\r\n# count += 1\r\n# realWord = word[:len(word)-count]\r\n# if word[0] in specialSymbol:\r\n# realWord = realWord[1:]\r\n# if not word[-1] in dictionary:\r\n# dictionary.append(word[-1])\r\n# if not realWord in dictionary:\r\n# dictionary.append(realWord)\r\n# else:\r\n# dictionary.append(word)\r\n# dictionary.append(\"\\n\")\r\n\r\n\r\n\r\nword_to_idx = {w:i for i, w in enumerate(dictionary)}\r\nidx_to_word = {i:w for i, w in enumerate(dictionary)}\r\n\r\ndata_size, vocab_size = len(data), len(dictionary)\r\n\r\ndef initialize_parameters(n_a, n_x, n_y):\r\n Wc = np.random.randn(n_a, n_a + n_x) * 0.01\r\n bc = np.zeros((n_a, 1))\r\n Wu = np.random.randn(n_a, n_a + n_x) * 0.01\r\n bu = np.zeros((n_a, 1))\r\n Wf = np.random.randn(n_a, n_a + n_x) * 0.01\r\n bf = np.zeros((n_a, 1))\r\n Wo = np.random.randn(n_a, n_a + n_x) * 0.01\r\n bo = np.zeros((n_a, 1))\r\n Wy = np.random.randn(n_y, n_a) * 0.01\r\n by = np.zeros((n_y, 1))\r\n\r\n parameters = {\"Wc\": Wc, \"Wu\": Wu, \"Wf\": Wf, \"Wo\": Wo, \"Wy\": Wy, \"bc\": bc, \"bu\": bu, \"bf\": bf, \"bo\": bo, \"by\": by}\r\n return parameters\r\n\r\ndef smooth(cur_loss, loss):\r\n return loss * 0.999 + cur_loss * 0.001\r\n\r\ndef get_initial_loss(vocab_size, seq_length):\r\n return -np.log(1.0/vocab_size) * seq_length\r\n\r\ndef sigmoid(z):\r\n return 1/(1 + np.exp(-z))\r\n\r\ndef softmax(v):\r\n e_v = np.exp(v - np.max(v))\r\n return e_v / np.sum(e_v, axis = 0)\r\n\r\ndef clip(gradients, maxValue):\r\n dWc, dWu, dWf, dWo, dWy, dbc, dbu, dbf, dbo, dby = gradients[\"dWc\"], gradients[\"dWu\"], gradients[\"dWf\"], gradients[\"dWo\"], gradients[\"dWy\"], gradients[\"dbc\"], gradients[\"dbu\"], gradients[\"dbf\"], gradients[\"dbo\"], gradients[\"dby\"]\r\n da_prev, dc_prev, dx = gradients[\"da_prev\"], gradients[\"dc_prev\"], gradients[\"dx\"]\r\n\r\n for gradient in [dWc, dWu, dWf, dWo, dWy, dbc, dbu, dbf, dbo, dby, da_prev, dc_prev, dx]:\r\n gradient = np.clip(gradient, -maxValue, maxValue, out=gradient)\r\n\r\n gradients = {\"dWc\": dWc, \"dWu\": dWu, \"dWf\": dWf, \"dWo\": dWo, \"dWy\": dWy, \"dbc\": dbc, \"dbu\": dbu, \"dbf\": dbf, \"dbo\": dbo, \"dby\": dby,\r\n \"da_prev\": da_prev, \"dc_prev\": dc_prev, \"dx\": dx}\r\n return gradients\r\n\r\ndef rnn_lstm_cell_forward(parameters, a_prevt, c_prevt, xt):\r\n Wc, Wu, Wf, Wo, Wy = parameters[\"Wc\"], parameters[\"Wu\"], parameters[\"Wf\"], parameters[\"Wo\"], parameters[\"Wy\"]\r\n bc, bu, bf, bo, by = parameters[\"bc\"], parameters[\"bu\"], parameters[\"bf\"], parameters[\"bo\"], parameters[\"by\"]\r\n\r\n concat = np.vstack((a_prevt, xt))\r\n\r\n ut = sigmoid(np.dot(Wu, concat) + bu)\r\n ft = sigmoid(np.dot(Wf, concat) + bf)\r\n ot = sigmoid(np.dot(Wo, concat) + bo)\r\n cct = np.tanh(np.dot(Wc, concat) + bc)\r\n\r\n c_next = np.multiply(ut, cct) + np.multiply(ft, c_prevt)\r\n a_next = np.multiply(ot, np.tanh(c_next))\r\n\r\n yt_pred = softmax(np.dot(Wy, a_next) + by)\r\n\r\n cache = (a_next, c_next, a_prevt, c_prevt, ft, ut, ot, cct, xt, parameters)\r\n return cache, yt_pred, a_next, c_next\r\n\r\ndef rnn_lstm_full_forward(parameters, a_prev, X, Y, vocab_size):\r\n x, y, a, c, y_hat = {}, {}, {}, {}, {}\r\n caches = []\r\n a[-1] = np.copy(a_prev)\r\n c[-1] = np.copy(a_prev)\r\n\r\n loss = 0\r\n\r\n for t in range(len(X)):\r\n x[t] = np.zeros((vocab_size, 1))\r\n y[t] = 
np.zeros((vocab_size, 1))\r\n y[t][Y[t]] = 1\r\n if X[t] != None:\r\n x[t][X[t]] = 1\r\n\r\n cache, y_hat[t], a[t], c[t] = rnn_lstm_cell_forward(parameters, a[t-1], c[t-1], x[t])\r\n loss -= np.log(y_hat[t][Y[t]])\r\n\r\n\r\n caches.append(cache)\r\n loss = np.sum(loss, axis = 0)\r\n caches = (caches, y_hat, a, c, x)\r\n return caches, loss, a\r\n\r\ndef rnn_lstm_cell_backward(cache, da_next, dc_next, dy):\r\n a_next, c_next, a_prevt, c_prevt, ft, ut, ot, cct, xt, parameters = cache\r\n n_x, _ = xt.shape\r\n n_a, _ = a_next.shape\r\n\r\n dot = np.multiply(np.multiply(da_next, np.tanh(c_next)), np.multiply(ot, 1 - ot))\r\n dft = np.multiply(np.multiply(dc_next, c_prevt) + np.multiply(np.multiply(ot, 1 - np.tanh(c_next)**2), np.multiply(c_prevt, da_next)), np.multiply(ft, 1- ft))\r\n dut = np.multiply(np.multiply(dc_next, cct) + np.multiply(np.multiply(ot, 1 - np.tanh(c_next)**2), np.multiply(cct, da_next)), np.multiply(ut, 1 - ut))\r\n dcct = np.multiply(np.multiply(dc_next, ut) + np.multiply(np.multiply(ot, 1 - np.tanh(c_next)**2), np.multiply(ut, da_next)), 1 - np.tanh(cct)**2)\r\n\r\n dWot = np.dot(dot, np.vstack((a_prevt, xt)).T)\r\n dWft = np.dot(dft, np.vstack((a_prevt, xt)).T)\r\n dWut = np.dot(dut, np.vstack((a_prevt, xt)).T)\r\n dWct = np.dot(dcct, np.vstack((a_prevt, xt)).T)\r\n dWy = np.dot(dy, a_next.T)\r\n dbo = np.sum(dot, axis = 1, keepdims = True)\r\n dbf = np.sum(dft, axis = 1, keepdims = True)\r\n dbu = np.sum(dut, axis = 1, keepdims = True)\r\n dbc = np.sum(dcct, axis = 1, keepdims = True)\r\n dby = dy\r\n\r\n da_prevt = np.dot(parameters[\"Wo\"].T[: n_a, :], dot) + np.dot(parameters[\"Wf\"].T[: n_a, :], dft) + np.dot(parameters[\"Wu\"].T[: n_a, :], dut) + np.dot(parameters[\"Wc\"].T[: n_a, :], dcct)\r\n dc_prevt = np.multiply(dc_next, ft) + np.multiply(np.multiply(ot, 1 - np.tanh(c_next)**2), np.multiply(ft, da_next))\r\n dxt = np.dot(parameters[\"Wo\"].T[n_a :, :], dot) + np.dot(parameters[\"Wf\"].T[n_a :, :], dft) + np.dot(parameters[\"Wu\"].T[n_a :, :], dut) + np.dot(parameters[\"Wc\"].T[n_a :, :], dcct)\r\n\r\n gradients = {\"da_prevt\": da_prevt, \"dc_prevt\": dc_prevt, \"dxt\": dxt, \"dWot\": dWot, \"dWft\": dWft, \"dWut\": dWut,\r\n \"dWct\": dWct, \"dWy\": dWy, \"dbo\": dbo, \"dbf\": dbf, \"dbu\": dbu, \"dbc\": dbc, \"dby\": dby}\r\n\r\n return gradients\r\n\r\ndef rnn_lstm_full_backward(caches, X, Y):\r\n (caches, y_hat, a, c, x) = caches\r\n (a_next, c_next, a_prevt, c_prevt, ft, ut, ot, cct, xt, parameters) = caches[0]\r\n\r\n n_a, _ = a_next.shape\r\n n_x, _ = xt.shape\r\n n_y, _ = parameters[\"by\"].shape\r\n\r\n dx = np.zeros((n_x, len(X)))\r\n da0 = np.zeros((n_a, 1))\r\n dc0 = np.zeros((n_a, 1))\r\n da_prev = np.zeros((n_a, 1))\r\n dc_prev = np.zeros((n_a, 1))\r\n dWo = np.zeros((n_a, n_a + n_x))\r\n dWf = np.zeros((n_a, n_a + n_x))\r\n dWu = np.zeros((n_a, n_a + n_x))\r\n dWc = np.zeros((n_a, n_a + n_x))\r\n dWy = np.zeros((n_y, n_a))\r\n dbo = np.zeros((n_a, 1))\r\n dbf = np.zeros((n_a, 1))\r\n dbu = np.zeros((n_a, 1))\r\n dbc = np.zeros((n_a, 1))\r\n dby = np.zeros((n_y, 1))\r\n\r\n for t in reversed(range(len(X))):\r\n dy = np.copy(y_hat[t])\r\n dy[Y[t]] -= 1\r\n da = np.dot(parameters[\"Wy\"].T, dy)\r\n gradients = rnn_lstm_cell_backward(caches[t], da_prev + da, dc_prev, dy )\r\n da_prev, dc_prev, dx = gradients[\"da_prevt\"], gradients[\"dc_prevt\"], gradients[\"dxt\"]\r\n dWot, dWft, dWut, dWct, dWyt = gradients[\"dWot\"], gradients[\"dWft\"], gradients[\"dWut\"], gradients[\"dWct\"], gradients[\"dWy\"]\r\n dbot, dbft, dbut, dbct, dbyt = 
gradients[\"dbo\"], gradients[\"dbf\"], gradients[\"dbu\"], gradients[\"dbc\"], gradients[\"dby\"]\r\n dWo += dWot\r\n dWf += dWft\r\n dWu += dWut\r\n dWc += dWct\r\n dWy += dWyt\r\n dbo += dbot\r\n dbf += dbft\r\n dbu += dbut\r\n dbc += dbct\r\n dby += dbyt\r\n\r\n da0 = da_prev\r\n dc0 = dc_prev\r\n gradients = {\"dx\": dx, \"da_prev\": da0, \"dc_prev\": dc0, \"dWo\": dWo, \"dWf\": dWf, \"dWu\": dWu, \"dWc\": dWc,\r\n \"dWy\": dWy, \"dbo\": dbo, \"dbf\": dbf, \"dbu\": dbu, \"dbc\": dbc, \"dby\": dby}\r\n\r\n return gradients\r\n\r\n\r\ndef update_parameters(gradients, parameters, lr = 0.01):\r\n\r\n parameters[\"Wo\"] -= lr * gradients[\"dWo\"]\r\n parameters[\"Wf\"] -= lr * gradients[\"dWf\"]\r\n parameters[\"Wu\"] -= lr * gradients[\"dWu\"]\r\n parameters[\"Wc\"] -= lr * gradients[\"dWc\"]\r\n parameters[\"Wy\"] -= lr * gradients[\"dWy\"]\r\n parameters[\"bo\"] -= lr * gradients[\"dbo\"]\r\n parameters[\"bf\"] -= lr * gradients[\"dbf\"]\r\n parameters[\"bu\"] -= lr * gradients[\"dbu\"]\r\n parameters[\"bc\"] -= lr * gradients[\"dbc\"]\r\n parameters[\"by\"] -= lr * gradients[\"dby\"]\r\n\r\n return parameters\r\n\r\ndef optimize(X, Y, a_prev, parameters, vocab_size):\r\n caches, loss, a = rnn_lstm_full_forward(parameters, a_prev, X, Y, vocab_size)\r\n gradients = rnn_lstm_full_backward(caches, X, Y)\r\n gradients = clip(gradients, 5)\r\n parameters = update_parameters(gradients, parameters)\r\n return loss, parameters, a[len(X) - 1]\r\n\r\ndef print_sample(sample_ix, idx_to_word):\r\n txt = ' '.join(idx_to_word[ix] for ix in sample_ix)\r\n print ('%s' % (txt, ), end='')\r\n\r\ndef model (vocab_size, word_to_idx, idx_to_word, specialSymbol, n_a, lr = 0.01, iteration = 1000, seq_length = 100):\r\n n_x, n_y = vocab_size, vocab_size\r\n parameters = initialize_parameters(n_a, n_x, n_y)\r\n initial_loss = get_initial_loss(vocab_size, seq_length)\r\n\r\n with open(\"shakespeare.txt\") as f:\r\n data = f.readlines()\r\n data = [x.lower().strip() for x in data]\r\n a_prev = np.zeros((n_a, 1))\r\n loss_plot = []\r\n for i in range(iteration):\r\n index = i % len(data)\r\n words = nltk.word_tokenize(data[index])\r\n\r\n X = [None] + [word_to_idx[w] for w in words]\r\n Y = X[1:] + [word_to_idx[\"\\n\"]]\r\n\r\n loss, parameters, a_prev = optimize(X, Y, a_prev, parameters, vocab_size)\r\n if i % 10 == 0:\r\n for a in range(5):\r\n sampled_indices = sample(parameters, word_to_idx)\r\n print_sample(sampled_indices, idx_to_word)\r\n print(\"\\n\")\r\n print (\"Loss value at iteration %d: %f\" % (i, loss))\r\n\r\n loss_plot.append(loss)\r\n\r\n plt.plot(loss_plot)\r\n plt.show()\r\n return parameters\r\n\r\ndef sample(parameters, word_to_idx):\r\n Wo, Wf, Wu, Wc, Wy = parameters[\"Wo\"], parameters[\"Wf\"], parameters[\"Wu\"], parameters[\"Wc\"], parameters[\"Wy\"]\r\n bo, bf, bu, bc, by = parameters[\"bo\"], parameters[\"bf\"], parameters[\"bu\"], parameters[\"bc\"], parameters[\"by\"]\r\n vocab_size = by.shape[0]\r\n n_a = Wo.shape[0]\r\n x = np.zeros((vocab_size, 1))\r\n a_prev = np.zeros((n_a , 1))\r\n c_prev = np.zeros((n_a , 1))\r\n indices = []\r\n idx = -1\r\n counter = 0\r\n newline_character = word_to_idx[\"\\n\"]\r\n\r\n while(idx != newline_character and counter != 10):\r\n cct = np.tanh(np.dot(Wc, np.vstack((a_prev, x))) + bc)\r\n ut = sigmoid(np.dot(Wu, np.vstack((a_prev, x))) + bu)\r\n ft = sigmoid(np.dot(Wf, np.vstack((a_prev, x))) + bf)\r\n ot = sigmoid(np.dot(Wo, np.vstack((a_prev, x))) + bo)\r\n c = np.multiply(ut, cct) + np.multiply(ft, c_prev)\r\n a = np.multiply(ot, 
np.tanh(c))\r\n y = softmax(np.dot(Wy, a) + by)\r\n idx = np.random.choice(list(range(vocab_size)), p = y[:,0])\r\n indices.append(idx)\r\n counter += 1\r\n a_prev = a\r\n c_prev = c\r\n x = y\r\n\r\n return indices\r\nif __name__ == \"__main__\":\r\n # gradients = model(len(dictionary), word_to_idx, idx_to_word, specialSymbol, len(dictionary) + 100, 0.01, 100, 100)\r\n # with open(\"shakespeare.txt\") as f:\r\n # data = f.readlines()\r\n # data = [x.lower().strip() for x in data]\r\n # words = nltk.word_tokenize(data[11])\r\n # print (dictionary)\r\n\r\n # for word in dictionary:\r\n # if word == \"\":\r\n # continue\r\n # if word[-1] in specialSymbol:\r\n\r\n\r\n # print (dictionary[word_to_idx[word]])\r\n\r\n x = np.arange(10)\r\n with open(\"parameters.csv\", \"w\") as out_file:\r\n for i in range(len(x)):\r\n out_string = \"\"\r\n out_string += str(x[i])\r\n", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 12930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "nltk.word_tokenize", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 140, "usage_type": "call"}, {"api_name": 
"numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 275, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.show", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 319, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 343, "usage_type": "call"}]} +{"seq_id": "76991933", "text": "from flask import Flask, render_template, request\nimport datetime\n\napp = Flask(__name__)\n\nmessages = []\n\n@app.route('/', methods=['GET'])\ndef home():\n return render_template('index.html', messages=messages)\n\n@app.route('/message', methods=['POST'])\ndef message():\n message = request.form['message']\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n messages.append({'message': message, 'ts': now})\n return 'success'\n\nif __name__ == '__main__':\n app.run(port=3000, debug=True, host='0.0.0.0')\n", "sub_path": "part-2/solution/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "320742076", "text": "from sklearn.tree import DecisionTreeClassifier\nimport pandas as pd\n\ndf=pd.read_csv(\"mitbih_train.csv\")\nveri=df.to_numpy()\nx_train=veri[:,0:187]\ny_train=veri[:,187]\n\ndf_test=pd.read_csv(\"mitbih_test.csv\")\nveri_test=df_test.to_numpy()\nx_test=veri_test[:,0:187]\ny_test=veri_test[:,187]\n\nclf = DecisionTreeClassifier(max_depth=20)\nclf.fit(x_train,y_train)\nscore = clf.score(x_test, y_test)\ny_predict=clf.predict_proba(x_test)", "sub_path": "mitbih_with_DecisionTree.py", "file_name": "mitbih_with_DecisionTree.py", "file_ext": "py", "file_size_in_byte": 422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": 
"call"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "513053919", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Callable, Tuple, cast\n\nimport lmfit\nimport numpy as np\nimport scipy.linalg as linalg\n\nfrom . import dv, zero_finding\nfrom .base_functions import _fold_exp, _fold_exp_and_coh\n\nposv = linalg.get_lapack_funcs(\n ('posv'\n ))\n\nLinAlgError = np.linalg.LinAlgError\n\ndef direct_solve(a, b):\n c, x, info = posv(a, b, lower=False, overwrite_a=True, overwrite_b=False)\n return x\n\n\nalpha = 0.001\n\n\ndef solve_mat(A, b_mat, method='ridge'):\n \"\"\"\n Returns the solution for the least squares problem |Ax - b_i|^2.\n \"\"\"\n if method == 'fast':\n #return linalg.solve(A.T.dot(A), A.T.dot(b_mat), sym_pos=True)\n return direct_solve(A.T.dot(A), A.T.dot(b_mat))\n\n elif method == 'ridge':\n\n X = np.dot(A.T, A)\n X.flat[::A.shape[1] + 1] += alpha\n Xy = np.dot(A.T, b_mat)\n #return linalg.solve(X, Xy, sym_pos=True, overwrite_a=True)\n return direct_solve(X, Xy)\n\n elif method == 'qr':\n cq, r = linalg.qr_multiply(A, b_mat)\n return linalg.solve_triangular(r, cq)\n\n elif method == 'cho':\n c, l = linalg.cho_factor(A.T.dot(A))\n return linalg.cho_solve((c, l), A.T.dot(b_mat))\n\n elif method == 'lstsq':\n return np.linalg.lstsq(A, b_mat)[0]\n\n elif method == 'lasso':\n import sklearn.linear_model as lm\n s = lm.Lasso(fit_intercept=False)\n s.alpha = alpha\n s.fit(A, b_mat)\n return s.coef_.T\n\n elif method == 'enet':\n import sklearn.linear_model as lm\n s = lm.ElasticNet(fit_intercept=False, l1_ratio=0.2)\n s.alpha = alpha\n s.fit(A, b_mat)\n return s.coef_.T\n\n else:\n raise ValueError('Unknow lsq method, use ridge, qr, fast or lasso')\n\n\nclass Fitter(object):\n \"\"\" The fit object, takes all the need data and allows to fit it.\n\n There a two different methods to fit the data. The fast one\n assumes, that the data has no dispersion, so the base vectors\n are the same for each channel. It is recommended to first work\n with the fast version. Note that the fast version is able to handle\n dispersion by using linear interpolation to transform the data\n to dispersion free data.\n\n The slower version calculates the base vector for each channel,\n in which the dispersion is integrated.\n\n The slower methods using the prefix full.\n\n Parameters\n ----------\n wl : ndarray(M)\n Array containing the wavelength-coordinates.\n t : ndarray(N)\n Array containing the time-coordinates.\n data : ndarry(N,M)\n The 2d-data to fit.\n model_coh : bool\n If the model contains coherent artifacts at the time zero,\n defaults to False.\n model_disp : int\n Degree of the polynomial which models the dispersion. If 1,\n only a offset is modeled, which is very fast.\n \"\"\"\n\n def __init__(self, tup, model_coh=False, model_disp=1):\n\n wl, t, data = tup\n self.t = t\n self.wl = wl\n self.data = data\n self.verbose = False\n self.model_coh = model_coh\n self.model_disp = model_disp\n self.lsq_method = 'ridge'\n\n self.num_exponentials = -1\n self.weights = None\n\n if model_disp > 1:\n self.org = data[:]\n self.disp_x = (wl - np.min(wl)) / (wl.max() - wl.min())\n self.used_disp = np.zeros(model_disp)\n\n def make_model(self, para):\n \"\"\"\n Calculates the model for given parameters. After calling, the\n DAS is at self.c, the model at self.model.\n\n If the dispersion is\n modeled, it is done via linear interpolation. 
This way, the base-\n vectors and their decomposition are only calculated once.\n\n Parameters\n ----------\n para : ndarray(N)\n para has the following form:\n [p_0, ..., p_M, w, tau_1, ..., tau_N]\n Where p are the coefficients of the dispersion polynomial,\n w is the width of the system response and tau are the decay\n times. M is equal to self.model_disp.\n\n \"\"\"\n self.last_para = np.asarray(para)\n if self._chk_for_disp_change(para):\n # Only calculate interpolated data if necessary:\n self.tn = np.poly1d(para[:self.model_disp])(self.disp_x)\n tup = dv.tup(self.wl, self.t, self.org)\n self.data = zero_finding.interpol(tup, self.tn)[2]\n self.used_disp[:] = para[:self.model_disp]\n\n self.num_exponentials = self.last_para.size - self.model_disp - 1\n if self.model_disp <= 1:\n self._build_xvec(para)\n self.x_vec = np.nan_to_num(self.x_vec)\n self.c = solve_mat(self.x_vec, self.data, self.lsq_method)\n self.model = np.dot(self.x_vec, self.c)\n self.c = self.c.T\n\n def _chk_for_disp_change(self, para):\n if self.model_disp > 1:\n if np.any(para[:self.model_disp] != self.used_disp):\n return True\n return False\n\n def _build_xvec(self, para):\n \"\"\"\n Build the base (the folded functions) for given parameters.\n \"\"\"\n para = np.array(para)\n if self.verbose:\n print(para)\n\n try:\n idx = (para != self._last)\n except AttributeError:\n #self._l\n idx = [True] * len(para)\n\n if self.model_disp == 1:\n x0, w, taus = para[0], para[1], para[2:]\n tau_idx = idx[2:]\n else:\n x0, w, taus = 0., para[0], para[1:]\n tau_idx = idx[1:]\n\n if any(idx[:2]) or self.model_disp or True:\n if self.model_coh:\n x_vec = np.zeros((self.t.size, self.num_exponentials + 3))\n #print(taus)\n a, b = _fold_exp_and_coh(self.t[:, None], w, x0, taus)\n #print(a.shape, b.shape)\n x_vec[:, -3:] = b[..., 0, :]\n x_vec[:, :-3] = a[..., 0, :]\n\n else:\n x_vec = _fold_exp(self.t[:, None], w, x0, taus).squeeze()\n self.x_vec = np.nan_to_num(x_vec)\n #self.x_vec /= np.max(self.x_vec, 0)\n self._last = para.copy()\n else:\n self.x_vec[:, tau_idx] = _fold_exp(self.t, w, x0, taus[tau_idx]).T\n\n def res(self, para):\n \"\"\"\n Return the residuals for given parameters using the same\n basevector for each channel. See make_model for para format.\n \"\"\"\n self.make_model(para)\n self.residuals = (self.model - self.data)\n if self.weights is not None:\n self.residuals *= self.weights\n return self.residuals.ravel()\n\n def full_res(self, para):\n \"\"\"\n Return the residuals for given parameter modelling each\n channel for it own.\n \"\"\"\n self.make_full_model(para)\n self.residuals = (self.model - self.data)\n if self.weights is not None:\n self.residuals *= self.weights\n return self.residuals.ravel()\n\n def make_full_model(self, para):\n \"\"\"\n Calculates the model for given parameters. After calling, the\n DAS is at self.c, the model at self.model.\n\n Parameters\n ----------\n para : ndarray(N)\n para has the following form:\n [p_0, ..., p_M, w, tau_1, ..., tau_N]\n Where p are the coefficients of the dispersion polynomial,\n w is the width of the system response and tau are the decay\n times. 
M is equal to self.model_disp.\n\n \"\"\"\n\n para = np.asarray(para)\n self._check_num_expontials(para)\n try:\n m_disp = self.model_disp\n is_disp_changed = (para[:m_disp] != self.last_para[:m_disp]).any()\n except AttributeError:\n is_disp_changed = True\n\n self.last_para = para\n\n if self.model_disp != 0 and is_disp_changed or True:\n self.tn = np.poly1d(para[:self.model_disp])(self.disp_x)\n self.t_mat = self.t[:, None] - self.tn[None, :]\n\n self._build_xmat(para[self.model_disp:], is_disp_changed)\n\n for i in range(self.data.shape[1]):\n A = self.xmat[:, i, :]\n self.c[i, :] = solve_mat(A, self.data[:, i], self.lsq_method)\n\n self.model = np.dot(self.xmat, self.c)\n\n def _build_xmat(self, para, is_disp_changed):\n \"\"\"\n Builds the basevector for every channel. The vectors\n are save self.xmat.\n \"\"\"\n para = np.array(para)\n try:\n idx = (para != self._last)\n except AttributeError:\n idx = [True] * len(para)\n\n w = para[0]\n taus = para[1:]\n x0 = 0.\n\n #Only calculate what is necessary.\n if idx[0] or is_disp_changed or True:\n exps, coh = _fold_exp_and_coh(self.t_mat, w, x0, taus)\n if self.model_coh:\n #print('test')\n self.xmat[:, :, -3:] = coh\n num_exp = self.num_exponentials\n self.xmat[:, :, :num_exp] = exps\n elif any(idx):\n self.xmat[:, :, idx[1:]] = _fold_exp(self.t_mat, w, x0, taus[idx[1:]])\n #self.xmat = np.nan_to_num(self.xmat)\n self._last = para\n\n def _check_num_expontials(self, para):\n \"\"\"\n Check if num_exp changed and allocate space as necessary.\n \"\"\"\n new_num_exp = para.size - self.model_disp - 1\n if new_num_exp != self.num_exponentials:\n self.num_exponentials = new_num_exp\n if self.model_disp:\n new_num_exp += 3\n n, m = self.data.shape\n self.xmat = np.empty((n, m, new_num_exp))\n self.c = np.zeros((self.data.shape[1], self.xmat.shape[-1]))\n self.model = np.empty_like(self.data)\n\n def res_sum(self, para):\n \"\"\"Returns the squared sum of the residuals for given parameters\"\"\"\n return np.sum(self.res(para)**2)\n\n def start_lmfit(self,\n x0,\n fixed_names=[],\n lower_bound=0.3,\n fix_long=True,\n fix_disp=False,\n full_model=1):\n p = lmfit.Parameters()\n for i in range(self.model_disp):\n p.add('p' + str(i), x0[i])\n if fix_disp:\n p['p' + str(i)].vary = False\n x0 = x0[self.model_disp:]\n\n p.add('w', x0[0], min=0)\n num_exp = len(x0) - 1\n for i, tau in enumerate(x0[1:]):\n name = 't' + str(i) #\n #\n p.add(name, tau, vary=True)\n if name not in fixed_names:\n p[name].min = lower_bound\n else:\n p[name].vary = False\n\n for i in fixed_names:\n p[i].vary = False\n if fix_long:\n p['t' + str(num_exp - 1)].vary = False\n\n def res(p):\n x = [k.value for k in p.values()]\n return self.res(x)\n\n def full_res(p):\n x = [k.value for k in p.values()]\n return self.full_res(x)\n\n fun = full_res if full_model else res\n return lmfit.Minimizer(fun, p)\n", "sub_path": "skultrafast/fitter.py", "file_name": "fitter.py", "file_ext": "py", "file_size_in_byte": 11102, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "scipy.linalg.get_lapack_funcs", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.linalg", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.linalg.qr_multiply", "line_number": 43, "usage_type": "call"}, {"api_name": 
"scipy.linalg", "line_number": 43, "usage_type": "name"}, {"api_name": "scipy.linalg.solve_triangular", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 44, "usage_type": "name"}, {"api_name": "scipy.linalg.cho_factor", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 47, "usage_type": "name"}, {"api_name": "scipy.linalg.cho_solve", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.linalg.lstsq", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 55, "usage_type": "name"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "base_functions._fold_exp_and_coh", "line_number": 187, "usage_type": "call"}, {"api_name": "base_functions._fold_exp", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 194, "usage_type": "call"}, {"api_name": "base_functions._fold_exp", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 265, "usage_type": "call"}, {"api_name": "base_functions._fold_exp_and_coh", "line_number": 277, "usage_type": "call"}, {"api_name": "base_functions._fold_exp", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 304, "usage_type": "call"}, {"api_name": "lmfit.Parameters", "line_number": 313, "usage_type": "call"}, {"api_name": "lmfit.Minimizer", "line_number": 345, "usage_type": "call"}]} +{"seq_id": "436071912", "text": "from sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.utils import np_utils\nfrom keras.utils import plot_model\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\n# import data\ndataset = pd.read_csv(\"dataset/iris/iris.csv\")\n\nx = dataset.iloc[:, 1:5].values\ny = dataset.iloc[:, 5].values\n\n# encode labels\n\nle = LabelEncoder()\ntransformed_y = le.fit_transform(y)\n\nencoded_y = np_utils.to_categorical(transformed_y, 
num_classes=3)\n\nprint(encoded_y)\n\n# feature scaling\nstdSclr = StandardScaler()\n\nx = stdSclr.fit_transform(x)\n\n\n# split the data in to test and train\n\nx_train, x_test, y_train, y_test = train_test_split(x, encoded_y, test_size=0.2,\n random_state=0)\n\n\nprint(x_train)\n\n# create type of neural model\n\nmodel = Sequential()\n\n# input & first hidden layer\nmodel.add(Dense(80, activation='relu', input_dim=4))\nmodel.add(Dropout(0.5))\n\n# second hidden layers\n\nmodel.add(Dense(90, activation='relu'))\nmodel.add(Dropout(0.5))\n\n# third hidden layers\n\nmodel.add(Dense(80, activation='tanh'))\nmodel.add(Dropout(0.5))\n\n\n# final layers\n\nmodel.add(Dense(3, activation='softmax'))\n\n# compile the models - binary classification problem\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n# Train the model\ntraining = model.fit(x_train, y_train, batch_size=5, epochs=20, verbose=0)\n\nloss, accuracy = model.evaluate(x_test, y_test, batch_size=2)\n\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], accuracy * 100))\n\nmodel.save('kerasmodel.h5')\n\n# plot_model(model, to_file='model.png')\n\n# summarize history for accuracy\nplt.subplots_adjust(hspace=0.5, wspace=0.7)\nplt.subplot(211)\nplt.plot(training.history['acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.grid(True)\n\n# # summarize history for loss\nplt.subplot(212)\nplt.plot(training.history['loss'], color='r')\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.grid(True)\n\nplt.show()\n", "sub_path": "keras_iris.py", "file_name": "keras_iris.py", "file_ext": "py", "file_size_in_byte": 2258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 23, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "218186133", "text": "from django.shortcuts import render, HttpResponseRedirect\nfrom django.views.decorators.http import require_GET\nfrom django.shortcuts import get_object_or_404\nfrom django.views.decorators.csrf import csrf_protect\n\nfrom .forms import CreateAuto, CreateManufacturer\nfrom .models import Manufacturer, Auto\n\n\ndef index(request):\n return render(request, 'models.html')\n\n\n@require_GET\ndef auto(request):\n all_auto = Auto.objects.all()\n if 'searchby' in request.GET:\n searchby = request.GET.get('searchby', '')\n text = request.GET.get('text', '')\n if searchby in ['name', 'manufacturer', 'body', 'year']:\n if searchby == 'manufacturer':\n query = {\"manufacturer__brand__icontains\": text}\n else:\n query = {(\"%s__icontains\" % searchby): text}\n all_auto = Auto.objects.filter(**query)\n if 'sortby' in request.GET:\n sortby = request.GET.get('sortby', '')\n text = request.GET.get('text', '')\n if sortby in ['name', '-name', 'manufacturer', '-manufacturer', 'body', '-body', 'year', '-year']:\n all_auto = Auto.objects.order_by(sortby)\n if 'id' in request.GET:\n id = request.GET.get('id', 0)\n auto = get_object_or_404(Auto, pk=id)\n return render(request, 'object_page_auto.html', {'auto': auto})\n if 'delete' in request.GET:\n delete = request.GET.get('delete', 0)\n result = Auto.objects.filter(pk=delete).delete()\n return render(request, 'objects_auto.html', {'all_auto': all_auto, 'result': result })\n\n return render(request, 'objects_auto.html', 
{'all_auto': all_auto})\n\n\n@csrf_protect\ndef auto_create(request):\n if request.method == 'POST':\n form = CreateAuto(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('./')\n else:\n return render(request, 'create_object.html', {'form': form})\n return render(request, 'create_object.html', {'form': CreateAuto()})\n\n\n@csrf_protect\ndef auto_edit(request):\n if 'id' in request.GET:\n id = request.GET.get('id', 0)\n auto = get_object_or_404(Auto, pk=id)\n init_dict = auto.__dict__\n m = init_dict.get('manufacturer_id')\n init_dict.update({'manufacturer': m})\n\n else:\n return HttpResponseRedirect('.')\n if request.method == 'POST':\n form = CreateAuto(request.POST, instance=auto)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('./')\n else:\n return render(request, 'create_object.html', {'form': form})\n return render(request, 'create_object.html', {'form': CreateAuto(initial=init_dict)})\n\n\n@require_GET\ndef manufacturer(request):\n all_manufacturers = Manufacturer.objects.all()\n if 'searchby' in request.GET:\n searchby = request.GET.get('searchby', '')\n text = request.GET.get('text', '')\n if searchby == 'brand':\n all_manufacturers = Manufacturer.objects.filter(brand__icontains=text)\n if searchby == 'country':\n all_manufacturers = Manufacturer.objects.filter(country__icontains=text)\n if 'sortby' in request.GET:\n sortby = request.GET.get('sortby', '')\n text = request.GET.get('text', '')\n if sortby in ['brand', '-brand', 'country', '-country']:\n all_manufacturers = Manufacturer.objects.order_by(sortby)\n if 'id' in request.GET:\n id = request.GET.get('id', 0)\n manufacturer = get_object_or_404(Manufacturer, pk=id)\n return render(request, 'object_page_man.html', {'manufacturer': manufacturer})\n if 'delete' in request.GET:\n delete = request.GET.get('delete', 0)\n result = Manufacturer.objects.filter(pk=delete).delete()\n return render(request, 'objects_man.html', {'all_manufacturers': all_manufacturers, 'result': result })\n\n return render(request, 'objects_man.html', {'all_manufacturers': all_manufacturers})\n\n\n@csrf_protect\ndef manufacturer_create(request):\n if request.method == 'POST':\n form = CreateManufacturer(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('./')\n else:\n return render(request, 'create_object.html', {'form': form})\n return render(request, 'create_object.html', {'form': CreateManufacturer()})\n\n\n@csrf_protect\ndef manufacturer_edit(request):\n if 'id' in request.GET:\n id = request.GET.get('id', 0)\n manufacturer = get_object_or_404(Manufacturer, pk=id)\n else:\n return HttpResponseRedirect('.')\n if request.method == 'POST':\n form = CreateManufacturer(request.POST, instance=manufacturer)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('./')\n else:\n return render(request, 'create_object.html', {'form': form})\n return render(request, 'create_object.html', {'form': CreateManufacturer(initial=manufacturer.__dict__)})\n", "sub_path": "16/autobase_homework/autobase/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Auto.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Auto.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Auto", "line_number": 16, 
"usage_type": "name"}, {"api_name": "models.Auto.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Auto.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Auto", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Auto.objects.order_by", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Auto.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Auto", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Auto", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Auto.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Auto.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Auto", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 14, "usage_type": "name"}, {"api_name": "forms.CreateAuto", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "forms.CreateAuto", "line_number": 52, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 43, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Auto", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 65, "usage_type": "call"}, {"api_name": "forms.CreateAuto", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "forms.CreateAuto", "line_number": 73, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 55, "usage_type": "name"}, {"api_name": "models.Manufacturer.objects.all", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Manufacturer.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.Manufacturer", "line_number": 78, "usage_type": "name"}, {"api_name": "models.Manufacturer.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Manufacturer.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.Manufacturer", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Manufacturer.objects.filter", "line_number": 85, "usage_type": "call"}, {"api_name": "models.Manufacturer.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "models.Manufacturer", "line_number": 85, "usage_type": "name"}, {"api_name": "models.Manufacturer.objects.order_by", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Manufacturer.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Manufacturer", "line_number": 
90, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Manufacturer", "line_number": 93, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Manufacturer.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "models.Manufacturer.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.Manufacturer", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 100, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 76, "usage_type": "name"}, {"api_name": "forms.CreateManufacturer", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "forms.CreateManufacturer", "line_number": 112, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 103, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Manufacturer", "line_number": 119, "usage_type": "argument"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 121, "usage_type": "call"}, {"api_name": "forms.CreateManufacturer", "line_number": 123, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 126, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 128, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 129, "usage_type": "call"}, {"api_name": "forms.CreateManufacturer", "line_number": 129, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "378010036", "text": "import gender_guesser.detector as gender\nfrom bs4 import BeautifulSoup as Soup\nimport requests\nimport re\n\nPYCON_HTML = 'https://bites-data.s3.us-east-2.amazonaws.com/pycon2019.html'\n\ndef _get_soup(html=PYCON_HTML):\n response = requests.get(PYCON_HTML)\n return Soup(response.content, \"html.parser\")\n\n\ndef get_pycon_speaker_first_names(soup=None):\n \"\"\"Parse the PYCON_HTML using BeautifulSoup, extracting all\n speakers (class \"speaker\"). 
Note that some items contain\n    multiple speakers so you need to extract them.\n    Return a list of first names\n    \"\"\"\n    soup = soup or _get_soup()\n\n    speakers = []\n    for speaker in soup.find_all('span', 'speaker'):\n        speaker = speaker.text.strip()\n\n        # Multiple speakers separated by comma\n        if re.match(r'.*,.*', speaker):\n            multiple_speakers = speaker.split(', ')\n            for s in multiple_speakers:\n                speakers.append(s.split()[0])\n\n        # Multiple speakers separated by slash\n        elif re.match(r'.*/.*', speaker):\n            multiple_speakers = speaker.split(' / ')\n            for s in multiple_speakers:\n                speakers.append(s.split()[0])\n\n        # Speaker name begins with acronym\n        elif re.match(r'[A-Z]\\.', speaker):\n            speakers.append(speaker.split()[1])\n\n        else:\n            speakers.append(speaker.split()[0])\n\n    return speakers\n\n\ndef get_percentage_of_female_speakers(first_names):\n    \"\"\"Run gender_guesser on the names returning a percentage\n    of female speakers (female and mostly_female),\n    rounded to 2 decimal places.\"\"\"\n    d = gender.Detector(case_sensitive=False)\n    genders = [d.get_gender(speaker) for speaker in first_names]\n    total_speakers = len(genders)\n    female_speakers = len([g for g in genders if g in ('female', 'mostly_female')])\n    return round(female_speakers / total_speakers * 100, 2)\n\n\nif __name__ == '__main__':\n    names = get_pycon_speaker_first_names()\n    perc = get_percentage_of_female_speakers(names)\n    print(perc)", "sub_path": "125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/205_female_speakers_at_pycon_us/save2_passed.py", "file_name": "save2_passed.py", "file_ext": "py", "file_size_in_byte": 2030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "re.match", "line_number": 26, "usage_type": "call"}, {"api_name": "re.match", "line_number": 32, "usage_type": "call"}, {"api_name": "re.match", "line_number": 38, "usage_type": "call"}, {"api_name": "gender_guesser.detector.Detector", "line_number": 51, "usage_type": "call"}, {"api_name": "gender_guesser.detector", "line_number": 51, "usage_type": "name"}]}
+{"seq_id": "22737775", "text": "# Standard imports\n\nimport torch\nimport numpy as np\nimport os\n\n# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n'''Tensors are a data structure similar to arrays/matrices. 
\nThey can be created directly from data in nested lists, np arrays, other tensors, or random/constant values'''\n\ndata = [[1,2],[3,4]]\nx_data = torch.tensor(data)\n# print(x_data)\n\nx_np_array = np.array(data)\nx_np = torch.from_numpy(x_np_array)\n# print(x_np)\n\nx_ones = torch.ones_like(x_data)\n# print(f'Ones Tensor: \\n {x_ones} \\n')\n\nx_rand = torch.rand_like(x_data, dtype=torch.float)\n# print(f'Random Tensor: \\n {x_rand} \\n')\n\ntensor = torch.rand(3,4)\n\n'''Tensors are normally created on the CPU, and need to explicitly be moved to the GPU using the `.to` method'''\nif torch.cuda.is_available():\n    print('switching to cuda:')\n    tensor = tensor.to('cuda')\nprint(tensor)\n", "sub_path": "Learn_PyTorch/tensors.py", "file_name": "tensors.py", "file_ext": "py", "file_size_in_byte": 870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.tensor", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.rand_like", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 29, "usage_type": "attribute"}]}
+{"seq_id": "272131770", "text": "import os\n\nimport django\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'BepMarketplace.settings_development'\ndjango.setup()\n\nimport argparse\nfrom timeline.models import TimePhase\nfrom datetime import datetime, timedelta\nfrom timeline.utils import get_timeslot\nfrom django.core.cache import cache\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Nr: \\\n            1:Generating project proposals\\\n            2:Projects quality check; \\\n            3:Students choosing projects; \\\n            4:Distribution of projects; \\\n            5:Gather and process objections; \\\n            6:Execution of the projects; \\\n            7:Presentation of results\")\n    parser.add_argument('p', nargs='?', const=1, type=str, default='0', help='timephase number')\n    n = int(parser.parse_args().p)\n    assert n >= 0, 'too low number'\n    assert n <= 7, 'too high number'\n    TimePhase.objects.all().delete()\n    if n>0:\n        t = TimePhase(Begin=datetime.now() - timedelta(days=2),\n                      End=datetime.now() + timedelta(days=40),\n                      Description=n,\n                      TimeSlot=get_timeslot())\n        t.save()\n    cache.clear()\n", "sub_path": "dev_set_timephase.py", "file_name": "dev_set_timephase.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "timeline.models.TimePhase.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "timeline.models.TimePhase.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "timeline.models.TimePhase", "line_number": 27, "usage_type": "name"}, {"api_name": "timeline.models.TimePhase", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 30, "usage_type": "call"}, {"api_name": "timeline.utils.get_timeslot", "line_number": 32, "usage_type": "call"}, {"api_name": "django.core.cache.cache.clear", "line_number": 34, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "196331654", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=200)\n parent = models.ForeignKey('self', on_delete=models.CASCADE, related_name='children', blank=True, null=True)\n\n @classmethod\n def get_json_repr(cls):\n def dfs_rec(s, context, visited):\n visited[s] = True\n context[s.name] = {}\n for u in s.children.all():\n if visited[u] is False:\n context[s.name] = dfs_rec(u, context[s.name], visited)\n return context\n\n root = Category.objects.get(parent=None)\n visited = {}\n for cat in Category.objects.all():\n visited[cat] = False\n\n context = {}\n return dfs_rec(root, context, visited)\n\n def get_descendants(self):\n result = []\n visited = {}\n for cat in Category.objects.all():\n visited[cat] = False\n return self.get_descendants_util(self, result, visited)\n\n def get_descendants_util(self, s, q, visited):\n visited[s] = True\n q.append(s)\n for child in s.children.all():\n if visited[child] is False:\n q = self.get_descendants_util(child, q, visited)\n return q\n\n def get_ancestor_names_path(self):\n cur_node = self\n ancestors = []\n while cur_node:\n ancestors.append(cur_node.name)\n cur_node = cur_node.parent\n return list(reversed(ancestors))\n\n def __str__(self):\n return ' -> '.join(self.get_ancestor_names_path())\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=200)\n body = models.TextField()\n category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, related_name='related_posts')\n STATUS_CHOICES = (\n ('draft', 'Draft'),\n ('publish', 'Publish')\n )\n status = models.CharField(max_length=200, choices=STATUS_CHOICES)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n time_created = models.DateTimeField(auto_now_add=True)\n time_published = models.DateTimeField(blank=True, null=True)\n\n def get_post_info(self):\n comments = []\n for comment in self.comments.filter(is_approved=True):\n comments.append({\n 'body': comment.body,\n 'author': comment.author.username,\n 'time_published': comment.time_published,\n })\n post_context = {\n 'id': self.id,\n 'title': self.title,\n 'body': self.body,\n 'category': self.category.name,\n 'author': self.author.username,\n 'time_published': self.time_published,\n 'comments': comments\n }\n return post_context\n\n @classmethod\n def search_in_body(cls, to_be_searched):\n def string_found(string1, string2):\n import re\n if re.search(r\"\\b\" + re.escape(string1) + r\"\\b\", string2):\n return True\n return False\n\n words = to_be_searched.split()\n result = []\n\n for post in cls.objects.all():\n seen_words = []\n unseen_words = []\n for word in words:\n if string_found(word, post.body):\n seen_words.append(word)\n else:\n unseen_words.append(word)\n if len(seen_words) != 0:\n post_info = post.get_post_info()\n 
post_info[\"seen_words\"] = seen_words\n post_info[\"unseen_words\"] = unseen_words\n result.append(post_info)\n\n sorted_result = sorted(result, key=lambda k: len(k[\"seen_words\"]), reverse=True)\n return sorted_result\n\n def __str__(self):\n return '{} by {}'.format(self.title, self.author)\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')\n body = models.TextField()\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n is_approved = models.BooleanField(default=False)\n time_created = models.DateTimeField(auto_now_add=True)\n time_published = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return '{} by {}'.format(self.body, self.author)", "sub_path": "blog_app/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 63, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "re.search", "line_number": 90, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 118, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 118, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 119, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 119, "usage_type": "name"}, 
{"api_name": "django.db.models.CASCADE", "line_number": 119, "usage_type": "attribute"}, {"api_name": "django.db.models.TextField", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 121, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 121, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 121, "usage_type": "attribute"}, {"api_name": "django.db.models.BooleanField", "line_number": 122, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 123, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 123, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 124, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 124, "usage_type": "name"}]} +{"seq_id": "611049388", "text": "import cv2\r\nimport numpy as np \r\nfrom PIL import Image\r\n\r\nKEYESCAPE=27\r\nKEYENTER=10 ## Different from Windows\r\nKEYLEFT=81\r\nKEYRIGHT=83\r\nKEYUP=82\r\nKEYDOWN=84\r\nWAITMSTOREFRESH=15\r\n\r\n\r\nCMD_STRLEN_DEF=50 \t\t\t###< Default length for commands\r\nBUFFER_FRAME_DEF=10000000 ###< 10Mb fixed-size buffer for TX-RX\r\n\r\nSTRLEN_SHORT_DEF=50\r\nSTRLEN_LONG_DEF=255\r\n\r\nCAM_FPS_DEF=10\t\t###< Default frames per second (FPS) \r\nCAM_POSX_DEF=10.0\t###< Default X-position (left-right)\r\nCAM_POSY_DEF=-4.0\t###< Default Y - position(Height)\r\nCAM_POSZ_DEF=3.0\t###< Default Z-position (Forward-backward)\r\nCAM_ROTX_DEF=20.0\t###< Default X-axis rotation\r\nCAM_ROTY_DEF=10.0\t###< Default Y-axis rotation\r\nCAM_ROTZ_DEF=0.0\t###< Default Z-axis rotation\r\nCAM_FOV_DEF=1.0\t\t###< Default FOV\r\nCAM_JPGQ_DEF=80\t\t###< Default JPEG quality\r\n\r\n\r\nCAM_MOVE_LEFT = 0\t###< ID to move camera left\r\nCAM_MOVE_RIGHT=1\t###< ID to move camera right\r\nCAM_MOVE_UP=2\t\t###< ID to move camera up\r\nCAM_MOVE_DOWN=3\t\t###< ID to move camera down\r\nCAM_MOVE_FORWARD=4\t###< ID to move camera forward\r\nCAM_MOVE_BACKWARD=5\t###< ID to move camera backward\r\nCAM_ROTATE_LEFT=6\t###< ID to rotate camera left\r\nCAM_ROTATE_RIGHT=7\t###< ID to rotate camera right\r\nCAM_ROTATE_UP=8\t\t###< ID to rotate camera up\r\nCAM_ROTATE_DOWN=9\t###< ID to rotate camera down\r\n\r\n\r\n### Possible compression schemes to TX/RX images between MSS server & client\r\nCAM_JPEG=0\t\t###< JPEG codec (lossy image compression)\r\nCAM_PNG=1\t\t###< PNG codec (lossless image compression)\r\n\r\n## Minimum pixel height-coordinates (in the displayed image for the \"showCameraScheme\")\r\nSECOND_IMG_OFFSET=95.0\r\n\r\n## Distance between min & max user inputs for the 2nd screen (\"showCameraScheme\" --> cameraHeight-GUI)\r\nSECOND_IMG_WIDTH=550.0\r\n\r\ndef overlayImage(bg,icon,pos):\r\n w_bg=bg.width\r\n h_bg=bg.height\r\n w_icon=icon.width\r\n h_icon=icon.height\r\n full_icon=Image.new('RGBA',(w_bg,h_bg),(0,0,0,0))\r\n icon=np.array(icon)\r\n full_icon=np.array(full_icon)\r\n\t# position of start of camera icon given by pos, \r\n\t# or 0 if the pos given is negative\r\n start_full_w=max(pos[0],0)\r\n start_full_h=max(pos[1],0)\r\n\t# position of end of camera icon given by pos, \r\n\t# or size of background if the pos given is out of bounds\r\n end_full_w=min(pos[0]+w_icon,w_bg)\r\n 
end_full_h=min(pos[1]+h_icon,h_bg)\r\n\t# Coordinates to crop the icon if we go out of bounds with pos\r\n start_icon_w=start_full_w-pos[0]\r\n start_icon_h=start_full_h-pos[1]\r\n end_icon_w=end_full_w-pos[0]\r\n end_icon_h=end_full_h-pos[1]\r\n full_icon[start_full_h:end_full_h,start_full_w:end_full_w,:]=icon[start_icon_h:end_icon_h,start_icon_w:end_icon_w,:]\r\n out_icon=Image.fromarray(np.uint8(full_icon))\r\n imout=Image.alpha_composite(bg,out_icon)\r\n return imout\r\n\r\n### Method create strings to display messages in the command window\r\n\"\"\"static std::map create_codec_str() {\r\n\r\n\tstd::map m;\r\n\tm.insert(std::make_pair(CAM_JPEG, \"CAM_JPEG\"));\r\n\tm.insert(std::make_pair(CAM_PNG, \"CAM_PNG\"));\r\n\tm.insert(std::make_pair(CAM_RAW, \"CAM_RAW\"));\r\n\treturn m;\r\n}\"\"\"\r\n", "sub_path": "utils/MSSutils.py", "file_name": "MSSutils.py", "file_ext": "py", "file_size_in_byte": 3072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "PIL.Image.new", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.Image.alpha_composite", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}]} +{"seq_id": "588992260", "text": "import random\n\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\n\nfrom rpc import RpcService\n\nimport conf\n\nclass Singleton(type):\n def __init__(cls, name, bases, dict):\n super(Singleton, cls).__init__(name, bases, dict)\n cls.instance = None \n\n def __call__(cls, *args, **kw):\n if cls.instance is None:\n cls.instance = super(Singleton, cls).__call__(*args, **kw)\n return cls.instance\n\nclass PlowConnection(object):\n __metaclass__ = Singleton\n\n def __init__(self, hosts=None):\n self.__hosts = hosts\n if not self.__hosts:\n self.__hosts = conf.get(\"plow\", \"hosts\").split(\",\")\n if not self.__hosts:\n self.__hosts = [\"localhost:11336\"]\n\n self.__socket = None\n self.__transport = None\n self.__protocol = None\n self.__service = None\n\n self.setup()\n\n def setup(self):\n host, port = self.__randomServer()\n self.__socket = TSocket.TSocket(host, port)\n self.__transport = TTransport.TFramedTransport(self.__socket)\n self.__protocol = TBinaryProtocol.TBinaryProtocol(self.__transport)\n\n def disconnect(self):\n self.__transport.close()\n\n @property\n def service(self):\n if not self.__service:\n self.__transport.open()\n self.__service = RpcService.Client(self.__protocol)\n return self.__service\n\n def __randomServer(self):\n rand = random.randint(0, len(self.__hosts)-1)\n host, port = self.__hosts[rand].split(\":\")\n return (host, int(port))\n\n\nConn = PlowConnection()\n\n\n\n\n\n\n", "sub_path": "lib/python/plow/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1663, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "conf.get", "line_number": 27, "usage_type": "call"}, {"api_name": "thrift.transport.TSocket.TSocket", 
"line_number": 40, "usage_type": "call"}, {"api_name": "thrift.transport.TSocket", "line_number": 40, "usage_type": "name"}, {"api_name": "thrift.transport.TTransport.TFramedTransport", "line_number": 41, "usage_type": "call"}, {"api_name": "thrift.transport.TTransport", "line_number": 41, "usage_type": "name"}, {"api_name": "thrift.protocol.TBinaryProtocol.TBinaryProtocol", "line_number": 42, "usage_type": "call"}, {"api_name": "thrift.protocol.TBinaryProtocol", "line_number": 42, "usage_type": "name"}, {"api_name": "rpc.RpcService.Client", "line_number": 51, "usage_type": "call"}, {"api_name": "rpc.RpcService", "line_number": 51, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "143524405", "text": "import pygame, sys\nfrom pygame.locals import *\n\nDISPLAYSURF = pygame.display.set_mode((800,480)) # numbers in pixels\n## Explore fullscreen options\n\n# Colors\n## Deside about transparancy\nGRAY = (197, 197, 197)\nBLACK = (0, 0, 0)\nBLUE = (27, 15, 193)\nWHITE = (255, 255, 255)\nMAIZE = (244, 238, 66)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\n\n\n# Images\nstartLogo = pygame.image.load(\"images/startLogo.jpg\")\nstartButton = pygame.image.load(\"images/startButton.jpg\")\nstart_logo = pygame.transform.scale(startLogo, (400,400))\nstart_button = pygame.transform.scale(startButton, (200, 100))\n\npygame.init()\n\nnumPlayers = 1\n\n# Font\nlogoFont = pygame.font.Font(\"wheel-of-fortune/SF Fortune Wheel Bold Italic.ttf\", 28)\nmainFont = pygame.font.Font(\"freesansbold.ttf\", 17)\nbuttonFont = pygame.font.Font(\"freesansbold.ttf\", 28)\nheaderFont = pygame.font.Font(\"freesansbold.ttf\", 40)\nloginText = headerFont.render(\"Login\", True, BLACK)\nlogoText = logoFont.render(\"Guess That Phrase\", True, MAIZE)\nstartText = logoFont.render(\"Start\", True, BLUE)\nenterNameText = mainFont.render(\"Write your Name: \", True, BLACK)\nclearText = buttonFont.render(\"Clear\", True, BLACK)\ndoneText = buttonFont.render(\"Done\", True, BLACK)\nmonitorText = headerFont.render(\"Look up and smile!\", True, BLACK)\n\n# Font Sizes\nloginTextW, loginTextH = headerFont.size(\"Login\")\n## Change all text so its in relation to other things on the screen\n\n\nmenu_rect = pygame.Rect(0, 0, 800, 480) ## possibly faster to use surfaces instead\nstart_button_rect = pygame.Rect(menu_rect.centerx - 95, menu_rect.centery+85, 190, 90)\nenter_name_rect = pygame.Rect(100, 80, 600, 200) ## make vairables for cooridinates\nclear_name_rect = pygame.Rect(103, 83, 594, 194)\nclear_button_rect = pygame.Rect(450, 330, 200, 100)\ndone_button_rect = pygame.Rect(150, 330, 200, 100)\n\nclick = False\npenSize = 5\npenColor = BLACK\nbegin = False\nnumPlayers = 0\n\nwhile True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n if event.type == MOUSEBUTTONDOWN:\n click = True\n if event.type == MOUSEBUTTONUP:\n click = False\n\n # Start menu\n if begin == False:\n pygame.draw.rect(DISPLAYSURF, BLACK, menu_rect)\n pygame.draw.rect(DISPLAYSURF, BLACK, start_button_rect)\n # DISPLAYSURF.blit(start_logo, (menu_rect.centerx - 200, menu_rect.centery - 100))\n DISPLAYSURF.blit(start_button, (menu_rect.centerx - 100, menu_rect.centery+80))\n DISPLAYSURF.blit(startText, (menu_rect.centerx - 50, menu_rect.centery+120))\n DISPLAYSURF.blit(logoText, (menu_rect.centerx - 150, menu_rect.centery-10))\n\n # Collision Detection with start button\n mouse_pos = 
pygame.mouse.get_pos()\r\n\r\n        if click == True:\r\n            if start_button_rect.collidepoint(mouse_pos):\r\n                DISPLAYSURF.fill(WHITE)\r\n                click = False\r\n                begin = True\r\n\r\n    # Enter name menu\r\n    elif begin == True:\r\n        pygame.draw.rect(DISPLAYSURF, BLACK, enter_name_rect, 5)\r\n        DISPLAYSURF.blit(enterNameText, (110, loginTextH + 20)) #60\r\n        pygame.draw.rect(DISPLAYSURF, MAIZE, clear_button_rect)\r\n        DISPLAYSURF.blit(clearText, (510, 370))\r\n        pygame.draw.rect(DISPLAYSURF, BLUE, done_button_rect)\r\n        DISPLAYSURF.blit(doneText, (210, 370))\r\n        DISPLAYSURF.blit(loginText, (10, 10))\r\n\r\n        mouse_pos = pygame.mouse.get_pos()\r\n        if click == True and mouse_pos[0] > 103 and mouse_pos[0] < 693 and mouse_pos[1] > 87 and mouse_pos[1] < 277:\r\n            pygame.draw.circle(DISPLAYSURF, penColor, mouse_pos, penSize)\r\n\r\n        elif click == True and clear_button_rect.collidepoint(mouse_pos):\r\n            pygame.draw.rect(DISPLAYSURF, WHITE, clear_name_rect)\r\n\r\n        elif click == True and done_button_rect.collidepoint(mouse_pos):\r\n            click = False\r\n            if numPlayers > 5:\r\n                DISPLAYSURF.fill(BLACK)\r\n            else:\r\n                save_name = pygame.Surface((594, 194))\r\n                save_name.blit(DISPLAYSURF, (0, 0), (103, 43, 594, 194))\r\n                numPlayers = numPlayers + 1\r\n                pygame.image.save(save_name, \"faces/Player_\" + str(numPlayers) + \".jpg\")\r\n                pygame.draw.rect(DISPLAYSURF, WHITE, clear_name_rect)\r\n\r\n    # Direct Attention to Monitor\r\n    DISPLAYSURF.fill(BLUE, menu_rect)\r\n    # DISPLAY.blit(monitorText, (\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n    pygame.display.update()\r\n", "sub_path": "game_flow/game_touchscreen.py", "file_name": "game_touchscreen.py", "file_ext": "py", "file_size_in_byte": 4578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pygame.display.set_mode", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 51, 
"usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.image.save", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 133, "usage_type": "attribute"}]} +{"seq_id": "632485526", "text": "#!user/bin/python\n\nimport datetime\nfrom logging.config import listen\nimport re\n\n\nclass Messreihe():\n\n def __init__(self, stringfolge = None):\n self.liste = []\n\n if stringfolge is not None:\n i = 0\n for line in stringfolge:\n i+=1\n try:\n m = Messwert(line)\n self.liste.append(m)\n except ValueError:\n print(\"Datei ist Fehlerhaft, Zeile: \"+ str(i) + \" \" + line)\n\n def __len__(self):\n return len(self.liste)\n\n def add(self, *other):\n for ele in other:\n if ele not in self.liste:\n self.liste.append(ele)\n\n def __add__(self, other):\n if type(other) == Messreihe:\n for ele in other.liste:\n if ele not in self.liste:\n self.liste.append(other)\n\n def __iter__(self):\n return MessreiheIterator(self.liste)\n\n def __getitem__(self, item):\n if type(item) == slice:\n return self.liste[item.start:item.stop:item.step]\n\n\n elif type(item) == int:\n return self.liste[item]\n\n elif type(item) == str:\n new = Messreihe()\n for ele in self.liste:\n if str(ele.zeitpunkt[:len(item)]) == item:\n new.add(ele)\n\n return new\n \n def getGenerator(self):\n return 
MessreiheGenIter(self.liste)\r\n    \r\n    \r\n\r\n\r\n\r\n\r\nclass MessreiheIterator():\r\n\r\n    def __init__(self, liste):\r\n        self.pos = -1\r\n        self.liste = liste\r\n\r\n    def __next__(self):\r\n        self.pos += 1\r\n        if self.pos >= len(self.liste):\r\n            raise StopIteration\r\n        return self.liste[self.pos]\r\n\r\n\r\nclass MessreiheGenIter():\r\n    \r\n    def __init__(self, liste):\r\n        self.pos = -1\r\n        self.liste = liste\r\n    \r\n    def __iter__(self):\r\n        while self.pos + 1 < len(self.liste):  # stop at the end instead of raising IndexError\r\n            self.pos += 1\r\n            yield self.liste[self.pos]\r\n\r\n\r\n\r\nclass Messwert(object):\r\n\r\n\r\n    def __init__(self, line=None, temperatur=None):\r\n        \r\n        if temperatur is not None:\r\n            self._parseBoth(line, temperatur)\r\n        else:\r\n            self._parseLine(line)\r\n\r\n\r\n    def _parseLine(self, line):\r\n        #\"2018-01-10 14:00:01.2586\",22.0\r\n        #\"2018-01-10 14:15:01.570572\",21.4375\r\n        pattern = re.compile(r'\\\"[1-2][0-9]{3}-([0][1-9]|[1][0-2])-(0[1-9]|[12][0-9]|3[01]) ([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]).[0-9]*\\\",[0-9]{2}.[0-9]+')\r\n        \r\n        if pattern.match(line) is None:\r\n            raise ValueError(\"Input string of the Messwert does not match the expected format\")\r\n        else:\r\n            line = line.replace('\"', \"\")\r\n            line = line.split(\",\")\r\n            self.zeitpunkt = line[0]\r\n            self.temperatur = float(line[1])\r\n        \r\n\r\n\r\n    def _parseBoth(self, zeitpunkt, temperatur):\r\n        pattern = re.compile(r'[1-2][0-9]{3}-([0][1-9]|[1][0-2])-(0[1-9]|[12][0-9]|3[01]) ([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]).[0-9]*')\r\n        if pattern.match(zeitpunkt) is None:\r\n            raise ValueError(\"Zeitpunkt string is invalid\")\r\n        else:\r\n            self.zeitpunkt = zeitpunkt\r\n            self.temperatur = float(temperatur)\r\n        \r\n\r\n    def __repr__(self):\r\n\r\n        return \"('%s', %s)\" % (str(self.zeitpunkt), str(self.temperatur))\r\n\r\n    def __str__(self):\r\n        return \"('%s', %s)\" % (str(self.zeitpunkt), str(self.temperatur))\r\n\r\n    def __eq__(self, other):\r\n        return self.temperatur == other.temperatur \\\r\n               and self.zeitpunkt == other.zeitpunkt\r\n\r\n    #<\r\n    def __lt__(self, other):\r\n        d = datetime.datetime.strptime(self.zeitpunkt.split('.')[0], \"%Y-%m-%d %H:%M:%S\")  # drop fractional seconds of any length\r\n        o = datetime.datetime.strptime(other.zeitpunkt.split('.')[0], \"%Y-%m-%d %H:%M:%S\")\r\n\r\n        if d == o and self.temperatur < other.temperatur:\r\n            return True\r\n        else:\r\n            return d < o\r\n\r\n    \r\n    def __hash__(self):\r\n        return hash((self.temperatur, self.zeitpunkt))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    m = Messreihe(open(\"messwerte.csv\"))\r\n    #m1 = Messwert('\"2018-01-10 14:00:01.2586\",22.0')\r\n    \r\n    \r\n\r\n", "sub_path": "Python/Blatt12/a48.py", "file_name": "a48.py", "file_ext": "py", "file_size_in_byte": 4081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "re.compile", "line_number": 104, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 138, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "attribute"}]}
+{"seq_id": "91678785", "text": "from typing import Tuple\nfrom flask import current_app\nimport psycopg2 as db \n\n# in a created model, the following values should be set:\n# self.TABLE_NAME, name of the table\n# self.COLUMN_NAMES, a tuple consisting of the column names,\n# COLUMN NAMES must be in the same order as they are defined in the table\n\nclass BaseModel():\n    # If an id is given, the object tries to find it and copies all values\n    # The id attribute of an entry created via id should not be changed\n    # 
After creation, all values in the database can be\n    # accessed and changed with object_name.attribute_name\n    # The save method will update the entry in the database directly\n    # Changing id directly after init should not be allowed\n    # The existence of _ORIGINAL_ATTR means the object was created\n    # from a row in the table\n    def __init__(self, entry=None):\n        if entry is None:\n            return \n        elif isinstance(entry, int):\n            if entry < 0:\n                raise NotImplementedError('id must be bigger than zero')\n            fetched_values = self._from_table_get_by_id(entry)\n            if not fetched_values:\n                raise NotImplementedError(f'Entry with id {entry} was not found in table {self.__class__.TABLE_NAME}')\n        elif isinstance(entry, tuple):\n            assert len(entry) == len(self.__class__.COLUMN_NAMES)\n            fetched_values = entry\n        else:\n            return\n        self._ORIGINAL_ATTR = fetched_values\n        self._set_attr(fetched_values)\n    \n    # If an id is not given, an empty entry will be created\n    # _ORIGINAL_ATTR will not exist and save function will act accordingly\n    # Interface user is expected to set all column attributes before calling save\n    #def __init__(self, *args):\n    #    pass\n\n    # Saves the object to table\n    def save(self):\n        with db.connect(current_app.config['DB_URL']) as conn:\n            cursor = conn.cursor()\n            if hasattr(self, '_ORIGINAL_ATTR'):\n                assert hasattr(self, 'id')\n                changed = self._get_changed()\n                if not changed:\n                    return\n                placeholders = ', '.join((key + ' = %s') for key in changed.keys())\n                query = f'''UPDATE {self.__class__.TABLE_NAME}\n                            SET {placeholders}\n                            WHERE id = {self._ORIGINAL_ATTR[0]}'''\n                cursor.execute(query, list(changed.values()))\n                print(f'Existing db entry {self.__class__.__name__} updated')\n            else:\n                if not self._is_attr_complete():\n                    raise NotImplementedError('INSUFFICIENT ATTR')\n                columns = ', '.join(self.__class__.COLUMN_NAMES[1:])\n                placeholders = (len(self.__class__.COLUMN_NAMES[1:]) * '%s, ')[:-2]\n                query = f'''INSERT INTO {self.__class__.TABLE_NAME} ({columns})\n                            VALUES ({placeholders})\n                            RETURNING id\n                        '''\n                t = self._get_attr()\n                cursor.execute(query, t)\n                print(f'New db entry {self.__class__.__name__} created')\n                self.id = cursor.fetchone()[0]\n            conn.commit()\n\n    # deletes object from table\n    def delete(self):\n        if hasattr(self, 'id'):\n            self.delete_direct(self.id)\n        else:\n            raise NotImplementedError(\"Cannot delete nonexistent row, save first or discard\")\n    \n    # deletes specified row with the supplied primary key\n    # if you don't already have a model object but know the id\n    # calling this directly is more optimized\n    @classmethod\n    def delete_direct(cls, pk):\n        assert isinstance(pk, int), \"Key must be of type int\"\n        with db.connect(current_app.config['DB_URL']) as conn:\n            cursor = conn.cursor()\n            cursor.execute(f\"DELETE FROM {cls.TABLE_NAME} WHERE id=%s\", (pk, ))\n            conn.commit()\n            cursor.close()\n    \n    # assign the given elements from the tuple as attributes to the object\n    # this method should not be called from outside as input tuple contains id from SQL Query\n    def _set_attr(self, t: Tuple):\n        for column, value in zip(self.__class__.COLUMN_NAMES, t):\n            setattr(self, column, value)\n    \n    # returns current attributes (excluding id) combined in a tuple\n    # ('James', True, ...) 
etc\n    def _get_attr(self):\n        return tuple([getattr(self,column) for column in self.__class__.COLUMN_NAMES[1:]])\n    \n    # returns current attributes in a dict with the column names as keys\n    # {'first_name': 'James', 'is_admin': True}\n    def _get_attr_dict(self):\n        return dict(zip(self.__class__.COLUMN_NAMES[1:], self._get_attr()))\n\n    # send a query to find if the id exists in the current table\n    # if it exists, return the row as a tuple\n    def _from_table_get_by_id(self, entry_id):\n        with db.connect(current_app.config['DB_URL']) as conn:\n            cursor = conn.cursor()\n            cursor.execute(\n                f'''SELECT * FROM {self.__class__.TABLE_NAME}\n                    WHERE id=%s''',\n                (entry_id, )\n            )\n            return cursor.fetchone()\n\n    # only returns new versions of attributes that have been changed, as a dictionary\n    # must not be called if entity is new\n    def _get_changed(self):\n        changed = {}\n        for column, original, current in zip(self.__class__.COLUMN_NAMES[1:], self._ORIGINAL_ATTR[1:], self._get_attr()):\n            if original != current:\n                changed[column] = current\n                print(original,current,column)\n        # id should not have been changed\n        if 'id' in changed:\n            del changed['id']\n        return changed\n\n    # checks whether any attributes are still unset\n    def _is_attr_complete(self):\n        for column in self.__class__.COLUMN_NAMES[1:]:\n            print(column,hasattr(self,column))\n            if not hasattr(self, column):\n                return False\n        return True\n    \n    @classmethod\n    def query_select_all(cls):\n        with db.connect(current_app.config['DB_URL']) as conn:\n            with conn.cursor() as cursor:\n                cursor.execute(f'SELECT * FROM {cls.TABLE_NAME}')\n                return cursor.fetchall()\n", "sub_path": "models/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 6206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "psycopg2.connect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 44, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 93, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 110, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 142, "usage_type": "name"}]}
+{"seq_id": "582120306", "text": "from enum import Enum\n\n\nclass Account:\n    def __init__(self,\n                 account_type=None,\n                 full_name=None,\n                 name=None,\n                 code=None,\n                 description=None,\n                 color=None,\n                 notes=None,\n                 commodity_namespace=None,\n                 commodity_mnemonic=None,\n                 hidden=False,\n                 tax=False,\n                 placeholder=True):\n        self.type = account_type\n        self.full_name = full_name\n        self.name = name\n        self.code = code\n        self.description = description\n        self.color = color\n        self.notes = notes\n        self.commodity_namespace = commodity_namespace\n        self.commodity_mnemonic = commodity_mnemonic\n        self.hidden = hidden\n        self.tax = tax\n        self.placeholder = placeholder\n\n    def __eq__(self, other):\n        if isinstance(other, Account):\n            return 
self.full_name == other.full_name\n        return False\n\n    def __str__(self):\n        return self.full_name if self.full_name is not None else \".\"\n\n\nclass AccountType(Enum):\n    # Asset types\n    CASH = 1\n    BANK = 2\n    STOCK = 3\n    MUTUAL = 4\n    ACCOUNTS_RECEIVABLE = 5\n    ASSET = 9\n    # Liability types\n    CREDIT_CARD = 11\n    ACCOUNTS_PAYABLE = 12\n    LIABILITY = 19\n    # Other\n    INCOME = 21\n    EXPENSE = 22\n    EQUITY = 99\n", "sub_path": "moneyscript/model/types/account.py", "file_name": "account.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "enum.Enum", "line_number": 40, "usage_type": "name"}]}
+{"seq_id": "547300010", "text": "from json import dumps\n\n\ndef obj2json(obj):\n    date_list = []\n    value_list = []\n    for record in obj:\n        date_list.append(record['Date'])\n        value_list.append(float(record['Value']))\n\n    return dumps({'date': date_list, 'value': value_list, 'status': False})\n\n\ndef errorRep(erro):\n    return dumps({'status': True, 'value': erro})", "sub_path": "mlp/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "json.dumps", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "41574715", "text": "# -*- coding: utf-8 -*-\n\"\"\"Tests for libvcs git repos.\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport datetime\nimport os\nimport textwrap\n\nimport pytest\n\nfrom libvcs import exc\nfrom libvcs._compat import PY2, string_types\nfrom libvcs.git import GitRemote, GitRepo, extract_status\nfrom libvcs.shortcuts import create_repo_from_pip_url\nfrom libvcs.util import run, which\n\nif not which('git'):\n    pytestmark = pytest.mark.skip(reason=\"git is not available\")\n\n\ndef test_repo_git_obtain_initial_commit_repo(tmpdir):\n    \"\"\"initial commit repos return 'initial'.\n\n    note: this behaves differently from git(1)'s use of the word \"bare\".\n    running `git rev-parse --is-bare-repository` would return false.\n    \"\"\"\n    repo_name = 'my_git_project'\n\n    run(['git', 'init', repo_name], cwd=str(tmpdir))\n\n    bare_repo_dir = tmpdir.join(repo_name)\n\n    git_repo = create_repo_from_pip_url(\n        **{\n            'pip_url': 'git+file://' + str(bare_repo_dir),\n            'repo_dir': str(tmpdir.join('obtaining a bare repo')),\n        }\n    )\n\n    git_repo.obtain()\n    assert git_repo.get_revision() == 'initial'\n\n\ndef test_repo_git_obtain_full(tmpdir, git_remote):\n    git_repo = create_repo_from_pip_url(\n        **{\n            'pip_url': 'git+file://' + git_remote,\n            'repo_dir': str(tmpdir.join('myrepo')),\n        }\n    )\n\n    git_repo.obtain()\n\n    test_repo_revision = run(['git', 'rev-parse', 'HEAD'], cwd=git_remote)\n\n    assert git_repo.get_revision() == test_repo_revision\n    assert os.path.exists(str(tmpdir.join('myrepo')))\n\n\ndef test_repo_update_handle_cases(tmpdir, git_remote, mocker):\n    git_repo = create_repo_from_pip_url(\n        **{\n            'pip_url': 'git+file://' + git_remote,\n            'repo_dir': str(tmpdir.join('myrepo')),\n        }\n    )\n\n    git_repo.obtain() # clone initial repo\n    mocka = mocker.spy(git_repo, 'run')\n    git_repo.update_repo()\n\n    mocka.assert_any_call(['symbolic-ref', '--short', 'HEAD'])\n\n    mocka.reset_mock()\n\n    # will only look up symbolic-ref if no rev specified for object\n    git_repo.rev = 'HEAD'\n    git_repo.update_repo()\n    assert mocker.call(['symbolic-ref', '--short', 
'HEAD']) not in mocka.mock_calls\n\n\ndef test_progress_callback(tmpdir, git_remote, mocker):\n def progress_callback_spy(output, timestamp):\n assert isinstance(output, string_types)\n assert isinstance(timestamp, datetime.datetime)\n\n progress_callback = mocker.Mock(\n name='progress_callback_stub', side_effect=progress_callback_spy\n )\n\n run(['git', 'rev-parse', 'HEAD'], cwd=git_remote)\n\n # create a new repo with the repo as a remote\n git_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'git+file://' + git_remote,\n 'repo_dir': str(tmpdir.join('myrepo')),\n 'progress_callback': progress_callback,\n }\n )\n git_repo.obtain()\n\n assert progress_callback.called\n\n\ndef test_remotes(parentdir, git_remote):\n repo_name = 'myrepo'\n remote_name = 'myremote'\n remote_url = 'https://localhost/my/git/repo.git'\n\n git_repo = create_repo_from_pip_url(\n pip_url='git+file://{git_remote}'.format(git_remote=git_remote),\n repo_dir=os.path.join(str(parentdir), repo_name),\n )\n git_repo.obtain()\n git_repo.set_remote(name=remote_name, url=remote_url)\n\n assert (remote_name, remote_url, remote_url) == git_repo.remote(remote_name)\n\n\ndef test_git_get_url_and_rev_from_pip_url():\n pip_url = 'git+ssh://git@bitbucket.example.com:7999/PROJ/repo.git'\n url, rev = GitRepo.get_url_and_revision_from_pip_url(pip_url)\n assert 'ssh://git@bitbucket.example.com:7999/PROJ/repo.git' == url\n assert rev is None\n\n pip_url = '%s@%s' % (\n 'git+ssh://git@bitbucket.example.com:7999/PROJ/repo.git',\n 'eucalyptus',\n )\n url, rev = GitRepo.get_url_and_revision_from_pip_url(pip_url)\n assert 'ssh://git@bitbucket.example.com:7999/PROJ/repo.git' == url\n assert rev == 'eucalyptus'\n\n # the git manual refers to this as \"scp-like syntax\"\n # https://git-scm.com/docs/git-clone\n pip_url = '%s@%s' % ('git+user@hostname:user/repo.git', 'eucalyptus')\n url, rev = GitRepo.get_url_and_revision_from_pip_url(pip_url)\n assert 'user@hostname:user/repo.git' == url\n assert rev == 'eucalyptus'\n\n\ndef test_remotes_preserves_git_ssh(parentdir, git_remote):\n # Regression test for #14\n repo_name = 'myexamplegit'\n repo_dir = os.path.join(str(parentdir), repo_name)\n remote_name = 'myremote'\n remote_url = 'git+ssh://git@github.com/tony/AlgoXY.git'\n\n git_repo = create_repo_from_pip_url(\n pip_url='git+file://{git_remote}'.format(git_remote=git_remote),\n repo_dir=repo_dir,\n )\n git_repo.obtain()\n git_repo.set_remote(name=remote_name, url=remote_url)\n\n assert (\n GitRemote(remote_name, remote_url, remote_url)._asdict()\n in git_repo.remotes().values()\n )\n\n\ndef test_private_ssh_format(pip_url_kwargs):\n pip_url_kwargs.update(\n **{'pip_url': 'git+ssh://github.com:' + '/tmp/omg/private_ssh_repo'}\n )\n\n with pytest.raises(exc.LibVCSException) as excinfo:\n create_repo_from_pip_url(**pip_url_kwargs)\n excinfo.match(r'is malformatted')\n\n\ndef test_ls_remotes(git_repo):\n remotes = git_repo.remotes()\n\n assert 'origin' in remotes\n assert 'origin' in git_repo.remotes(flat=True)\n\n\ndef test_get_remotes(git_repo):\n assert 'origin' in git_repo.remotes()\n\n\n@pytest.mark.parametrize(\n 'repo_name,new_repo_url',\n [\n ['myrepo', 'file:///apples'],\n ],\n)\ndef test_set_remote(git_repo, repo_name, new_repo_url):\n mynewremote = git_repo.set_remote(name=repo_name, url='file:///')\n\n assert 'file:///' in mynewremote, 'set_remote returns remote'\n\n assert 'file:///' in git_repo.remote(name=repo_name), 'remote returns remote'\n\n assert 'myrepo' in git_repo.remotes(), '.remotes() returns new remote'\n\n with 
pytest.raises(\n        exc.CommandError,\n        match='.*remote {repo_name} already exists.*'.format(repo_name=repo_name),\n    ):\n        mynewremote = git_repo.set_remote(name='myrepo', url=new_repo_url)\n\n    mynewremote = git_repo.set_remote(name='myrepo', url=new_repo_url, overwrite=True)\n\n    assert new_repo_url in git_repo.remote(\n        name='myrepo'\n    ), 'Running set_remote should overwrite previous remote'\n\n\ndef test_get_git_version(git_repo):\n    expected_version = git_repo.run(['--version']).replace('git version ', '')\n    assert git_repo.get_git_version()\n    assert expected_version == git_repo.get_git_version()\n\n\ndef test_get_current_remote_name(git_repo):\n    assert git_repo.get_current_remote_name() == 'origin'\n\n    new_branch = 'another-branch-with-no-upstream'\n    git_repo.run(['checkout', '-B', new_branch])\n    assert (\n        git_repo.get_current_remote_name() == new_branch\n    ), 'branch w/o upstream should return branch only'\n\n    new_remote_name = 'new_remote_name'\n    git_repo.set_remote(\n        name=new_remote_name, url='file://' + git_repo.path, overwrite=True\n    )\n    git_repo.run(['fetch', new_remote_name])\n    git_repo.run(\n        ['branch', '--set-upstream-to', '{}/{}'.format(new_remote_name, new_branch)]\n    )\n    assert (\n        git_repo.get_current_remote_name() == new_remote_name\n    ), 'Should reflect new upstream branch (different remote)'\n\n    upstream = '{}/{}'.format(new_remote_name, 'master')\n\n    git_repo.run(['branch', '--set-upstream-to', upstream])\n    assert (\n        git_repo.get_current_remote_name() == upstream\n    ), 'Should reflect upstream branch (different remote+branch)'\n\n    git_repo.run(['checkout', 'master'])\n\n    # Different remote, different branch\n    remote = '{}/{}'.format(new_remote_name, new_branch)\n    git_repo.run(['branch', '--set-upstream-to', remote])\n    assert (\n        git_repo.get_current_remote_name() == remote\n    ), 'Should reflect new upstream branch (different branch)'\n\n\ndef test_extract_status():\n    FIXTURE_A = textwrap.dedent(\n        \"\"\"\n        # branch.oid d4ccd4d6af04b53949f89fbf0cdae13719dc5a08\n        # branch.head fix-current-remote-name\n        1 .M N... 100644 100644 100644 91082f119279b6f105ee9a5ce7795b3bdbe2b0de 91082f119279b6f105ee9a5ce7795b3bdbe2b0de CHANGES\n        \"\"\" # NOQA: E501\n    )\n    assert {\n        \"branch_oid\": 'd4ccd4d6af04b53949f89fbf0cdae13719dc5a08',\n        \"branch_head\": 'fix-current-remote-name',\n    }.items() <= extract_status(FIXTURE_A).items()\n\n\n@pytest.mark.parametrize(\n    'fixture,expected_result',\n    [\n        [\n            \"\"\"\n            # branch.oid de6185fde0806e5c7754ca05676325a1ea4d6348\n            # branch.head fix-current-remote-name\n            # branch.upstream origin/fix-current-remote-name\n            # branch.ab +0 -0\n            1 .M N... 100644 100644 100644 91082f119279b6f105ee9a5ce7795b3bdbe2b0de 91082f119279b6f105ee9a5ce7795b3bdbe2b0de CHANGES\n            1 .M N... 
100644 100644 100644 302ca2c18d4c295ce217bff5f93e1ba342dc6665 302ca2c18d4c295ce217bff5f93e1ba342dc6665 tests/test_git.py\n \"\"\", # NOQA: E501\n {\n \"branch_oid\": 'de6185fde0806e5c7754ca05676325a1ea4d6348',\n \"branch_head\": 'fix-current-remote-name',\n \"branch_upstream\": 'origin/fix-current-remote-name',\n \"branch_ab\": '+0 -0',\n \"branch_ahead\": '0',\n \"branch_behind\": '0',\n },\n ],\n [\n '# branch.upstream moo/origin/myslash/remote',\n {\"branch_upstream\": 'moo/origin/myslash/remote'},\n ],\n [\n \"\"\"\n # branch.oid c3c5323abc5dca78d9bdeba6c163c2a37b452e69\n # branch.head libvcs-0.4.0\n # branch.upstream origin/libvcs-0.4.0\n # branch.ab +0 -0\n \"\"\",\n {\n \"branch_oid\": 'c3c5323abc5dca78d9bdeba6c163c2a37b452e69',\n \"branch_head\": 'libvcs-0.4.0',\n \"branch_upstream\": 'origin/libvcs-0.4.0',\n \"branch_ab\": '+0 -0',\n \"branch_ahead\": '0',\n \"branch_behind\": '0',\n },\n ],\n ],\n)\ndef test_extract_status_b(fixture, expected_result):\n if PY2:\n assert (\n extract_status(textwrap.dedent(fixture)).items() <= expected_result.items()\n )\n else:\n assert (\n extract_status(textwrap.dedent(fixture)).items() >= expected_result.items()\n )\n\n\n@pytest.mark.parametrize(\n 'fixture,expected_result',\n [\n [\n '# branch.ab +1 -83',\n {\n \"branch_ab\": '+1 -83',\n \"branch_ahead\": '1',\n \"branch_behind\": '83',\n },\n ],\n [\n \"\"\"\n # branch.ab +0 -0\n \"\"\",\n {\n \"branch_ab\": '+0 -0',\n \"branch_ahead\": '0',\n \"branch_behind\": '0',\n },\n ],\n [\n \"\"\"\n # branch.ab +1 -83\n \"\"\",\n {\n \"branch_ab\": '+1 -83',\n \"branch_ahead\": '1',\n \"branch_behind\": '83',\n },\n ],\n [\n \"\"\"\n # branch.ab +9999999 -9999999\n \"\"\",\n {\n \"branch_ab\": '+9999999 -9999999',\n \"branch_ahead\": '9999999',\n \"branch_behind\": '9999999',\n },\n ],\n ],\n)\ndef test_extract_status_c(fixture, expected_result):\n assert expected_result.items() <= extract_status(textwrap.dedent(fixture)).items()\n", "sub_path": "tests/test_git.py", "file_name": "test_git.py", "file_ext": "py", "file_size_in_byte": 11516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "libvcs.util.which", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark.skip", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}, {"api_name": "libvcs.util.run", "line_number": 29, "usage_type": "call"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 33, "usage_type": "call"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 45, "usage_type": "call"}, {"api_name": "libvcs.util.run", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 61, "usage_type": "call"}, {"api_name": "libvcs._compat.string_types", "line_number": 84, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "libvcs.util.run", "line_number": 91, "usage_type": "call"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 94, "usage_type": "call"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, 
"usage_type": "attribute"}, {"api_name": "libvcs.git.GitRepo.get_url_and_revision_from_pip_url", "line_number": 123, "usage_type": "call"}, {"api_name": "libvcs.git.GitRepo", "line_number": 123, "usage_type": "name"}, {"api_name": "libvcs.git.GitRepo.get_url_and_revision_from_pip_url", "line_number": 131, "usage_type": "call"}, {"api_name": "libvcs.git.GitRepo", "line_number": 131, "usage_type": "name"}, {"api_name": "libvcs.git.GitRepo.get_url_and_revision_from_pip_url", "line_number": 138, "usage_type": "call"}, {"api_name": "libvcs.git.GitRepo", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 150, "usage_type": "call"}, {"api_name": "libvcs.git.GitRemote", "line_number": 158, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 168, "usage_type": "call"}, {"api_name": "libvcs.exc.LibVCSException", "line_number": 168, "usage_type": "attribute"}, {"api_name": "libvcs.exc", "line_number": 168, "usage_type": "name"}, {"api_name": "libvcs.shortcuts.create_repo_from_pip_url", "line_number": 169, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 199, "usage_type": "call"}, {"api_name": "libvcs.exc.CommandError", "line_number": 200, "usage_type": "attribute"}, {"api_name": "libvcs.exc", "line_number": 200, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 184, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 184, "usage_type": "attribute"}, {"api_name": "textwrap.dedent", "line_number": 257, "usage_type": "call"}, {"api_name": "libvcs.git.extract_status", "line_number": 267, "usage_type": "call"}, {"api_name": "libvcs._compat.PY2", "line_number": 314, "usage_type": "name"}, {"api_name": "libvcs.git.extract_status", "line_number": 316, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 316, "usage_type": "call"}, {"api_name": "libvcs.git.extract_status", "line_number": 320, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 320, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 270, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 270, "usage_type": "attribute"}, {"api_name": "libvcs.git.extract_status", "line_number": 368, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 368, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 324, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 324, "usage_type": "attribute"}]} +{"seq_id": "379233742", "text": "import sys\nfrom typing import Dict\ndef get_argument_list() -> Dict[int, tuple[str,str,str]]:\n # Parse config file\n config_file = open(\"scalar_config.txt\",\"r\")\n config_file_lines = config_file.readlines()\n\n for i in range(len(config_file_lines)):\n config_file_lines[i] = config_file_lines[i].strip()\n\n argument_list = {}\n count = 1\n data_types = [\"int\",\"float\",\"double\",\"char\"]\n for x in config_file_lines:\n tokens = x.split()\n if len(tokens) > 3 or len(tokens) < 2:\n sys.stderr.write(\"Invalid Scalar Config Format\\n\")\n exit()\n if tokens[0].strip() not in data_types:\n sys.stderr.write(\"Invalid Input Data Type\\n\")\n exit()\n if len(tokens) == 3:\n argument_list[count] = (tokens[0].strip(),tokens[1].strip(),tokens[2].strip())\n else:\n argument_list[count] = 
(tokens[0].strip(),tokens[1].strip(),\"NaN\")\n        count = count + 1\n\n    return argument_list", "sub_path": "scalar_argument_list.py", "file_name": "scalar_argument_list.py", "file_ext": "py", "file_size_in_byte": 986, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.stderr.write", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 20, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "412517225", "text": "# coding:utf8\n\"\"\"\nA wrapper for module `_mysql`,\nmore efficient than mysqldb.\n\nAuthor: ilcwd\n\"\"\"\n\nfrom threading import local\nimport functools\nimport logging\nimport datetime\nimport time\n\nimport _mysql\nfrom _mysql_exceptions import (\n    IntegrityError,\n    OperationalError,\n)\n\n__all__ = [\n    'QUERY_TUPLE',\n    'QUERY_DICT',\n    'SQLOperationalError',\n    'is_lost_connection_exception',\n    'BaseDB',\n    'escape',\n]\n\nQUERY_TUPLE = 0\nQUERY_DICT = 1\nDEFAULT_CHARSET = 'utf8'\n\nSQL_KEY_ERROR = -2\n\nSQLOperationalError = OperationalError\n_logger = logging.getLogger(__name__)\n\n\nclass DBError(Exception):\n    pass\n\n\ndef escape(sql):\n    \"\"\"\n    :param sql: the value to escape.\n    :return: a `str` that is safe to embed in a SQL statement.\n    \"\"\"\n    if sql is None:\n        return ''\n\n    if isinstance(sql, unicode):\n        sql = sql.encode('utf8')\n\n    if isinstance(sql, (int, long, float)):\n        return str(sql)\n\n    if isinstance(sql, datetime.datetime):\n        return sql.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    if not isinstance(sql, str):\n        raise DBError(\"Invalid value: %s, unexpected type %s\" % (str(sql), type(sql)))\n\n    safe_sql = _mysql.escape_string(sql)\n    return safe_sql\n\n\nclass DBConnection(object):\n    CONNECTION_IDLE_TIMEOUT = 600\n\n    def __init__(self, **kw):\n        self._db_params = kw.copy()\n        self._charset = self._db_params.pop('charset')\n        # 131072 and 65536 are CLIENT_MULTI_RESULTS and CLIENT_MULTI_STATEMENTS respectively\n        self._db_params['client_flag'] = 131072 | 65536\n\n        self._mysql = None\n        self._last_used = 0\n        self.is_closed = False\n        self.inside_transaction = False\n        self.is_occupied = False\n        self.reconnect()\n\n    @classmethod\n    def set_global_idle_timeout(cls, timeout):\n        cls.CONNECTION_IDLE_TIMEOUT = int(timeout)\n\n    def set_inside_transaction(self, b):\n        self.inside_transaction = bool(b)\n\n    def reconnect(self):\n        self.close()\n\n        conn = _mysql.connect(**self._db_params)\n        conn.set_character_set(self._charset)\n        conn.autocommit(True)\n        self._mysql = conn\n\n        self._last_used = time.time()\n        self.inside_transaction = False\n        self.is_closed = False\n\n    def _is_timeout(self):\n        return abs(self._last_used - time.time()) > self.CONNECTION_IDLE_TIMEOUT\n\n    def close(self):\n        if self._mysql is not None:\n            self._mysql.close()\n        self.is_closed = True\n        self._mysql = None\n        self._last_used = 0\n\n    def __str__(self):\n        formatter = \"mysql://%(user)s@%(host)s:%(port)d/%(db)s\"\n        return formatter % self._db_params\n\n    def get_connection(self):\n        if self.is_closed:\n            raise DBError(\"DB object is closed\")\n\n        if self._is_timeout():\n            _logger.info(\"Connection timeout reached(%s), reconnect: %s\",\n                         self._last_used, str(self))\n            self.reconnect()\n        return self._mysql\n\n\ndef set_db_connection_idle_timeout(timeout):\n    DBConnection.set_global_idle_timeout(timeout)\n\n\ndef is_lost_connection_exception(ex):\n    if isinstance(ex, OperationalError):\n        if ex.args and
isinstance(ex.args, (list, tuple)):\n            if ex.args[0] in (\n                    2003,  # Can't connect to MySQL server\n                    2006,  # MySQL server has gone away\n                    2013,  # Lost connection to MySQL server during query\n                    1213,  # Deadlock found when trying to get lock\n                    1205,  # Lock wait timeout exceeded; try restarting transaction\n            ):\n                return True\n    return False\n\n\ndef db_executor_retry(func):\n    \"\"\"Retry once when a lost-connection error is raised.\"\"\"\n\n    @functools.wraps(func)\n    def wrapper(self, *args, **kwargs):\n        try:\n            result = func(self, *args, **kwargs)\n        except OperationalError as e:\n            if not is_lost_connection_exception(e):\n                raise\n\n            self.reconnect()\n\n            conn = self._get_connection()\n            _logger.info(\"DB retry error, db is %s, error is %s\", conn, e)\n            result = func(self, *args, **kwargs)\n        return result\n\n    return wrapper\n\n\nclass QueryWrapper(object):\n    \"\"\"\n    Query Wrapper for _mysql.connection.\n\n    Attentions:\n        * not thread safe, don't share with other threads\n    \"\"\"\n    def __init__(self, conn):\n        if conn.is_occupied:\n            raise DBError(\"Connection is occupied by others.\")\n\n        self.conn = conn\n\n    def __enter__(self):\n        self.query(\"BEGIN\")\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if exc_type:\n            self.query(\"ROLLBACK\")\n        else:\n            self.query(\"COMMIT\")\n\n    def execute(self, sql):\n        conn = self.conn.get_connection()\n        try:\n            conn.query(sql)\n            rows = conn.affected_rows()\n            # this value may depend on the driver, the OS and the CPU word size\n            if rows == 0xFFFFFFFFFFFFFFFF:  # -1 as an unsigned 64-bit integer\n                rows = 0\n\n            # assert conn.next_result() == -1, sql\n            return rows\n        except IntegrityError:\n            return SQL_KEY_ERROR\n\n    def query(self, sql, how=QUERY_TUPLE):\n        conn = self.conn.get_connection()\n        conn.query(sql)\n        res = conn.store_result()\n        if res:\n            result = res.fetch_row(res.num_rows(), how)\n            assert conn.next_result() == -1, sql\n            return result\n        else:\n            return ()\n\n    def query_one(self, sql, how=QUERY_TUPLE):\n        rows = self.query(sql, how)\n        if rows:\n            return rows[0]\n        return None\n\n    def insert(self, sql):\n        conn = self.conn.get_connection()\n        try:\n            conn.query(sql)\n            rows = conn.affected_rows()\n            # this value may depend on the driver, the OS and the CPU word size\n            if rows == 0xFFFFFFFFFFFFFFFF:  # -1 as an unsigned 64-bit integer\n                rows = 0\n            insertid = conn.insert_id()\n\n            # assert conn.next_result() == -1, sql\n            return insertid, rows\n        except IntegrityError:\n            return -1, SQL_KEY_ERROR\n\n    def call_procedure(self, procedure, params=None, how=QUERY_TUPLE):\n        conn = self.conn.get_connection()\n\n        def _obj_to_str(obj):\n            if isinstance(obj, (int, long)):\n                return str(obj)\n            else:\n                return '\"%s\"' % escape(obj)\n\n        if isinstance(params, (list, tuple)):\n            sargs = ','.join(map(_obj_to_str, params))\n            conn.query('CALL %s(%s)' % (procedure, sargs))\n        elif params is None:\n            conn.query('CALL %s()' % (procedure, ))\n        else:\n            conn.query('CALL %s(\"%s\")' % (procedure, escape(params)))\n\n        ret = []\n        res = conn.store_result()\n        if res:\n            ret.append(res.fetch_row(res.num_rows(), how))\n        # 0 - success\n        # -1 - success and no more results\n        # >0 - error\n        while conn.next_result() != -1:\n            res = conn.store_result()\n            if res:\n                ret.append(res.fetch_row(res.num_rows(), how))\n\n        return ret\n\n\nclass BaseDB(local):\n    def __init__(self, dbconfig):\n        self.config = dbconfig.copy()\n        if 'charset' not in self.config:\n            self.config['charset'] = DEFAULT_CHARSET\n        self.conn = DBConnection(**self.config)\n\n    def _get_connection(self):\n        return self.conn\n\n    def reconnect(self):\n        self.conn.reconnect()\n\n    def close(self):\n        self.conn.close()\n\n    @db_executor_retry\n    def execute(self, sql):\n        return QueryWrapper(self._get_connection()).execute(sql)\n\n
    @db_executor_retry\n    def query(self, sql, how=QUERY_TUPLE):\n        return QueryWrapper(self._get_connection()).query(sql, how)\n\n    @db_executor_retry\n    def query_one(self, sql, how=QUERY_TUPLE):\n        return QueryWrapper(self._get_connection()).query_one(sql, how)\n\n    @db_executor_retry\n    def insert(self, sql):\n        \"\"\"\n        :param sql:\n        :return: insert_id, affected_rows\n        \"\"\"\n        return QueryWrapper(self._get_connection()).insert(sql)\n\n    @db_executor_retry\n    def call_procedure(self, procedure, params=None, how=QUERY_TUPLE):\n        \"\"\"Call a stored procedure.\n        Attentions:\n            * param `procedure` is not safe\n        \"\"\"\n        return QueryWrapper(self._get_connection()).call_procedure(procedure, params, how)\n\n    def session(self):\n        \"\"\"\n        Create a transaction.\n\n        Examples:\n\n            mydb = BaseDB(myconfig)\n            # *do not* share `conn` outside!!\n            with mydb.session() as conn:\n                conn.query(\"SHOW TABLES\")\n\n        Attentions:\n\n            * do not deal with `OperationalError`, try and catch it yourself.\n\n        \"\"\"\n        return QueryWrapper(self._get_connection())\n", "sub_path": "slutils/mysql.py", "file_name": "mysql.py", "file_ext": "py", "file_size_in_byte": 8793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "_mysql_exceptions.OperationalError", "line_number": 36, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}, {"api_name": "_mysql.escape_string", "line_number": 64, "usage_type": "call"}, {"api_name": "_mysql.connect", "line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "_mysql_exceptions.OperationalError", "line_number": 133, "usage_type": "argument"}, {"api_name": "_mysql_exceptions.OperationalError", "line_number": 153, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 149, "usage_type": "call"}, {"api_name": "_mysql_exceptions.IntegrityError", "line_number": 201, "usage_type": "name"}, {"api_name": "_mysql_exceptions.IntegrityError", "line_number": 233, "usage_type": "name"}, {"api_name": "threading.local", "line_number": 268, "usage_type": "name"}]} +{"seq_id": "571409520", "text": "import pymongo as mongo\nimport bson\n\nfrom rflib.defs import *\nfrom rflib.ipc.MongoIPC import format_address\n\nRFENTRY_IDLE_VM_PORT = 1\nRFENTRY_IDLE_DP_PORT = 2\nRFENTRY_ASSOCIATED = 3\nRFENTRY_ACTIVE = 4\n\nRFENTRY = 0\nRFCONFIGENTRY = 1\nclass MongoTableEntryFactory:\n    @staticmethod\n    def make(type_):\n        if type_ == RFENTRY:\n            return RFEntry()\n        elif type_ == RFCONFIGENTRY:\n            return RFConfigEntry()\n\nclass MongoTable:\n    def __init__(self, address, name, entry_type):\n        self.address = format_address(address)\n        self.connection = mongo.Connection(*self.address)\n        self.data = self.connection[MONGO_DB_NAME][name]\n        self.entry_type = entry_type\n\n    def get_entries(self, **kwargs):\n        for (k, v) in kwargs.items():\n            kwargs[k] = str(v)\n        results = self.data.find(kwargs)\n        entries = []\n        for result in results:\n            entry = MongoTableEntryFactory.make(self.entry_type)\n            entry.from_dict(result)\n            entries.append(entry)\n        return entries\n\n    def set_entry(self, entry):\n        # TODO: enforce (*_id, *_port) uniqueness restriction\n        entry.id = self.data.save(entry.to_dict())\n\n    def remove_entry(self, entry):\n        self.data.remove(entry.id)\n\n    def clear(self):\n        self.data.remove()\n\n    def __str__(self):\n        s = \"\"\n        for
entry in self.get_entries():\n            s += str(entry) + \"\\n\\n\"\n        return s.strip(\"\\n\")\n\n\nclass RFTable(MongoTable):\n    def __init__(self, address=MONGO_ADDRESS):\n        MongoTable.__init__(self, address, RFTABLE_NAME, RFENTRY)\n\n    def get_entry_by_vm_port(self, vm_id, vm_port):\n        result = self.get_entries(vm_id=vm_id,\n                                  vm_port=vm_port)\n        if not result:\n            return None\n        return result[0]\n\n    def get_entry_by_dp_port(self, ct_id, dp_id, dp_port):\n        result = self.get_entries(ct_id=ct_id,\n                                  dp_id=dp_id,\n                                  dp_port=dp_port)\n        if not result:\n            return None\n        return result[0]\n\n    def get_entry_by_vs_port(self, vs_id, vs_port):\n        result = self.get_entries(vs_id=vs_id,\n                                  vs_port=vs_port)\n        if not result:\n            return None\n        return result[0]\n\n    def get_dp_entries(self, ct_id, dp_id):\n        return self.get_entries(ct_id=ct_id, dp_id=dp_id)\n\n    def is_dp_registered(self, ct_id, dp_id):\n        return bool(self.get_dp_entries(ct_id, dp_id))\n\n\nclass RFConfig(MongoTable):\n    def __init__(self, ifile, address=MONGO_ADDRESS):\n        MongoTable.__init__(self, address, RFCONFIG_NAME, RFCONFIGENTRY)\n        # TODO: perform validation of config\n        configfile = open(ifile)\n        entries = [line.strip(\"\\n\").split(\",\") for line in configfile.readlines()[1:]]\n        for (a, b, c, d, e) in entries:\n            self.set_entry(RFConfigEntry(int(a, 16), int(b),\n                                         int(c),\n                                         int(d, 16), int(e)))\n\n    def get_config_for_vm_port(self, vm_id, vm_port):\n        result = self.get_entries(vm_id=vm_id,\n                                  vm_port=vm_port)\n        if not result:\n            return None\n        return result[0]\n\n    def get_config_for_dp_port(self, ct_id, dp_id, dp_port):\n        result = self.get_entries(ct_id=ct_id,\n                                  dp_id=dp_id,\n                                  dp_port=dp_port)\n        if not result:\n            return None\n        return result[0]\n\n\n# Convenience functions for packing/unpacking to a dict for BSON representation\ndef load_from_dict(src, obj, attr):\n    setattr(obj, attr, src[attr])\n\ndef pack_into_dict(dest, obj, attr):\n    value = getattr(obj, attr)\n    dest[attr] = \"\" if value is None else str(value)\n\n\nclass RFEntry:\n    def __init__(self, vm_id=None, vm_port=None, ct_id=None, dp_id=None, dp_port=None, vs_id=None, vs_port=None):\n        self.id = None\n        self.vm_id = vm_id\n        self.vm_port = vm_port\n        self.ct_id = ct_id\n        self.dp_id = dp_id\n        self.dp_port = dp_port\n        self.vs_id = vs_id\n        self.vs_port = vs_port\n\n    def _is_idle_vm_port(self):\n        return (self.vm_id is not None and\n                self.vm_port is not None and\n                self.ct_id is None and\n                self.dp_id is None and\n                self.dp_port is None and\n                self.vs_id is None and\n                self.vs_port is None)\n\n    def _is_idle_dp_port(self):\n        return (self.vm_id is None and\n                self.vm_port is None and\n                self.ct_id is not None and\n                self.dp_id is not None and\n                self.dp_port is not None and\n                self.vs_id is None and\n                self.vs_port is None)\n\n    def make_idle(self, type_):\n        if type_ == RFENTRY_IDLE_VM_PORT:\n            self.ct_id = None\n            self.dp_id = None\n            self.dp_port = None\n            self.vs_id = None\n            self.vs_port = None\n        elif type_ == RFENTRY_IDLE_DP_PORT:\n            self.vm_id = None\n            self.vm_port = None\n            self.vs_id = None\n            self.vs_port = None\n\n    def associate(self, id_, port, ct_id=None):\n        if self._is_idle_vm_port():\n            self.ct_id = ct_id\n            self.dp_id = id_\n            self.dp_port = port\n        elif self._is_idle_dp_port():\n            self.vm_id = id_\n            self.vm_port = port\n        else:\n            raise ValueError\n\n    def activate(self, vs_id, vs_port):\n        self.vs_id = vs_id\n        self.vs_port = vs_port\n\n    def get_status(self):\n        if self._is_idle_vm_port():\n            return RFENTRY_IDLE_VM_PORT\n        elif self._is_idle_dp_port():\n            return RFENTRY_IDLE_DP_PORT\n        elif self.vs_id is None and self.vs_port is None:\n            return RFENTRY_ASSOCIATED\n        else:\n            return RFENTRY_ACTIVE\n\n    def __str__(self):\n        return \"vm_id: %s\\nvm_port: %s\\n\"\\\n               \"dp_id: %s\\ndp_port: %s\\n\"\\\n               \"vs_id: %s\\nvs_port: %s\\n\"\\\n               \"ct_id: %s\\nstatus: %s\" % (str(self.vm_id),\n                                          str(self.vm_port),\n                                          str(self.dp_id),\n                                          str(self.dp_port),\n                                          str(self.vs_id),\n                                          str(self.vs_port),\n                                          str(self.ct_id),\n                                          str(self.get_status()))\n\n    def from_dict(self, data):\n        for k, v in data.items():\n            if str(v) == \"\":\n                data[k] = None\n            elif k != \"_id\":  # All our data is int\n                data[k] = int(v)\n        self.id = data[\"_id\"]\n        load_from_dict(data, self, \"vm_id\")\n        load_from_dict(data, self, \"vm_port\")\n        load_from_dict(data, self, \"ct_id\")\n        load_from_dict(data, self, \"dp_id\")\n        load_from_dict(data, self, \"dp_port\")\n        load_from_dict(data, self, \"vs_id\")\n        load_from_dict(data, self, \"vs_port\")\n\n    def to_dict(self):\n        data = {}\n        if self.id is not None:\n            data[\"_id\"] = self.id\n        pack_into_dict(data, self, \"vm_id\")\n        pack_into_dict(data, self, \"vm_port\")\n        pack_into_dict(data, self, \"ct_id\")\n        pack_into_dict(data, self, \"dp_id\")\n        pack_into_dict(data, self, \"dp_port\")\n        pack_into_dict(data, self, \"vs_id\")\n        pack_into_dict(data, self, \"vs_port\")\n        return data\n\n\nclass RFConfigEntry:\n    def __init__(self, vm_id=None, vm_port=None, ct_id=None, dp_id=None, dp_port=None):\n        self.id = None\n        self.vm_id = vm_id\n        self.vm_port = vm_port\n        self.ct_id = ct_id\n        self.dp_id = dp_id\n        self.dp_port = dp_port\n\n    def __str__(self):\n        return \"vm_id: %s vm_port: %s \"\\\n               \"dp_id: %s dp_port: %s \"\\\n               \"ct_id: %s \" % (str(self.vm_id),\n                               str(self.vm_port),\n                               str(self.dp_id),\n                               str(self.dp_port),\n                               str(self.ct_id))\n\n    def from_dict(self, data):\n        for k, v in data.items():\n            if str(v) == \"\":\n                data[k] = None\n        self.id = data[\"_id\"]\n        load_from_dict(data, self, \"vm_id\")\n        load_from_dict(data, self, \"vm_port\")\n        load_from_dict(data, self, \"ct_id\")\n        load_from_dict(data, self, \"dp_id\")\n        load_from_dict(data, self, \"dp_port\")\n\n\n    def to_dict(self):\n        data = {}\n        if self.id is not None:\n            data[\"_id\"] = self.id\n        pack_into_dict(data, self, \"vm_id\")\n        pack_into_dict(data, self, \"vm_port\")\n        pack_into_dict(data, self, \"ct_id\")\n        pack_into_dict(data, self, \"dp_id\")\n        pack_into_dict(data, self, \"dp_port\")\n        return data\n", "sub_path": "rfserver/rftable.py", "file_name": "rftable.py", "file_ext": "py", "file_size_in_byte": 9035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "rflib.ipc.MongoIPC.format_address", "line_number": 24, "usage_type": "call"}, {"api_name": "pymongo.Connection", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "644409551", "text": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2016 Ilhan Polat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nfrom copy import deepcopy\nimport numpy as np\nfrom numpy.linalg import cond, eig, norm\nfrom scipy.linalg import svdvals, qr, block_diag\nfrom ._aux_linalg import haroldsvd, matrix_slice, e_i\nfrom ._classes import *\n\n\"\"\"\nTODO Though the descriptor code also works up-to-production, I truncated\nto explicit systems. I better ask around if anybody needs them (though\nthe answer to such a question is always a yes).\n\"\"\"\n\n__all__ = ['staircase', 'cancellation_distance', 'minimal_realization']\n\n\ndef staircase(A, B, C,\n              compute_T=False, form='c', invert=False, block_indices=False):\n    \"\"\"\n    The staircase form is used very often to assess system properties.\n    Given a state system matrix triplet A,B,C, this function computes\n    the so-called controller/observer-Hessenberg form such that the resulting\n    system matrices have the block-form (x denoting the possibly nonzero\n    blocks)\n\n    [x x x x x|x]\n    [x x x x x|0]\n    [0 x x x x|0]\n    [0 0 x x x|0]\n    [0 0 0 x x|0]\n    [x x x x x|x]\n    [x x x x x|x]\n\n    For controllability and observability, the existence of zero-rank\n    subdiagonal blocks can be checked, as opposed to forming the Kalman\n    matrix and checking the rank. The staircase method can be numerically\n    more stable since for certain matrices, A^n computations can\n    introduce large errors (for some A that have entries with varying\n    order of magnitudes). But it is also prone to numerical rank guessing\n    mismatches.\n\n    Notice that, if we use the pertransposed data, then we obtain the\n    observer form: the user is usually asked to supply the data as\n    :math:`A,B,C \\Rightarrow A^T,C^T,B^T` and then to transpose back the\n    result. It is just silly to ask the user to do that. Hence\n    the additional ``form`` option denoting whether it is the observer or\n    the controller form that is requested.\n\n\n    Parameters\n    ----------\n\n    A,B,C : {(n,n),(n,m),(p,n)} array_like\n        System matrices to be converted\n    compute_T : bool, optional\n        Whether the transformation matrix T should be computed or not\n    form : { 'c' , 'o' }, optional\n        Determines whether the controller- or observer-Hessenberg form\n        will be computed.\n    invert : bool, optional\n        Whether to select which side the B or C matrix will be compressed.\n        For example, the default case returns the B matrix with (if any)\n        zero rows at the bottom. invert option flips this choice either in\n        B or C matrices depending on the \"form\" switch.\n    block_indices : bool, optional\n        Whether to also return the array of identified block sizes (see\n        ``k`` below).\n\n    Returns\n    -------\n\n    Ah,Bh,Ch : {(n,n),(n,m),(p,n)} 2D numpy arrays\n        Converted system matrices\n    T : (n,n) 2D numpy array\n        If the boolean ``compute_T`` is true, returns the transformation\n        matrix such that\n\n
        .. math::\n\n            \\\\left[\\\\begin{array}{c|c}\n            T^{-1}AT &T^{-1}B \\\\\\\\ \\\\hline\n            CT & D\n            \\\\end{array}\\\\right]\n\n        is in the desired staircase form.\n    k: Numpy array\n        If the boolean ``block_indices`` is true, returns the array\n        of controllable/observable block sizes identified during block\n        diagonalization\n\n    \"\"\"\n\n    if form not in {'c', 'o'}:\n        raise ValueError('The \"form\" key can only take values '\n                         '\\\"c\\\" or \\\"o\\\" denoting\\ncontroller- or '\n                         'observer-Hessenberg form.')\n    if form == 'o':\n        A, B, C = A.T, C.T, B.T\n\n    n = A.shape[0]\n    ub, sb, vb, m0 = haroldsvd(B, also_rank=True)\n    cble_block_indices = np.empty((1, 0))\n\n    # Trivially Uncontrollable Case\n    # Skip the first branch of the loop by making m0 greater than n\n    # such that the matrices are returned as is without any computation\n    if m0 == 0:\n        m0 = n + 1\n        cble_block_indices = np.array([0])\n\n    # After these, start the regular case\n    if n > m0:  # If it is not a square system with full rank B\n\n        A0 = ub.T.dot(A.dot(ub))\n\n        # Row compress B and consistent zero blocks with the reported rank\n        B0 = sb.dot(vb)\n        B0[m0:, :] = 0.\n        C0 = C.dot(ub)\n        cble_block_indices = np.append(cble_block_indices, m0)\n\n        if compute_T:\n            P = block_diag(np.eye(n-ub.T.shape[0]), ub.T)\n\n        # Since we deal with submatrices, we need to increase the\n        # default tolerance to reasonably high values that are\n        # related to the original data to get exact zeros\n        tol_from_A = n*norm(A, 1)*np.finfo(float).eps\n\n        # Region of interest\n        m = m0\n        ROI_start = 0\n        ROI_size = 0\n\n        for dummy_row_counter in range(A.shape[0]):\n            ROI_start += ROI_size\n            ROI_size = m\n            h1, h2, h3, h4 = matrix_slice(A0[ROI_start:, ROI_start:],\n                                          (ROI_size, ROI_size))\n            uh3, sh3, vh3, m = haroldsvd(h3, also_rank=True,\n                                         rank_tol=tol_from_A)\n\n            # Make sure reported rank and sh3 are consistent about zeros\n            sh3[sh3 < tol_from_A] = 0.\n\n            # If the resulting subblock is not full row or zero rank\n            if 0 < m < h3.shape[0]:\n                cble_block_indices = np.append(cble_block_indices, m)\n                if compute_T:\n                    P = block_diag(np.eye(n-uh3.shape[1]), uh3.T).dot(P)\n                A0[ROI_start:, ROI_start:] = np.r_[np.c_[h1, h2],\n                                                   np.c_[sh3.dot(vh3),\n                                                         uh3.T.dot(h4)]]\n                A0 = A0.dot(block_diag(np.eye(n-uh3.shape[1]), uh3))\n                C0 = C0.dot(block_diag(np.eye(n-uh3.shape[1]), uh3))\n                # Clean up\n                A0[abs(A0) < tol_from_A] = 0.\n                C0[abs(C0) < tol_from_A] = 0.\n            elif m == h3.shape[0]:\n                cble_block_indices = np.append(cble_block_indices, m)\n                break\n            else:\n                break\n\n        if invert:\n            A0 = np.fliplr(np.flipud(A0))\n            B0 = np.flipud(B0)\n            C0 = np.fliplr(C0)\n            if compute_T:\n                P = np.flipud(P)\n\n        if form == 'o':\n            A0, B0, C0 = A0.T, C0.T, B0.T\n\n        if compute_T:\n            if block_indices:\n                return A0, B0, C0, P.T, cble_block_indices\n            else:\n                return A0, B0, C0, P.T\n        else:\n            if block_indices:\n                return A0, B0, C0, cble_block_indices\n            else:\n                return A0, B0, C0\n\n    else:  # Square system B full rank ==> trivially controllable\n        cble_block_indices = np.array([n])\n        if form == 'o':\n            A, B, C = A.T, C.T, B.T\n\n        if compute_T:\n            if block_indices:\n                return A, B, C, np.eye(n), cble_block_indices\n            else:\n                return A, B, C, np.eye(n)\n        else:\n            if block_indices:\n                return A, B, C, cble_block_indices\n            else:\n                return A, B, C\n\n\ndef cancellation_distance(F, G):\n    \"\"\"\n    Given matrices :math:`F,G`, computes the upper and lower bounds of\n    the perturbation needed to render the pencil [F-pI | G] rank deficient.\n    It is used for assessing the controllability/observability degenerate\n    distance and hence for minimality assessment.\n\n
    Parameters\n    ----------\n\n    F,G : 2D arrays\n        Pencil matrices to be checked for rank deficiency distance\n\n    Returns\n    -------\n\n    upper2 : float\n        Upper bound on the norm of the perturbation\n        :math:`\\\\left[\\\\begin{array}{c|c}dF & dG\\\\end{array}\\\\right]` such\n        that :math:`\\\\left[\\\\begin{array}{c|c}F+dF-pI & G+dG \\\\end{array}\n        \\\\right]` is rank deficient.\n    upper1 : float\n        A theoretically softer upper bound than the upper2 for the\n        same quantity.\n    lower0 : float\n        Lower bound on the same quantity given in upper2\n    e_f : complex\n        Indicates the eigenvalue that renders [F + dF - pI | G + dG ]\n        rank deficient i.e. equals to the p value at the closest rank\n        deficiency.\n    radius : float\n        The perturbation with the norm bound \"upper2\" is located within\n        a disk in the complex plane whose center is on \"e_f\" and whose\n        radius is bounded by this output.\n\n    Notes\n    -----\n    Implements the algorithm given in D.Boley SIMAX vol.11(4) 1990.\n\n    \"\"\"\n    if not np.equal(*F.shape):\n        raise ValueError('F input must be a square array.')\n    if F.shape[0] != G.shape[0]:\n        raise ValueError('F and G inputs must have the same number of rows.')\n\n    A = np.c_[F, G].T\n    n, m = A.shape\n    B = e_i(n, np.s_[:m])\n    D = e_i(n, np.s_[m:])\n    C = qr(2*np.random.rand(n, n-m) - 1, mode='economic')[0]\n    evals, V = eig(np.c_[A, C])\n    K = cond(V)\n    X = V[:m, :]\n    Y = V[m:, :]\n\n    upp0 = [0]*n\n    for x in range(n):\n        upp0[x] = norm((C-evals[x]*D).dot(Y[:, x])) / norm(X[:, x])\n\n    f = np.argsort(upp0)[0]\n    e_f = evals[f]\n    upper1 = upp0[f]\n    upper2 = svdvals(A - e_f*B)[-1]\n    lower0 = upper2/(K+1)\n    radius = upper2*K\n\n    return upper2, upper1, lower0, e_f, radius\n\n\ndef minimal_realization(G, tol=1e-6):\n    \"\"\"\n    Given a system realization G, this computes a minimal realization such\n    that, if a State representation is given, the returned representation is\n    controllable and observable within the given tolerance ``tol``. If\n    a Transfer representation is given, then the fractions are simplified\n    in the representation entries.\n\n    Parameters\n    ----------\n    G : State, Transfer\n        System representation to be checked for minimality\n    tol: float\n        The sensitivity threshold for the cancellation.\n\n    Returns\n    -------\n    G_min : realization\n        Minimal realization of the input G\n\n    Notes\n    -----\n    For State() inputs the algorithm uses ``cancellation_distance()`` and\n    ``staircase()`` for the tests. A basic two-pass algorithm performs:\n    1- First, the distance to mode cancellation is computed, and then\n    the Hessenberg form is obtained with the identified o'ble/c'ble\n    block numbers.\n    2- If the staircase form reports that there are no cancellations but the\n    distance is less than the tolerance, distance wins and the corresponding\n    mode is removed.\n\n    For Transfer() inputs, every entry of the representation is checked for\n    pole/zero cancellations and ``tol`` is used to decide for the decision\n    precision.\n    \"\"\"\n\n    try:\n        if G._isgain:\n            return G\n\n        A, B, C, D = G.matrices\n        Am, Bm, Cm = _minimal_realization_state(A, B, C, tol=tol)\n        return State(Am, Bm, Cm, D, dt=G.SamplingPeriod)\n    except AttributeError:\n        try:\n            num, den = G.polynomials\n            numm, denm = _minimal_realization_transfer(num, den, tol=tol)\n            return Transfer(numm, denm, dt=G.SamplingPeriod)\n        except AttributeError:\n            raise ValueError(\"The argument G is not a State() or a \"\n                             \"Transfer() representation. Instead I got \"\n                             \"{}\".format(type(G).__qualname__))\n\n
\ndef _minimal_realization_state(A, B, C, tol=1e-6):\n    keep_looking = True\n    run_out_of_states = False\n\n    while keep_looking:\n        n = A.shape[0]\n        # Make sure that we still have states left\n        if n == 0:\n            A, B, C = [(np.empty((1, 0)))]*3\n            break\n\n        kc = cancellation_distance(A, B)[0]\n        ko = cancellation_distance(A.T, C.T)[0]\n\n        if min(kc, ko) > tol:  # no cancellation\n            keep_looking = False\n        else:\n\n            Ac, Bc, Cc, blocks_c = staircase(A, B, C, block_indices=True)\n            Ao, Bo, Co, blocks_o = staircase(A, B, C, form='o', invert=True,\n                                             block_indices=True)\n\n            # ===============Extra Check============================\n            \"\"\"\n            Here kc,ko reports a possible cancellation so staircase\n            should also report fewer than n, c'ble/o'ble blocks in the\n            decomposition. If not, staircase tol should be increased.\n            Otherwise either infinite loop or uno'ble branch removes\n            the system matrices\n\n            Thus, we remove the last scalar or the two-by-two block\n            artificially. Because we trust the cancelling distance,\n            more than our first born. The possible cases of unc'ble\n            modes are\n\n            -- one real distinct eigenvalue\n            -- two real identical eigenvalues\n            -- two complex conjugate eigenvalues\n\n            We don't regret this. This is sparta.\n            \"\"\"\n\n            # If unobservability distance is closer, let it handle first\n            if ko >= kc:\n                if (sum(blocks_c) == n and kc <= tol):\n                    Ac_mod, Bc_mod, Cc_mod, kc_mod = Ac, Bc, Cc, kc\n\n                    while kc_mod <= tol:  # Until cancel dist gets big\n                        Ac_mod, Bc_mod, Cc_mod = (Ac_mod[:-1, :-1],\n                                                  Bc_mod[:-1, :],\n                                                  Cc_mod[:, :-1])\n\n                        if Ac_mod.size == 0:\n                            A, B, C = [(np.empty((1, 0)))]*3\n                            run_out_of_states = True\n                            break\n                        else:\n                            kc_mod = cancellation_distance(Ac_mod, Bc_mod)[0]\n\n                    kc = kc_mod\n                    # Fake an iterable to fool the sum below\n                    blocks_c = [sum(blocks_c)-Ac_mod.shape[0]]\n\n            # Same with the o'ble modes\n            if (sum(blocks_o) == n and ko <= tol):\n                Ao_mod, Bo_mod, Co_mod, ko_mod = Ao, Bo, Co, ko\n\n                while ko_mod <= tol:  # Until cancel dist gets big\n                    Ao_mod, Bo_mod, Co_mod = (Ao_mod[1:, 1:],\n                                              Bo_mod[1:, :],\n                                              Co_mod[:, 1:])\n\n                    # If there is nothing left, break out everything\n                    if Ao_mod.size == 0:\n                        A, B, C = [(np.empty((1, 0)))]*3\n                        run_out_of_states = True\n                        break\n                    else:\n                        ko_mod = cancellation_distance(Ao_mod, Bo_mod)[0]\n\n                ko = ko_mod\n                blocks_o = [sum(blocks_o)-Ao_mod.shape[0]]\n\n            # ===============End of Extra Check=====================\n\n            if run_out_of_states:\n                break\n\n            if sum(blocks_c) > sum(blocks_o):\n                remove_from = 'o'\n            elif sum(blocks_c) < sum(blocks_o):\n                remove_from = 'c'\n            else:  # both have the same number of states to be removed\n                if kc >= ko:\n                    remove_from = 'o'\n                else:\n                    remove_from = 'c'\n\n            if remove_from == 'c':\n                l = int(sum(blocks_c))\n                A, B, C = Ac[:l, :l], Bc[:l, :], Cc[:, :l]\n            else:\n                l = n - int(sum(blocks_o))\n                A, B, C = Ao[l:, l:], Bo[l:, :], Co[:, l:]\n\n    return A, B, C\n\n\ndef _minimal_realization_transfer(num, den, tol=1e-6):\n    '''\n    A helper function for obtaining a minimal representation of the\n    Transfer() models.\n    The method is pretty straightforward; going over the pole/zero pairs\n    and removing them if they are either exactly the same or within their\n    neighbourhood in the 2-norm sense with threshold `tol`.\n    '''\n    # MIMO or not?\n    if isinstance(num, list):\n        # Don't touch the original data\n        num = deepcopy(num)\n        den = deepcopy(den)\n\n        # Walk over entries for pole/zero cancellations\n        m, p = len(num[0]), len(num)\n        for row in range(p):\n            for col in range(m):\n                (num[row][col],
                 den[row][col]) = _minimal_realization_simplify(num[row][col],\n                                                                den[row][col],\n                                                                tol)\n    # It's SISO, search directly\n    else:\n        num, den = _minimal_realization_simplify(num, den, tol)\n\n    return num, den\n\n\ndef _minimal_realization_simplify(num, den, tol):\n    '''\n    This is a simple distance checker between each root of num and all\n    roots of den to see whether there are any pairs that are sufficiently\n    close to each other defined by `tol`.\n    '''\n    # Early exit if numerator is a scalar\n    if num.size == 1:\n        m = den[0, 0]\n        return num/m, den/m\n\n    # Get the gain from leading coefficients to work with monic polynomials\n    k_gain = num[0, 0]/den[0, 0]\n    plz = np.roots(den[0])\n    zrz = np.roots(num[0])\n\n    # Root finding algorithms are inherently ill-conditioned. Hence it might\n    # happen that real multiplicities can turn out as complex pairs, e.g.,\n    # np.roots(np.poly([1,2,3,2,3,4]))\n\n    # This is a simple walk over zeros checking if there is something close\n    # enough to it in the pole list with the slight extra check:\n    # If we encounter 3.0 zero vs. 3.0+1e-9i pole, we look for another real\n    # 3.0 in the zeros and for the conjugate of the pole and vice versa.\n\n    # Zeros (both reals and one element of each complex pairs)\n    zrz = np.r_[zrz[np.imag(zrz) == 0.], zrz[np.imag(zrz) > 0.]]\n\n    safe_z = []\n\n    for z in zrz:\n        dist = np.abs(plz-z)\n        bool_cz = np.imag(z) > 0\n        # Do we have a match ?\n        if np.min(dist) < tol + tol*np.abs(z):\n            # Get the index and check the complex part\n            match_index = np.argmin(dist)\n            pz = plz[match_index]\n            bool_cp = np.imag(pz) > 0\n\n            if bool_cz and bool_cp:\n                plz = np.delete(plz, match_index)\n                # remove also the conjugate\n                del_index, = np.where(plz == np.conj(pz))\n                plz = np.delete(plz, del_index[0])\n\n            elif bool_cz and not bool_cp:\n                # We have a complex pair of zeros and a real pole\n                # If there is another entry of this pole then we\n                # cancel both of them otherwise we assume a real/real\n                # cancellation and convert the other complex zero to real.\n\n                # First get rid of the real pole\n                plz = np.delete(plz, match_index)\n                # Now search another real pole that is also close\n                dist = np.abs(plz-z)\n                if np.min(dist) < tol + tol*np.abs(z):\n                    match_index = np.argmin(dist)\n                    plz = np.delete(plz, match_index)\n                else:\n                    # It was a real/complex cancellation, make the zero real\n                    safe_z += [np.real(z)]\n\n            elif not bool_cz and bool_cp:\n                # Same with above but this time we convert the other pole\n                # to catch in the next iteration if another zero exists\n                plz = np.delete(plz, match_index)\n                conj_index, = np.where(plz == np.conj(pz))\n                plz[conj_index[0]] = np.real(plz[conj_index[0]])\n\n            else:\n                plz = np.delete(plz, match_index)\n\n        else:\n            safe_z += [z]\n\n            if bool_cz:\n                safe_z += [np.conj(z)]\n\n    return np.atleast_2d(k_gain*np.poly(safe_z)), np.atleast_2d(np.poly(plz))\n", "sub_path": "harold/_system_funcs.py", "file_name": "_system_funcs.py", "file_ext": "py", "file_size_in_byte": 20570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "_aux_linalg.haroldsvd", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.linalg.norm",
"line_number": 150, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 150, "usage_type": "call"}, {"api_name": "_aux_linalg.matrix_slice", "line_number": 160, "usage_type": "call"}, {"api_name": "_aux_linalg.haroldsvd", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 170, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 174, "usage_type": "attribute"}, {"api_name": "scipy.linalg.block_diag", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 176, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 270, "usage_type": "attribute"}, {"api_name": "_aux_linalg.e_i", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.s_", "line_number": 272, "usage_type": "attribute"}, {"api_name": "_aux_linalg.e_i", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.s_", "line_number": 273, "usage_type": "attribute"}, {"api_name": "scipy.linalg.qr", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 274, "usage_type": "attribute"}, {"api_name": "numpy.linalg.eig", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 275, "usage_type": "attribute"}, {"api_name": "numpy.linalg.cond", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 284, "usage_type": "call"}, {"api_name": "scipy.linalg.svdvals", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 422, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 467, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.roots", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.roots", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 511, "usage_type": "attribute"}, {"api_name": "numpy.imag", "line_number": 511, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.imag", "line_number": 517, 
"usage_type": "call"}, {"api_name": "numpy.min", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.imag", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.conj", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 529, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 540, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 541, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 541, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 543, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 551, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.conj", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 553, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 556, "usage_type": "call"}, {"api_name": "numpy.conj", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 564, "usage_type": "call"}, {"api_name": "numpy.poly", "line_number": 564, "usage_type": "call"}]} +{"seq_id": "147745665", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.serialization import normalize_storage_type\nimport torchvision\nfrom torchvision import transforms\nimport time\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1=nn.Conv2d(1, 32, 5, padding=2)\n self.conv2=nn.Conv2d(32, 64, 5, padding=2)\n self.fc1=nn.Linear(64 * 7 * 7, 64)\n self.fc2=nn.Linear(64, 32)\n self.fc3=nn.Linear(32, 10)\n def forward(self,x):\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x=x.view(-1,64*7*7)\n x=F.relu(self.fc1(x))\n x=F.dropout(x, 0.5, training=True)\n x=F.relu(self.fc2(x))\n x=F.softmax(self.fc3(x))\n return x\n\ndef main():\n transform=transforms.Compose([transforms.ToTensor()])\n train_set=torchvision.datasets.MNIST(\n root=\"./data\", train=True, download=True, transform=transform\n )\n train_loader=torch.utils.data.DataLoader(\n train_set, batch_size=64, shuffle=True, num_workers=0\n )\n test_set=torchvision.datasets.MNIST(\n root=\"./data\", train=False,download=True, transform=transform\n )\n test_loader=torch.utils.data.DataLoader(\n test_set, batch_size=4, shuffle=False, num_workers=0\n ) \n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n net=Net()\n\n optimizer = optim.SGD(net.parameters(), lr=0.01)\n criterion = nn.CrossEntropyLoss()\n net.to(device)\n net.train()\n \n torch.cuda.synchronize()\n ts = time.time()\n for epoch in range(10):\n for batch_idx,(data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output=net(data)\n loss=criterion(output, target)\n loss.backward()\n optimizer.step()\n\n if(batch_idx + 1) % 100 == 0:\n print(\n f\"Epoch: {epoch} | Batch: {batch_idx+1} | Loss: 
{loss.item():.6f}\"\n )\n\n torch.cuda.synchronize()\n print(f\"Train time {time.time() - ts:.2f}\")\n\n net.eval()\n correct_count=0\n torch.cuda.synchronize()\n ts=time.time()\n with torch.no_grad():\n for data,target in test_loader:\n data, target = data.to(device), target.to(device)\n output=net(data)\n pred=output.argmax(dim=1, keepdim=True)\n correct_count+=pred.eq(target.view_as(pred)).sum().item()\n torch.cuda.synchronize()\n print(f\"\\nTest time{time.time() - ts:.2f}\")\n print(\n f\"Accuracy: {100.0 * correct_count / len(test_loader.dataset)}%({correct_count}/{len(test_loader.dataset)})\" \n )\n \n torch.save(net.state_dict(), \"./exr.pt\")\n\n net_ = Net()\n net_.load_state_dict(torch.load(\"./exr.pt\"))\n net_tsp = torch.jit.script(net_)\n net_tsp.save(\"./exr_tsp.pt\")\n\n\nif __name__==\"__main__\":\n main()\n", "sub_path": "train_ffn.py", "file_name": "train_ffn.py", "file_ext": "py", "file_size_in_byte": 3002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 33, 
"usage_type": "call"}, {"api_name": "torch.utils", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.cuda.synchronize", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 52, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 68, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 73, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 81, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.jit.script", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 91, "usage_type": "attribute"}]} +{"seq_id": "54343762", "text": "import matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import make_blobs\nimport tensorflow as tf\nprint('tensorflow version', tf.__version__)\nif int(tf.__version__.split('.')[0]) >= 2:\n from tensorflow import keras\nelse:\n import keras\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\n\n\n\ndef read_data_MNIST():\n # MNISTの手書き数字データセット(学習用と評価用に分割済)\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n \n # MNISTの手書き数字のデータを表示\n fig = plt.figure(figsize=(9, 15))\n fig.subplots_adjust(left=0, right=1, bottom=0, top=0.5, hspace=0.05, wspace=0.05)\n for i in range(9):\n ax = fig.add_subplot(1, 9, i + 1, xticks=[], yticks=[])\n # 各MNIST画像の上に(タイトルとして)対応するラベルを表示\n ax.set_title(str(y_train[i]))\n ax.imshow(x_train[i], cmap='gray')\n \n # 名��尺度をone-hot表現に変換\n # 入力画像を行列(28x28)からベクトル(長さ784)に変換\n x_train = x_train.reshape(-1, 784) / 255.\n x_test = x_test.reshape(-1, 784) / 255.\n \n # 名義尺度の値をone-hot表現へ変換\n y_train = to_categorical(y_train)\n y_test = to_categorical(y_test)\n\n return x_train, y_train, x_test, y_test\n \n\n \ndef read_data_Iris():\n iris = load_iris()\n X = iris.data\n Y = iris.target\n \n iris_data = pd.DataFrame(X,columns=['Sepal Length','Sepal 
Width','Petal Length','Petal Width'])\n iris_target = pd.DataFrame(Y,columns=['Species'])\n \n df_iris = pd.concat([iris_data,iris_target],axis=1)\n \n # 可視化\n sns.pairplot(df_iris,hue='Species',size=2)\n sns.countplot('Petal Length',data=df_iris,hue='Species')\n \n return df_iris\n\n\n\ndef make_data_blobs():\n X, Y = make_blobs(n_samples=500, centers=4, random_state=8, cluster_std=2.4)\n \n # 表示\n plt.figure(figsize=(11,7))\n plt.scatter(X[:, 0], X[:, 1], c=Y, s=50, cmap='jet')\n plt.grid()\n plt.show()\n \n return X, Y\n \n\n\ndef read_data_01():\n X = np.loadtxt('data/data_quality.txt', delimiter=',')\n \n # データプロット\n plt.figure(figsize=(11,7))\n plt.title('data01')\n plt.scatter(X[:,0], X[:,1], marker='o', facecolors='none', edgecolors='black', s=80) \n x_min, x_max = X[:, 0].min()-1, X[:, 0].max()+1 \n y_min, y_max = X[:, 1].min()-1, X[:, 1].max()+1\n plt.xlim(x_min, x_max) \n plt.ylim(y_min, y_max) \n plt.xticks(()) \n plt.yticks(()) \n plt.show()\n \n return X[:,0], X[:,1]\n\n\n\ndef read_data_02():\n X = np.loadtxt('data/data_clustering.txt', delimiter=',')\n \n # データプロット\n plt.figure(figsize=(11,7))\n plt.title('data02')\n plt.scatter(X[:,0], X[:,1], marker='o', facecolors='none', edgecolors='black', s=80) \n x_min, x_max = X[:, 0].min()-1, X[:, 0].max()+1 \n y_min, y_max = X[:, 1].min()-1, X[:, 1].max()+1\n plt.xlim(x_min, x_max) \n plt.ylim(y_min, y_max) \n plt.xticks(()) \n plt.yticks(()) \n plt.show()\n \n return X[:,0], X[:,1]\n\n\n\ndef read_data_03():\n data = np.loadtxt('data/knnsample.csv', delimiter=',')\n X = data[:, :2]\n Y = data[:, 2:].reshape(data.shape[0])\n \n # データプロット\n plt.figure(figsize=(11,7))\n plt.title('data03')\n plt.scatter(X[:,0], X[:,1], marker='o', facecolors='none', edgecolors='black', s=80) \n x_min, x_max = X[:, 0].min()-1, X[:, 0].max()+1 \n y_min, y_max = X[:, 1].min()-1, X[:, 1].max()+1\n plt.xlim(x_min, x_max) \n plt.ylim(y_min, y_max) \n plt.xticks(()) \n plt.yticks(()) \n plt.show()\n \n return X, Y\n\n\n\ndef read_data_04():\n data = np.loadtxt('data/OCSVM_sample.csv', delimiter=',')\n \n # データプロット\n plt.figure(figsize=(11,7))\n plt.title('data04')\n plt.scatter(data[:,0], data[:,1], marker='o', facecolors='none', edgecolors='black', s=80) \n x_min, x_max = data[:, 0].min()-1, data[:, 0].max()+1 \n y_min, y_max = data[:, 1].min()-1, data[:, 1].max()+1\n plt.xlim(x_min, x_max) \n plt.ylim(y_min, y_max) \n plt.xticks(()) \n plt.yticks(()) \n plt.show()\n \n return data\n\n\n\ndef read_data_05():\n x = np.loadtxt('data/Cumulative_sum.csv', delimiter=',')\n plt.plot(x)\n \n return x\n\n\n \ndef make_random_sin():\n # ダミーデータ生成\n x_train = 10 * np.random.rand(100)\n # サイン波にノイズをのせる関数\n def sin_model(x, sigma=0.2):\n noise = sigma * np.random.randn(len(x))\n return np.sin(5 * x) + np.sin(0.5 * x) + noise\n \n # xからyを計算\n y_train = sin_model(x_train)\n \n # 表示\n plt.figure(figsize=(11,7))\n plt.errorbar(x_train, y_train, 0.1, fmt='o')\n \n # テスト用データ生成\n x_test = np.linspace(0, 10, 1000)\n y_test = sin_model(x_test, 0)\n \n return x_train, y_train, x_test, y_test\n \n\n\ndef make_random_XY():\n # ダミーデータ生成\n X = 0.3 * np.random.randn(100, 2)\n # 外れ値生成\n ANOMALY_DATA_COUNT = 20\n X_outliers = np.random.uniform(low=-4, high=4, size=(ANOMALY_DATA_COUNT, 2))\n X = np.r_[X + 2, X - 2, X_outliers]\n \n return X[:,0], X[:,1]\n\n\n\ndef electro_cardiogram():\n data = np.loadtxt(\"data/qtdbsel102.txt\",delimiter=\"\\t\")\n\n return data\n\n\n\ndef read_imdb():\n imdb = keras.datasets.imdb\n (train_data, train_labels), (test_data, test_labels) = 
imdb.load_data(num_words=10000)\n\n    print(\"Training entries: {}, labels: {}\".format(len(train_data), len(train_labels)))\n\n    # A dictionary mapping words to an integer index\n    word_index = imdb.get_word_index()\n    # The first indices are reserved\n    word_index = {k:(v+3) for k,v in word_index.items()}\n    word_index[\"<PAD>\"] = 0\n    word_index[\"<START>\"] = 1\n    word_index[\"<UNK>\"] = 2  # unknown\n    word_index[\"<UNUSED>\"] = 3\n\n    reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n    def decode_review(text):\n        return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\n    decode_review(train_data[0])\n\n    train_data = keras.preprocessing.sequence.pad_sequences(train_data,\n                                                            value=word_index[\"<PAD>\"],\n                                                            padding='post',\n                                                            maxlen=256)\n\n    test_data = keras.preprocessing.sequence.pad_sequences(test_data,\n                                                           value=word_index[\"<PAD>\"],\n                                                           padding='post',\n                                                           maxlen=256)\n\n    return train_data, test_data, train_labels, test_labels, reverse_word_index\n", "sub_path": "data/read_data.py", "file_name": "read_data.py", "file_ext": "py", "file_size_in_byte": 6876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "tensorflow.__version__", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.__version__.split", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.__version__", "line_number": 9, "usage_type": "attribute"}, {"api_name": "keras.datasets.mnist.load_data", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 52, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 55, "usage_type": "call"}, {"api_name": "seaborn.countplot", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name":
"matplotlib.pyplot.scatter", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 128, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 186, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 197, "usage_type": "call"}, {"api_name": "keras.datasets", "line_number": 204, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 225, "usage_type": "call"}, {"api_name": "keras.preprocessing", "line_number": 225, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 230, "usage_type": "call"}, {"api_name": "keras.preprocessing", "line_number": 230, "usage_type": "attribute"}]} +{"seq_id": "157152847", "text": "#!/usr/bin/env python3\nimport PIL\nfrom helper import *\nimport cv2\nimport torch\nimport torchvision\n\ndef overlay_class_names(image, save_path, predictions, 
class_dict):\n \"\"\"\n Adds detected class names and scores in the positions defined by the\n top-left corner of the predicted bounding box\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores` and `labels`.\n \"\"\"\n scores = predictions[\"scores\"].tolist()\n labels = predictions[\"labels\"].tolist()\n a = dict(sorted(class_dict.items(), key=lambda item: item[1]))\n keys = list(a.keys())\n labels = [keys[int(i) - 1] for i in labels]\n boxes = predictions['boxes']\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n # cv2.putText requires integer pixel coordinates; box entries are float tensors\n cv2.putText(image, s, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)\n\n cv2.imwrite(save_path, image)\n\n return image\n\n\ndef compute_colors_for_labels(labels, palette=None):\n \"\"\"\n Simple function that adds fixed colors depending on the class\n \"\"\"\n if palette is None:\n palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n colors = labels[:, None] * palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors\n\n\ndef overlay_boxes(image, save_path, predictions):\n \"\"\"\n Adds the predicted boxes on top of the image\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `labels`.\n \"\"\"\n labels = predictions[\"labels\"]\n boxes = predictions['boxes']\n colors = compute_colors_for_labels(labels).tolist()\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n cv2.imwrite(save_path, image)\n\n return image\n\n\ndef overlay_mask(image, save_path, predictions):\n \"\"\"\n Adds the instance contours for each predicted object.\n Each label has a different color.\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask` and `labels`.\n \"\"\"\n masks = predictions[\"masks\"].ge(0.5).mul(255).byte().numpy()\n labels = predictions[\"labels\"]\n colors = compute_colors_for_labels(labels).tolist()\n for mask, color in zip(masks, colors):\n thresh = mask[0, :, :, None]\n contours, hierarchy = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n image = cv2.drawContours(image, contours, -1, color, 3)\n\n composite = image\n\n cv2.imwrite(save_path, composite)\n\n return composite\n\ndef select_top_predictions(predictions, threshold):\n idx = (predictions[\"scores\"] > threshold).nonzero().squeeze(1)\n new_predictions = {}\n for k, v in predictions.items():\n new_predictions[k] = v[idx]\n return new_predictions\n\ndef main(num_classes,model_path,img_path,save_folder):\n\n model = get_model_instance_segmentation(num_classes)\n # device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n device = torch.device('cpu')\n model.load_state_dict(torch.load(model_path, map_location=device))\n model.to(device)\n model.eval()\n #predict\n image = PIL.Image.open(img_path)\n image_tensor = torchvision.transforms.functional.to_tensor(image)\n image_tensor = image_tensor.to(device)\n output = model([image_tensor])\n top_predictions = select_top_predictions(output[0], 0.5)\n top_predictions = {k: v.cpu() for k, v 
in top_predictions.items()}\n #print result\n print (\"prediction: \", top_predictions)\n #visualize\n cv_img = np.array(image)\n\n #mkdir for sub folders\n (filepath, tempfilename) = os.path.split(img_path)\n os.makedirs(os.path.join(save_folder,'box'), exist_ok=True)\n os.makedirs(os.path.join(save_folder, 'mask'), exist_ok=True)\n save_path_mask = os.path.join(save_folder,'mask',tempfilename)\n save_path_box = os.path.join(save_folder,'box',tempfilename)\n result = overlay_boxes(cv_img, save_path_box, top_predictions)\n result = overlay_mask(result, save_path_mask, top_predictions)\n\ndef main_folder(num_classes,model_path,img_folder,save_folder):\n for i in os.listdir(img_folder)[:50]:\n try:\n main(num_classes, model_path, os.path.join(img_folder,i), save_folder)\n print (\"<<< None:\n menu()\n\n\ndef menu() -> None:\n print('======================================')\n print('=========== Bem-vindo(a) =============')\n print('=========== Matheus Shop =============')\n print('======================================')\n\n print('\\nSelecione uma das opções abaixo:')\n print('\\t1 - Cadastrar produto')\n print('\\t2 - Listar produtos')\n print('\\t3 - Comprar produtos')\n print('\\t4 - Visualizar carrinho')\n print('\\t5 - Fechar pedido')\n print('\\t6 - Sair do sistema')\n\n opcao: int = int(input('Opção: '))\n\n # Opções redirecionando para as funções\n if opcao == 1:\n cadastrar_produto()\n elif opcao == 2:\n listar_produtos()\n elif opcao == 3:\n comprar_produto()\n elif opcao == 4:\n visualizar_carrinho()\n elif opcao == 5:\n fechar_pedido()\n elif opcao == 6:\n print(\"Volte sempre!\")\n sleep(2)\n exit(0)\n else:\n print('Opção inválida!')\n sleep(1)\n menu()\n\n\ndef cadastrar_produto() -> None:\n print('Cadastro de produto')\n print('===================')\n\n nome: str = input(\"Informe o nome do produto: \")\n preco: float = float(input(\"Informe o preco do produto: \"))\n\n # Instancia de produto\n produto: Produto = Produto(nome, preco)\n\n # Adicionando a lista de produtos\n produtos.append(produto)\n\n print(f'O produto {produto.nome} foi cadastrado com sucesso')\n sleep(2)\n\n # Volta para o menu\n menu()\n\n\ndef listar_produtos() -> None:\n # Listar somente se tiver produtos\n if len(produtos) > 0:\n print('Listagem de produtos')\n print('====================')\n\n for produto in produtos:\n print(produto)\n print('---------------')\n sleep(1)\n else:\n print('Ainda não existem produtos cadastrados')\n\n # Volta para o menu\n sleep(2)\n menu()\n\n\ndef comprar_produto() -> None:\n # Verifica se existe produtos na lista\n if len(produtos) > 0:\n print(\"Informe o código do produto que deseja adicionar ao carrinho: \")\n print('--------------------------------------------------------------')\n # Mostrar código de produtos\n print(\"================= Produtos Disponíveis =======================\")\n\n for produto in produtos:\n print(produto) # print reescrito com o __str__ -> class Produto\n print('----------------------------------------------------------')\n sleep(1)\n\n codigo: int = int(input(\"\\nCódigo: \"))\n\n produto: Produto = pega_produto_por_codigo(codigo)\n\n # Verifica se o produto existe na lista de produtos\n if produto:\n # Verifica se o produto já está no carrinho\n if len(carrinho) > 0:\n # Verifica se o produto ja esta no carrinho e só incrementa a quantidade\n tem_no_carrinho: bool = False\n\n for item in carrinho:\n quant: int = item.get(produto) # Valor em um dicionario (chave:valor)\n if quant:\n item[produto] = quant + 1\n print(f'O produto 
{produto.nome} agora possui {quant+1} unidades no carrinho')\n tem_no_carrinho = True\n sleep(2)\n menu()\n # Produto ainda nao esta no carrinho\n if not tem_no_carrinho:\n prod = {produto: 1}\n carrinho.append(prod)\n print(f'O produto {produto.nome} foi adicionado ao carrinho')\n sleep(2)\n menu()\n else:\n item = {produto: 1}\n carrinho.append(item)\n print(f'O produto {produto.nome} foi adicionado ao carrinho')\n sleep(2)\n menu()\n else:\n print(f\"O produto com código {codigo} não foi encontrado\")\n sleep(2)\n menu()\n\n else:\n print(\"Ainda não existem produtos para vender\")\n\n sleep(2)\n menu()\n\n\ndef visualizar_carrinho() -> None:\n if len(carrinho) > 0:\n print(\"Produtos no carrinho: \")\n\n for item in carrinho:\n for dados in item.items():\n print(dados[0])\n print(f'Quantidade: {dados[1]}')\n print('-------------------------')\n sleep(1)\n\n else:\n print(\"Ainda não existem produtos no carrinho\")\n\n sleep(2)\n menu()\n\n\ndef fechar_pedido() -> None:\n # Fechar pedido se o carrinho tiver algo\n if len(carrinho) > 0:\n # Somatório do valor total\n valor_total: float = 0\n\n print('Produtos do carrinho')\n for item in carrinho:\n for dados in item.items(): # Items -> Chave:Valor\n print(dados[0]) # Posição 0 -> Chave -> Produto\n print(f'Quantidade: {dados[1]}') # Posicao 1 -> Quantidade\n valor_total += dados[0].preco * dados[1]\n print('---------------')\n sleep(1)\n\n # Mostra o valor da fatura\n print(f'Sua fatura é {formata_float_str_moeda(valor_total)}')\n\n # Despede\n print('Volte sempre!')\n\n # Limpa o carrinho\n carrinho.clear()\n sleep(5)\n else:\n print('Ainda não existem produtos no carrinho')\n\n # Executa o menu novamente\n sleep(2)\n menu()\n\n\ndef pega_produto_por_codigo(codigo: int) -> Produto:\n p: Produto = None\n\n for produto in produtos:\n if produto.codigo == codigo:\n p = produto\n\n # Retorna o produto\n return p\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/mercado.py", "file_name": "mercado.py", "file_ext": "py", "file_size_in_byte": 5931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "models.produto.Produto", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "models.produto.Produto", "line_number": 11, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "models.produto.Produto", "line_number": 63, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "models.produto.Produto", "line_number": 108, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 123, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 136, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 164, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 181, "usage_type": "call"}, {"api_name": "utils.helper.formata_float_str_moeda", "line_number": 184, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 191, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 196, "usage_type": "call"}, {"api_name": "models.produto.Produto", "line_number": 201, "usage_type": "name"}, {"api_name": "models.produto.Produto", "line_number": 200, "usage_type": "name"}]} +{"seq_id": "311394895", "text": "__author__ = 'imalkov'\n\nimport os, sys\nimport ast\nfrom argparse import ArgumentParser\nfrom configparser import ConfigParser\n\nDEBUG_FLAG = False\n\ndef expose_mdlexe(f):\n def wrapper(*args):\n print('calling {0} with {1}'.format(type(args[0]).__name__, args[1]))\n i = 2\n if isinstance(args[0], MdlExecutor):\n i = 0\n print('state: {0}'.format(type(args[i].state).__name__))\n return f(*args)\n return wrapper\n\ndef runcmd(cmd, kls):\n global DEBUG_FLAG\n if DEBUG_FLAG is True:\n return\n sys.stdout.flush()\n print('cmd: {0}'.format(cmd))\n proc = os.popen(cmd)\n while True:\n line = proc.readline()\n if line != '':\n print(line.strip())\n sys.stdout.flush()\n else:\n print(\"------------ completed -----------\")\n break\n exec_state = proc.close()\n if exec_state != None and exec_state != 256:\n raise Exception('{0} fail with {1}'.format(type(kls).__name__, exec_state))\n\nclass EnvState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n cmd = 'python3 modelenv/mdlenv.py -c'\n runcmd(cmd, EnvState)\n if remaining_arr != [] and remaining_arr != None:\n state = remaining_arr[0]\n if state == 'run':\n mdl_executor.state = TestState()\n elif state == 'stat':\n mdl_executor.state = StatisticsState()\n elif state == 'plot':\n mdl_executor.state = GraphState()\n elif state == 'end':\n return None\n else:\n raise NotImplemented('{0}: state not found', self.__name__)\n remaining_arr = remaining_arr[1:]\n return remaining_arr\n\nclass TestState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n cmd = 'python3 modelexe/mdlexec.py -m Test -f {0}'.format(mdl_executor._exec_log)\n runcmd(cmd, self)\n mdl_executor.state = PecubeState()\n return remaining_arr\n\nclass PecubeState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n cmd = 'python3 modelexe/mdlexec.py -m Pecube -f {0}'.format(mdl_executor._exec_log)\n runcmd(cmd, self)\n mdl_executor.state = VtkState()\n return remaining_arr\n\nclass VtkState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n cmd = 'python3 modelexe/mdlexec.py -m Vtk -f {0}'.format(mdl_executor._exec_log)\n runcmd(cmd, self)\n if remaining_arr != [] and remaining_arr != None:\n state = remaining_arr[0]\n if state == 'stat':\n mdl_executor.state = StatisticsState()\n elif state == 'plot':\n mdl_executor.state = GraphState()\n elif state == 'end':\n return None\n else:\n raise NotImplemented('{0}: state not found', self.__name__)\n remaining_arr = remaining_arr[1:]\n return remaining_arr\n\nclass CsvState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n pass\n\nclass GraphState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n data_ = ast.literal_eval(mdl_executor.defaults['sub_data'])\n l = [n.strip() for n in ast.literal_eval(data_)]\n data_path = mdl_executor.defaults['data_root'].replace('~', os.environ['HOME'])\n age_pic = mdl_executor.defaults['age_pic'].replace('~', os.environ['HOME'])\n geotherm_pic = 
mdl_executor.defaults['geoterm_pic'].replace('~', os.environ['HOME'])\n for node_path in [os.path.join(data_path, n) for n in l]:\n #convert files\n cmd = 'python3 modelanalysis/pecanalysis.py -i {0} -c'.format(node_path)\n runcmd(cmd,self)\n #create age-elevation\n cmd = 'python3 modelanalysis/pecanalysis.py -i {0} -o {1} -e'.format(node_path, age_pic)\n runcmd(cmd,self)\n #create temperature\n cmd = 'python3 modelanalysis/pecanalysis.py -i {0} -o {1} -tp'.format(node_path, geotherm_pic)\n runcmd(cmd,self)\n\n if remaining_arr !=[] and remaining_arr != None:\n state = remaining_arr[0]\n if state == 'stat':\n mdl_executor.state = StatisticsState()\n elif state == 'end':\n return None\n remaining_arr = remaining_arr[1:]\n return remaining_arr\n\nclass StatisticsState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n data_ = ast.literal_eval(mdl_executor.defaults['sub_data'])\n l = [n.strip() for n in ast.literal_eval(data_)]\n data_path = mdl_executor.defaults['data_root'].replace('~', os.environ['HOME'])\n for node_path in [os.path.join(data_path,n) for n in l]:\n cmd = 'python3 modelanalysis/pecanalysis.py -i {0} -o {0} -ta -d {1}'.format(node_path, mdl_executor.analysis['esc_dist'])\n runcmd(cmd, self)\n cmd = 'find {1} -name Age-Elevation0.csv | sort | xargs {0} -n 5 > {1}/age-elevation-stats-{0}.txt'.format('head', node_path)\n runcmd(cmd, self)\n\n if remaining_arr !=[] and remaining_arr != None:\n state = remaining_arr[0]\n if state == 'plot':\n mdl_executor.state = GraphState()\n elif state == 'end':\n return None\n remaining_arr = remaining_arr[1:]\n return remaining_arr\n\nclass InitState:\n @expose_mdlexe\n def process(self, remaining_arr, mdl_executor):\n if remaining_arr !=[] and remaining_arr != None:\n strt = remaining_arr[0]\n if strt == 'env':\n mdl_executor.state = EnvState()\n elif strt == 'run':\n mdl_executor.state = TestState()\n elif strt == 'stat':\n mdl_executor.state = StatisticsState()\n elif strt == 'plot':\n mdl_executor.state = GraphState()\n else:\n raise NotImplementedError('{0}: state not found'.format(type(self).__name__))\n remaining_arr = remaining_arr[1:]\n return remaining_arr\n\nclass MdlExecutor:\n def __init__(self, states_arr, config, analysis):\n self.states_arr = states_arr\n self.state = InitState()\n self.defaults = config\n self.analysis = analysis\n self._exec_log = '{0}/log_{1}.txt'.format(os.environ['HOME'], os.getpid())\n\n @expose_mdlexe\n def process(self, remaining_arr):\n remaining = self.state.process(remaining_arr, self)\n if remaining != [] and remaining != None:\n self.process(remaining)\n\n def start(self):\n self.process(self.states_arr)\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument( \"-l\", dest=\"states_list\", help=\"list of states to be executed: env, run, plot, stat\", default= '[]')\n parser.add_argument( \"-d\", action=\"store_true\", dest=\"debug\", help=\"debug purpose\", default= False)\n\n kvargs = parser.parse_args()\n sl = [n.strip() for n in ast.literal_eval(kvargs.states_list)] + ['end']\n config = ConfigParser()\n config.read('model.conf')\n DEBUG_FLAG = kvargs.debug\n\n mlexe = MdlExecutor(sl, config['Default'], config['Analysis'])\n mlexe.start()\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7272, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.stdout.flush", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 24, 
"usage_type": "attribute"}, {"api_name": "os.popen", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "ast.literal_eval", "line_number": 101, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 102, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "ast.literal_eval", "line_number": 129, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 130, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.getpid", "line_number": 171, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 183, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 188, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "601168246", "text": "\r\nimport numpy as np \r\nimport tensorflow as tf\r\nimport torch\r\nimport torchvision\r\nimport torchvision.models as models\r\n\r\ndef conv2d(c, **kwargs):\r\n x = kwargs['inp']\r\n padding = 'VALID'\r\n if c.padding[0] != 0:\r\n psize = c.padding[0]\r\n x = tf.pad(x, [[0, 0], [psize, psize], [psize, psize], [0, 0]], \"CONSTANT\")\r\n filters = c.out_channels\r\n size = c.kernel_size\r\n parameters = [p for p in c.parameters()]\r\n W = parameters[0].data.numpy()\r\n if len(parameters) > 1:\r\n b = parameters[1].data.numpy()\r\n\r\n W = np.transpose(W, [2,3,1,0])\r\n\r\n wi = tf.constant_initializer(W)\r\n if len(parameters) > 1:\r\n bi = tf.constant_initializer(b)\r\n Wt = tf.get_variable('weights', shape=W.shape, initializer=wi)\r\n\r\n if len(parameters) > 1:\r\n bt = tf.get_variable('bias',shape=b.shape,initializer=bi)#,\r\n\r\n x = tf.nn.conv2d(x, Wt, [1, c.stride[0], c.stride[1], 1], padding)\r\n if len(parameters) > 1:\r\n x = tf.nn.bias_add(x,bt)\r\n return x\r\n\r\ndef relu(c, **kwargs):\r\n return tf.nn.relu(kwargs['inp'])\r\n\r\ndef max_pool(c, **kwargs):\r\n x = kwargs['inp']\r\n padding = 'VALID'\r\n if c.padding != 0:\r\n psize = c.padding\r\n x = tf.pad(x, [[0, 0], [psize, psize], [psize, psize], [0, 0]], \"CONSTANT\")\r\n x = tf.nn.max_pool(x, [1,c.kernel_size,c.kernel_size,1], strides=[1,c.stride,c.stride,1], padding=padding)\r\n return x\r\n\r\ndef avg_pool(c, **kwargs):\r\n x = kwargs['inp']\r\n padding = 'VALID'\r\n if c.padding!= 0:\r\n psize = c.padding\r\n x = tf.pad(x, [[0, 0], [psize, psize], [psize, psize], [0, 0]], \"CONSTANT\")\r\n x = tf.nn.avg_pool(kwargs['inp'],[1,c.kernel_size,c.kernel_size,1],strides=[1,c.stride,c.stride,1],padding=padding)\r\n return x\r\n\r\ndef dropout(c, **kwargs):\r\n return tf.layers.dropout(kwargs['inp'], rate=c.p, training=kwargs['train_flag']) #is_train,TODO\r\n\r\ndef batch_norm(c, **kwargs):\r\n beta = c.bias.data.numpy()\r\n beta = tf.constant_initializer(beta)\r\n gamma = 
c.weight.data.numpy()\r\n gamma = tf.constant_initializer(gamma)\r\n moving_mean = c.running_mean.data.numpy()\r\n moving_mean = tf.constant_initializer(moving_mean)\r\n moving_var = c.running_var.data.numpy()\r\n moving_var = tf.constant_initializer(moving_var)\r\n return tf.layers.batch_normalization(kwargs['inp'], epsilon=1e-5, beta_initializer=beta, gamma_initializer=gamma, \r\n moving_mean_initializer=moving_mean, moving_variance_initializer=moving_var, training=kwargs['train_flag'])\r\n\r\ndef linear(c, **kwargs):\r\n x = tf.reshape(tf.transpose(kwargs['inp'], [0, 3, 1, 2]), [-1, 2048])\r\n kernal = c.weight.data.numpy()\r\n kernal = tf.constant(kernal)\r\n bias = c.bias.data.numpy()\r\n bias = tf.constant(bias)\r\n return tf.nn.bias_add(tf.matmul(x, kernal, transpose_b=True), bias)\r\n\r\ndef bottleneck_module(c, **kwargs):\r\n with tf.variable_scope(\"bottleneck\"):\r\n residual = kwargs['inp']\r\n train_flag = kwargs['train_flag']\r\n with tf.variable_scope(\"conv1\"):\r\n x = conv2d(c.conv1, inp=kwargs['inp'])\r\n x = batch_norm(c.bn1, inp=x, train_flag=train_flag)\r\n x = tf.nn.relu(x)\r\n with tf.variable_scope(\"conv2\"):\r\n x = conv2d(c.conv2, inp=x)\r\n x = batch_norm(c.bn2, inp=x, train_flag=train_flag)\r\n x = tf.nn.relu(x)\r\n with tf.variable_scope(\"conv3\"):\r\n x = conv2d(c.conv3, inp=x)\r\n x = batch_norm(c.bn3, inp=x, train_flag=train_flag)\r\n if c.downsample is not None:\r\n for d in c.downsample.children():\r\n residual = type_lookups[d.__class__](d, inp=residual, train_flag=train_flag)\r\n x += residual\r\n x = tf.nn.relu(x)\r\n return x\r\n\r\ndef seq_container(c, **kwargs):\r\n x = kwargs['inp']\r\n train_flag = kwargs['train_flag']\r\n for c2 in enumerate(c.children()):\r\n c2_class = c2[1].__class__\r\n if c2_class in type_lookups:\r\n with tf.variable_scope('layer' + str(c2[0])):\r\n x = type_lookups[c2_class](c2[1], inp=x, train_flag=train_flag)\r\n else:\r\n unknown_class(c2[1])\r\n print(c2_class)\r\n return x\r\n\r\ntype_lookups = {}\r\ntype_lookups[torch.nn.modules.conv.Conv2d] = conv2d\r\ntype_lookups[torch.nn.modules.activation.ReLU] = relu\r\ntype_lookups[torch.nn.modules.container.Sequential] = seq_container\r\ntype_lookups[torch.nn.modules.pooling.MaxPool2d] = max_pool\r\ntype_lookups[torch.nn.modules.pooling.AvgPool2d] = avg_pool\r\ntype_lookups[torch.nn.modules.dropout.Dropout] = dropout\r\ntype_lookups[torchvision.models.resnet.Bottleneck] = bottleneck_module\r\ntype_lookups[torch.nn.modules.batchnorm.BatchNorm2d] = batch_norm\r\ntype_lookups[torch.nn.modules.linear.Linear] = linear\r\n\r\ndef convert(input_image, is_training):\r\n model = models.resnet50(pretrained=True)\r\n with tf.variable_scope(\"resnet50\", reuse=tf.AUTO_REUSE):\r\n for idx, c in enumerate(model.children()):\r\n if idx == 0:\r\n with tf.variable_scope(\"conv1\"):\r\n x = type_lookups[c.__class__](c, inp=input_image)\r\n elif idx in [1, 2, 3]:\r\n with tf.variable_scope(\"conv1\"):\r\n x = type_lookups[c.__class__](c, inp=x, train_flag=is_training)\r\n elif idx in [4, 5, 6, 7]:\r\n with tf.variable_scope(\"conv%d\"%(idx-2)):\r\n x = type_lookups[c.__class__](c, inp=x, train_flag=is_training)\r\n # elif idx == 8:\r\n # with tf.variable_scope(\"avg_pool\"):\r\n # x = type_lookups[c.__class__](c, inp=x)\r\n else:\r\n break\r\n return x\r\n", "sub_path": "GAN/torch2tf.py", "file_name": "torch2tf.py", "file_ext": "py", "file_size_in_byte": 5273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": 
"tensorflow.pad", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.bias_add", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.pad", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.pad", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.nn.avg_pool", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dropout", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.constant_initializer", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn.bias_add", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 108, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 122, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torchvision.models.resnet50", "line_number": 127, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 127, "usage_type": "name"}, {"api_name": "tensorflow.variable_scope", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "210748382", "text": "from ShelterSmartHome.apps.lead_node.models import LeadNode\nfrom ShelterSmartHome.apps.slave_node.models import SlaveNode\nfrom ShelterSmartHome.apps.eventprocessor.models import EventProcess\nimport datetime\n\n\ndef send_command_to_execute(slave_channel):\n slave = SlaveNode.objects.get(name=slave_channel)\n return {\"state\": \"go\",\n \"path\": slave.path,\n slave.conn_type_interface: slave.data,\n \"date\": str(datetime.datetime.now())[:19]}\n\n\ndef eventprocessor(json_input):\n \n for key in json_input.keys(): # ----- Cheking Data Base for registered Paths and Interface -------\n \n if LeadNode.objects.filter(conn_type_interface=key).exists() and LeadNode.objects.filter(path=json_input['path']).exists():\n \n for lead_channel in LeadNode.objects.filter(conn_type_interface=key).values_list('name', flat=True):\n\n for event_name in EventProcess.objects.filter(lead_channel=lead_channel).values_list('name', flat=True):\n \n for slave_channel in EventProcess.objects.filter(name=event_name).values_list('slave_channel', flat=True):\n event = EventProcess.objects.get(name=event_name)\n \n \"\"\"Checking ON/OFF Events Status\"\"\"\n if event.status == 1:\n type_activation_event = event.type_activation_event\n range_threshold_activity = event.range_threshold_activity\n \n # ------- Checking json values for registered values in events data base -------\n if type_activation_event == 'parse':\n to_find = str(range_threshold_activity)\n result = str(json_input[key]).find(to_find)\n if result > -1:\n yield send_command_to_execute(slave_channel)\n \n if type_activation_event == 'static_string':\n if range_threshold_activity == str(json_input[key]):\n yield send_command_to_execute(slave_channel)\n \n if type_activation_event == 'static_value':\n if int(range_threshold_activity) == int(json_input[key]):\n yield send_command_to_execute(slave_channel)\n \n if type_activation_event == 'dynamic_range_value':\n rta = range_threshold_activity.split(',')\n if int(json_input[key]) in range(int(rta[0].split(':')[1]), int(rta[1].split(':')[1])):\n yield send_command_to_execute(slave_channel)\n else:\n try:\n if LeadNode.objects.filter(conn_type_interface=key).exists() and \\\n LeadNode.objects.filter(path=json_input['path']).exists():\n yield 
LeadNode.objects.get(conn_type_interface=key).name, key, json_input[key], str(datetime.datetime.now())[:19]\n except:\n yield \"Unknown data\", key, json_input[key], str(datetime.datetime.now())[:19] ", "sub_path": "ShelterSmartHome/apps/eventprocessor/eventprocessor.py", "file_name": "eventprocessor.py", "file_ext": "py", "file_size_in_byte": 3377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "ShelterSmartHome.apps.slave_node.models.SlaveNode.objects.get", "line_number": 8, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.slave_node.models.SlaveNode.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.slave_node.models.SlaveNode", "line_number": 8, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode", "line_number": 19, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects.filter", "line_number": 21, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode", "line_number": 21, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess", "line_number": 23, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess", "line_number": 25, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess.objects.get", "line_number": 26, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.eventprocessor.models.EventProcess", "line_number": 26, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode", "line_number": 54, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode", "line_number": 55, "usage_type": "name"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects.get", 
"line_number": 56, "usage_type": "call"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "ShelterSmartHome.apps.lead_node.models.LeadNode", "line_number": 56, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "252460187", "text": "import re\nimport urllib\nfrom urlparse import urlparse, parse_qs\n\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom vumi.transports import Transport\nfrom vumi.utils import http_request_full\nfrom vumi import log\n\n\n## Outgoing only transport, will generate a URL replacing element of the to_addr field\nclass ForwardHttp(Transport):\n \n def setup_transport(self):\n log.msg(\"Setup forward http transport %s\" % self.config)\n self.message_replacement = self.config['message_replacement']\n self.compile_replacement(self.message_replacement)\n self.message_metadata_replacement = self.config['message_metadata_replacement']\n self.compile_replacement(self.message_metadata_replacement)\n\n def compile_replacement(self, replacements={}):\n for field, regex_txt in replacements.iteritems():\n replacements[field] = re.compile(regex_txt)\n\n def replace_arguments_get_url(self, dictionnary, replacement_rules, url):\n for field, regex in replacement_rules.iteritems():\n try:\n url = regex.sub(urllib.quote(dictionnary[field]), url)\n except:\n pass\n return url\n \n @inlineCallbacks\n def handle_outbound_message(self, message):\n log.msg(\"Outboung message to be processed %s\" % repr(message))\n try:\n url = message['to_addr']\n url = self.replace_arguments_get_url(message, self.message_replacement, url)\n url = self.replace_arguments_get_url(message['transport_metadata'], self.message_metadata_replacement, url)\n url = urlparse(url)\n params = parse_qs(url.query)\n for key, [param] in params.iteritems():\n params[key] = param\n forward_url = \"%s://%s%s?%s\" % (url.scheme, url.netloc, url.path, urllib.urlencode(params))\n \n log.msg('Hitting %s' % forward_url)\n \n response = yield http_request_full(\n forward_url.encode('ASCII', 'replace'),\n \"\",\n {'User-Agent': ['Vusion ForwardHttp Transport'],\n 'Content-Type': ['application/json,charset=UTF-8']},\n 'GET')\n \n if response.code != 200:\n log.msg(\"Http Error %s: %s\"\n % (response.code, response.delivered_body))\n yield self.publish_delivery_report(\n user_message_id=message['message_id'],\n delivery_status='failed',\n failure_level='http',\n failure_code=response.code,\n failure_reason=response.delivered_body,\n transport_metadata={'transport_type':'http_forward'})\n return\n \n yield self.publish_ack(\n user_message_id=message['message_id'],\n sent_message_id=message['message_id'],\n transport_metadata={'transport_type': 'http_forward'})\n except Exception as ex:\n log.msg(\"Unexpected error %s\" % repr(ex))\n yield self.publish_delivery_report(\n user_message_id=message['message_id'],\n delivery_status='failed',\n failure_level='transport',\n failure_code=None,\n failure_reason=repr(ex),\n transport_metadata={'transport_type':'http_forward'})\n\n def stopWorker(self):\n log.msg(\"stop forward http transport\")", "sub_path": "transports/forward_http.py", "file_name": "forward_http.py", "file_ext": "py", 
"file_size_in_byte": 3489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "vumi.transports.Transport", "line_number": 13, "usage_type": "name"}, {"api_name": "vumi.log.msg", "line_number": 16, "usage_type": "call"}, {"api_name": "vumi.log", "line_number": 16, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 29, "usage_type": "call"}, {"api_name": "vumi.log.msg", "line_number": 36, "usage_type": "call"}, {"api_name": "vumi.log", "line_number": 36, "usage_type": "name"}, {"api_name": "urlparse.urlparse", "line_number": 41, "usage_type": "call"}, {"api_name": "urlparse.parse_qs", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 45, "usage_type": "call"}, {"api_name": "vumi.log.msg", "line_number": 47, "usage_type": "call"}, {"api_name": "vumi.log", "line_number": 47, "usage_type": "name"}, {"api_name": "vumi.utils.http_request_full", "line_number": 49, "usage_type": "call"}, {"api_name": "vumi.log.msg", "line_number": 57, "usage_type": "call"}, {"api_name": "vumi.log", "line_number": 57, "usage_type": "name"}, {"api_name": "vumi.log.msg", "line_number": 73, "usage_type": "call"}, {"api_name": "vumi.log", "line_number": 73, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 34, "usage_type": "name"}, {"api_name": "vumi.log.msg", "line_number": 83, "usage_type": "call"}, {"api_name": "vumi.log", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "198496284", "text": "import sys\nimport pprint\nimport collections \nsys.stdin = open('톱니바퀴.txt','r')\n \nwheels = []\nturns = []\n \ndef turnLeft(i, d):\n if i < 0:\n return\n \n if wheels[i][2] != wheels[i+1][6]:\n turnLeft(i-1, -d)\n wheels[i].rotate(d)\n \ndef turnRight(i, d):\n if i > 3:\n return\n \n if wheels[i][6] != wheels[i-1][2]:\n turnRight(i+1, -d)\n wheels[i].rotate(d)\n \ndef solve():\n for turn in turns:\n [idx, direction] = turn\n \n turnLeft(idx-1, -direction)\n turnRight(idx+1, -direction)\n \n wheels[idx].rotate(direction)\n\nfor i in range(4):\n wheels.append(collections.deque(list(sys.stdin.readline())[:8]))\n\nK = int(sys.stdin.readline())\n\nfor i in range(K):\n v1, v2 = map(int, sys.stdin.readline().split())\n turns.append([v1-1, v2])\n\nsolve()\nsumVal = 0\nfor i, wheel in enumerate(wheels):\n sumVal += int(wheel[0]) * (1 << i)\nprint(sumVal)", "sub_path": "9월/0919/톱니바퀴3.py", "file_name": "톱니바퀴3.py", "file_ext": "py", "file_size_in_byte": 925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "307117848", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tkinter import *\r\nfrom tkinter import 
messagebox\r\n\r\n\r\n\r\nCONTROL_POINTS_33 = [5.269,5.561,6.462,6.787,9.220,9.421,9.747,9.783,\r\n 9.808, 12.425, 12.604, 13.512, 14.224]\r\n\r\ndef func_33(x):\r\n return round(np.exp(x) + x * np.log10(x) + x,3)\r\n\r\ndef get_finite_diff(y):\r\n diff = []\r\n for i in range(1, len(y)):\r\n diff.append(round(y[i] - y[i - 1],3))\r\n return diff\r\n\r\n\r\ndef get_finite_diff_matrix(y):\r\n diffs = [y]\r\n next_diff = y\r\n\r\n for i in range(len(y) - 1):\r\n next_diff = get_finite_diff(next_diff)\r\n diffs.append(next_diff)\r\n return diffs\r\n\r\n\r\n\r\n\r\ndef gauss(x_list,y_list,x):\r\n pos_0 = int(len(y_list)/2)\r\n x_0 = x_list[pos_0]\r\n print(\"pos_middle\",pos_0)\r\n h = x_list[1] - x_list[0]\r\n res = y_list[pos_0] #y0\r\n print(\"first \", res)\r\n q = (x - x_0) / h\r\n print(\"q = \", round(q,3))\r\n\r\n finite_diffs = get_finite_diff_matrix(y_list)\r\n n = len(finite_diffs)\r\n print(finite_diffs[0])\r\n print(finite_diffs[1])\r\n pos_0 = int(len(finite_diffs[1])/2)\r\n first_iter = finite_diffs[1][pos_0] * q #q*delta(y_0)\r\n res+=first_iter\r\n print(\"first =\", first_iter)\r\n print(\"res 0 + 1 = \", res)\r\n for i in range(2,n):\r\n print(\" iteration:\", i)\r\n print(finite_diffs[i])\r\n pos_0 = int(len(finite_diffs[i])/2)\r\n print(\"pos:\",pos_0)\r\n y_0 = finite_diffs[i][pos_0]\r\n print(\"delta y = \", y_0)\r\n if (i%2==0):\r\n tmp = (g_m(i, q + i/2) * y_0) / math.factorial(i)\r\n else:\r\n tmp = (g_m(i, q + i/2 + 0.5) * y_0) / math.factorial(i)\r\n\r\n res += tmp\r\n print(\"tmp = \", round(tmp,3))\r\n return res\r\n\r\n\r\ndef g_m(m,q):\r\n res = 1\r\n for i in range(1,m+1):\r\n tmp = q-i\r\n res *= tmp\r\n print(i,tmp)\r\n print(\"g_m = \", res)\r\n return res\r\n\r\n\r\ndef start():\r\n res = {}\r\n for i in range(len(CONTROL_POINTS_33)):\r\n tmp = CONTROL_POINTS_33[i]\r\n print(\"Value =\", tmp)\r\n x_start = int(tmp)\r\n x_end = x_start + 1\r\n h = 0.2\r\n x_list = np.arange(x_start, x_end,h).tolist()\r\n y_list = []\r\n for i in range(len(x_list)):\r\n y_list.append(func_33(x_list[i]))\r\n y = func_33(tmp)\r\n res[y] = round(gauss(x_list, y_list, tmp),3)\r\n print(res)\r\n messagebox.showinfo(\"Результат за завданням\", res)\r\n\r\ndef plot():\r\n res = {}\r\n mas = []\r\n y_plot = []\r\n for i in range(len(CONTROL_POINTS_33)):\r\n y_plot.append(func_33(CONTROL_POINTS_33[i]))\r\n for i in range(len(CONTROL_POINTS_33)):\r\n tmp = CONTROL_POINTS_33[i]\r\n print(\"Value =\", tmp)\r\n x_start = int(tmp)\r\n x_end = x_start + 1\r\n h = 0.2\r\n x_list = np.arange(x_start, x_end, h).tolist()\r\n y_list = []\r\n for i in range(len(x_list)):\r\n y_list.append(func_33(x_list[i]))\r\n y = func_33(tmp)\r\n res[y] = round(gauss(x_list, y_list, tmp), 3)\r\n mas.append(res[y])\r\n\r\n plt.figure()\r\n plt.plot(CONTROL_POINTS_33, y_plot, '-b', label='Задані точки')\r\n plt.plot(CONTROL_POINTS_33, mas, '*r', label='Перша інтерполяційна формула Гаусса')\r\n plt.xlabel('Значення х')\r\n plt.ylabel('Значення y')\r\n plt.title('Перша інтерполяційна формула Гаусса')\r\n plt.show()\r\n\r\ndef start_data():\r\n x_list = np.arange(x_start.get(), x_end.get(), h.get()).tolist()\r\n y_list = []\r\n for i in range(len(x_list)):\r\n y_list.append(func_33(x_list[i]))\r\n key = x.get()\r\n y = func_33(key)\r\n res = {}\r\n res[y] = round(gauss(x_list, y_list, key), 3)\r\n messagebox.showinfo(\"Результат за заданими данними\", res)\r\n\r\nroot = Tk()\r\nroot.title(\"Перша інтерполяційна формула Гаусса\")\r\nroot.geometry(\"400x300+300+250\")\r\nx_start = DoubleVar()\r\nx_end = 
DoubleVar()\r\nh = DoubleVar()\r\nx = DoubleVar()\r\nx_start_label = Label(text=\"Початковий x:\")\r\nx_end_label = Label(text=\"Кінцевий x:\")\r\nh_label = Label(text=\"Введіть крок\")\r\nx_label = Label(text='Введіть х')\r\n\r\nx_start_label.grid(row=0, column=0, sticky=\"w\")\r\nx_end_label.grid(row=1, column=0, sticky=\"w\")\r\nh_label.grid(row=2, column=0, sticky=\"w\")\r\nx_label.grid(row=3, column=0, sticky=\"w\")\r\n\r\nx_start_entry = Entry(textvariable=x_start)\r\nx_end_entry = Entry(textvariable=x_end)\r\nh_entry = Entry(textvariable=h)\r\nx_entry = Entry(textvariable=x)\r\n\r\nx_start_entry.grid(row=0, column=1, padx=5, pady=5)\r\nx_end_entry.grid(row=1, column=1, padx=5, pady=5)\r\nh_entry.grid(row=2, column=1, padx=5, pady=5)\r\nx_entry.grid(row=3, column=1, padx=5, pady=5)\r\n\r\nresult_data = Button(text=\"Обрахувати\", command=start_data)\r\nresult_data.grid(row=4, column=1, padx=5, pady=5, sticky=\"e\")\r\n\r\nresult = Button(text=\"Завдання\", command=start)\r\nresult.grid(row=5, column=1, padx=5, pady=5, sticky=\"e\")\r\n\r\nplot = Button(text=\"Графік\", command=plot)\r\nplot.grid(row=6, column=1, padx=5, pady=5, sticky=\"e\")\r\n\r\nroot.mainloop()", "sub_path": "courses/2/gauss.py", "file_name": "gauss.py", "file_ext": "py", "file_size_in_byte": 5239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.exp", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 15, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 63, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 90, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 97, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 128, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 136, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 136, "usage_type": "name"}]} +{"seq_id": "5576862", "text": "import re\nimport collections\n\npattern1 = re.compile('\\[(.+)\\]')\npattern2 = re.compile('#(\\d+)')\n\nwith open('input.txt') as file:\n tracker = {}\n guard_ids = 
{}\n lines = file.read().splitlines()\n for line in lines:\n matches = pattern1.search(line)\n if matches:\n # print(\"{}\".format(matches.group(1)))\n tracker[matches.group(1)] = line\n matches = pattern2.search(line)\n if matches:\n # print(\"{}\".format(matches.group(1)))\n guard_ids[matches.group(1)] = 1\n sorted_tracker = collections.OrderedDict(sorted(tracker.items()))\n for k, v in sorted_tracker.items():\n print(\"{}\".format(v))\n # matches = pattern2.search(v)\n # if matches:\n # # print(\"{}\".format(matches.group(1)))\n # guard_id = matches.group(1)\n # guard_ids[guard_id] = []\n # # print(\"{} {}\".format(k, guard_id))\n # else:\n # #\n", "sub_path": "day_4_1.py", "file_name": "day_4_1.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "re.compile", "line_number": 4, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "54944194", "text": "import base64\nimport json\nimport logging\nfrom functools import wraps\n\nfrom aiohttp import web\n\n\nlog = logging.getLogger(__name__)\n\n\nclass User(object):\n\n def __init__(self, user_id):\n self.user_id = user_id\n # this is a hack to get rid of DB\n self.username = user_id.replace('_', ' ').title()\n\n def __repr__(self):\n return \"\".format(self.user_id)\n\n\nclass Connection(object):\n\n def __init__(self, connection_id):\n self.connection_id = connection_id\n\n def __repr__(self):\n return \"\".format(self.connection_id)\n\n\nclass Request(object):\n\n def __init__(self, auth, app, *,\n request_id=None, connection_id, **_unused):\n self.request_id = request_id\n self.connection = Connection(connection_id)\n self.app = app\n if auth:\n kind, value = auth.split(' ')\n assert kind == 'Tangle'\n auth = json.loads(\n base64.b64decode(value.encode('ascii')).decode('utf-8'))\n self.user = User(**auth)\n\n def __repr__(self):\n return \"\".format(\n getattr(self, 'user', self.connection))\n\n\n\ndef swindon_convention(f):\n @wraps(f)\n async def swindon_call_method(request):\n req = None\n try:\n metadata, args, kwargs = await request.json()\n req = Request(request.headers.get(\"Authorization\"),\n request.app, **metadata)\n result = await f(req, *args, **kwargs)\n return web.json_response(result)\n except Exception as e:\n log.exception(\"Error for %r\", req or request, exc_info=e)\n raise\n return swindon_call_method\n\n", "sub_path": "examples/presence/presence/convention.py", "file_name": "convention.py", "file_ext": "py", "file_size_in_byte": 1718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 43, "usage_type": "call"}, {"api_name": "aiohttp.web.json_response", "line_number": 61, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 61, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "119054770", "text": "# Copyright (c) Yugabyte, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations\n# under the License.\n\nfrom sys_detection import local_sys_conf\n\nfrom typing import Optional\n\nfrom yb.os_versions import is_compatible_os\n\n\ndef _llvm_url_for_tag(tag: str) -> str:\n return 'https://github.com/yugabyte/build-clang/releases/download/%s/yb-llvm-%s.tar.gz' % (\n tag, tag)\n\n\nCOMPILER_TYPE_TO_ARCH_TO_OS_TYPE_TO_LLVM_URL = {\n 'clang11': {\n 'x86_64': {\n 'centos7': _llvm_url_for_tag('v11.1.0-yb-1-1633099975-130bd22e-centos7-x86_64'),\n 'almalinux8': _llvm_url_for_tag('v11.1.0-yb-1-1633143292-130bd22e-almalinux8-x86_64'),\n },\n 'aarch64': {\n 'centos8': _llvm_url_for_tag('v11.1.0-yb-1-1633544021-130bd22e-centos8-aarch64'),\n },\n },\n 'clang12': {\n 'x86_64': {\n 'centos7': _llvm_url_for_tag('v12.0.1-yb-1-1633099823-bdb147e6-centos7-x86_64'),\n 'almalinux8': _llvm_url_for_tag('v12.0.1-yb-1-1633143152-bdb147e6-almalinux8-x86_64'),\n },\n 'aarch64': {\n 'almalinux8': _llvm_url_for_tag('v12.0.1-yb-1-1648458260-bdb147e6-almalinux8-aarch64'),\n },\n }\n}\n\n\ndef get_llvm_url(compiler_type: str) -> Optional[str]:\n os_type_to_llvm_url = (\n COMPILER_TYPE_TO_ARCH_TO_OS_TYPE_TO_LLVM_URL.get(compiler_type) or {}\n ).get(local_sys_conf().architecture)\n if os_type_to_llvm_url is None:\n return None\n\n os_type = local_sys_conf().short_os_name_and_version()\n if os_type in os_type_to_llvm_url:\n return os_type_to_llvm_url[os_type]\n\n candidate_urls = [\n os_type_to_llvm_url[os_type_key]\n for os_type_key in os_type_to_llvm_url\n if is_compatible_os(os_type_key, os_type)\n ]\n if len(candidate_urls) > 1:\n raise ValueError(\"Ambiguous LLVM URLs: %s\" % candidate_urls)\n if not candidate_urls:\n return None\n candidate_url = candidate_urls[0]\n return candidate_url\n", "sub_path": "python/yb/llvm_urls.py", "file_name": "llvm_urls.py", "file_ext": "py", "file_size_in_byte": 2433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "sys_detection.local_sys_conf", "line_number": 50, "usage_type": "call"}, {"api_name": "sys_detection.local_sys_conf", "line_number": 54, "usage_type": "call"}, {"api_name": "yb.os_versions.is_compatible_os", "line_number": 61, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "406586456", "text": "#!/usr/bin/env python\nimport commands\nimport thread\nimport wx\nimport os\nfrom PTDBManager import PTDBManager\n\nclass PTCommand:\n __instance = None\n\n def __init__(self):\n pass\n\n def __new__(cls, *args, **kwargs):\n if cls.__instance == None:\n cls.__instance=object.__new__(cls, *args, **kwargs)\n return cls.__instance\n\n def publishModule(self, module, logCallback, completeCallback):\n thread.start_new_thread(self.publishModuleWithShell, (module, logCallback, completeCallback))\n\n def publishModuleWithShell(self, module, logCallback, completeCallback):\n codeRepoInfo = PTDBManager().getCodeRepo(module.codeRepoId)\n specRepoInfo = PTDBManager().getSpecRepo(module.specRepoId)\n if codeRepoInfo != None and specRepoInfo != None:\n moduelRemotePath = os.path.join(codeRepoInfo.remotePath, module.name)\n svnCopyToTag = \"svn copy %s/trunk 
%s/tags/%s -m \\\"release to %s\\\"\" % (moduelRemotePath, moduelRemotePath, module.localVersion, module.localVersion)\n self.logCommand(svnCopyToTag, logCallback)\n copyRet, copyOutput = commands.getstatusoutput(svnCopyToTag)\n self.logOutput(copyRet, copyOutput, logCallback)\n if copyRet == 0:\n podPush = \"cd %s; /usr/local/bin/pod repo-svn push %s %s.podspec\" % (\n module.localPath, specRepoInfo.name, module.name)\n self.logCommand(podPush, logCallback)\n pushRet, pushOutput = commands.getstatusoutput(podPush)\n self.logOutput(pushRet, pushOutput, logCallback)\n\n if pushRet == 0:\n wx.CallAfter(logCallback, \"publish module %s successfully!!!\\n\" % module.name)\n wx.CallAfter(completeCallback, True, module)\n else:\n wx.CallAfter(logCallback, \"push %s's podspec to repo failed!!!\\n\" % module.name)\n wx.CallAfter(completeCallback, False, module)\n else:\n wx.CallAfter(logCallback, \"copy module %s trunk to tags error!!!\\n\" % module.name)\n wx.CallAfter(completeCallback, False, module)\n else:\n wx.CallAfter(logCallback, \"podspec repo not exist!!!\\n\")\n wx.CallAfter(completeCallback, False, module)\n\n def addSpecRepo(self, specRepo, logCallback, completeCallback):\n thread.start_new_thread(self.addSpecRepoWithShell, (specRepo, logCallback, completeCallback))\n\n def addSpecRepoWithShell(self, specRepo, logCallback, completeCallback):\n addSpecRepo = \"/usr/local/bin/pod repo-svn add %s %s\" % (specRepo.name, specRepo.remotePath)\n self.logCommand(addSpecRepo, logCallback)\n copyRet, copyOutput = commands.getstatusoutput(addSpecRepo)\n self.logOutput(copyRet, copyOutput, logCallback)\n wx.CallAfter(completeCallback, specRepo)\n\n # Check pod command\n def checkPodCommand(self, logCallback, completeCallback):\n thread.start_new_thread(self.checkPodCommandWithShell, (logCallback, completeCallback))\n\n def checkPodCommandWithShell(self, logCallback, completeCallback):\n testPod = \"cd $HOME; /usr/local/bin/pod --version\"\n self.logCommand(testPod, logCallback)\n copyRet, copyOutput = commands.getstatusoutput(testPod)\n self.logOutput(copyRet, copyOutput, logCallback)\n wx.CallAfter(completeCallback, True)\n\n # log caller\n def logCommand(self, command, callback):\n wx.CallAfter(callback, \"%s\\n\" % command)\n\n def logOutput(self, stats, output, callback):\n wx.CallAfter(callback, \"status : %s\\n\" % stats)\n if len(output) > 0:\n wx.CallAfter(callback, \"%s\\n\" % output)", "sub_path": "PTCommand.py", "file_name": "PTCommand.py", "file_ext": "py", "file_size_in_byte": 3710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "thread.start_new_thread", "line_number": 20, "usage_type": "call"}, {"api_name": "PTDBManager.PTDBManager", "line_number": 23, "usage_type": "call"}, {"api_name": "PTDBManager.PTDBManager", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "commands.getstatusoutput", "line_number": 29, "usage_type": "call"}, {"api_name": "commands.getstatusoutput", "line_number": 35, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 39, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 40, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 43, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 45, "usage_type": "call"}, 
{"api_name": "wx.CallAfter", "line_number": 46, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 48, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 49, "usage_type": "call"}, {"api_name": "thread.start_new_thread", "line_number": 52, "usage_type": "call"}, {"api_name": "commands.getstatusoutput", "line_number": 57, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 59, "usage_type": "call"}, {"api_name": "thread.start_new_thread", "line_number": 63, "usage_type": "call"}, {"api_name": "commands.getstatusoutput", "line_number": 68, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 70, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 74, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 77, "usage_type": "call"}, {"api_name": "wx.CallAfter", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "445581993", "text": "import time\nfrom datetime import datetime\nfrom typing import Any, Dict, Optional\n\nimport requests\nfrom requests_oauthlib import OAuth2Session\n\n_API_HOST = 'https://api.aife.economie.gouv.fr/dila/legifrance-beta/lf-engine-app'\n_TOKEN_URL = 'https://oauth.aife.economie.gouv.fr/api/oauth/token'\n\n\ndef _get_legifrance_client(client_id: str, client_secret: str) -> OAuth2Session:\n data = {\n 'grant_type': 'client_credentials',\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'scope': 'openid',\n }\n response = requests.post(_TOKEN_URL, data=data)\n if 200 <= response.status_code < 300:\n token = response.json()\n client = OAuth2Session(client_id, token=token)\n return client\n raise LegifranceRequestError(f'Error when retrieving token: {response.json()}')\n\n\nHOUR = 60 * 60\n\n\ndef _extract_response_content(response: requests.Response) -> Dict:\n if 200 <= response.status_code < 300:\n return response.json()\n raise LegifranceRequestError(\n f'Request has status_code {response.status_code} and content {response.content.decode()}'\n )\n\n\ndef _request_consult_law_decree(cid: str, date: datetime, client: OAuth2Session) -> requests.Response:\n json_ = {'date': int(date.timestamp()) * 1000, 'textId': cid}\n url = _API_HOST + '/consult/lawDecree'\n return client.post(url, json=json_)\n\n\ndef _article_by_id(article_id: str, client: OAuth2Session) -> Dict:\n json_ = {'id': article_id}\n url = _API_HOST + '/consult/getArticle'\n response = client.post(url, json=json_)\n return _extract_response_content(response)\n\n\ndef _consult_law_decree(cid: str, date: Optional[datetime], client: OAuth2Session) -> Dict:\n return _extract_response_content(_request_consult_law_decree(cid, date or datetime.now(), client))\n\n\nclass LegifranceClient:\n def __init__(self, client_id: str, client_secret: str):\n \"\"\"\n Initialize Legifrance client with client_id and client_secret.\n\n Parameters\n ----------\n client_id: str\n client_id provided by Legifrance\n client_secret: str\n client_secret provided by Legifrance\n \"\"\"\n self._client_id: str = client_id\n self._client_secret: str = client_secret\n self._client: OAuth2Session = _get_legifrance_client(client_id, client_secret)\n self._last_compute_time = time.time()\n\n def __exit__(self, *args):\n self._client.close()\n\n def _update_client_if_necessary(self) -> None:\n elapsed = time.time() - self._last_compute_time\n if elapsed >= HOUR:\n self._last_compute_time = time.time()\n self._client = _get_legifrance_client(self._client_id, self._client_secret)\n\n def consult_law_decree(self, text_id: str, date: 
Optional[datetime] = None) -> Dict[str, Any]:\n \"\"\"\n Fetches the version of a law/decree/arrete by text identifier for a specific date.\n\n Parameters\n ----------\n text_id: str\n Identifier of the text\n date: Optional[datetime]\n Date of the version to retrieve. Default to datetime.now()\n \"\"\"\n self._update_client_if_necessary()\n return _consult_law_decree(text_id, date, self._client)\n\n def consult_article(self, article_id: str) -> Dict[str, Any]:\n \"\"\"\n Fetches article by id\n\n Parameters\n ----------\n article_id: str\n Identifier of the article\n \"\"\"\n self._update_client_if_necessary()\n return _article_by_id(article_id, self._client)\n\n\nclass LegifranceRequestError(Exception):\n pass\n", "sub_path": "leginorma/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 3626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.post", "line_number": 19, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 22, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 12, "usage_type": "name"}, {"api_name": "requests.Response", "line_number": 30, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 38, "usage_type": "name"}, {"api_name": "requests.Response", "line_number": 38, "usage_type": "attribute"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "name"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 51, "usage_type": "name"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 69, "usage_type": "name"}, {"api_name": "time.time", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "128493911", "text": "'''\n@Author: your name\n@Date: 2020-05-25 10:31:35\n@LastEditTime: 2020-05-25 10:53:10\n@LastEditors: Please set LastEditors\n@Description: In User Settings Edit\n@FilePath: /dataprocess/transform/rowconcat-neu.py\n'''\nimport pandas as pd\nimport argparse\nimport os\nimport shutil\nimport random\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--up_data_dir\", type=str)\nparser.add_argument(\"--down_data_dir\", type=str)\nparser.add_argument(\"--output_data_dir\", type=str)\nparser.add_argument(\"--cols\", type=str, default=None)\nparser.add_argument(\"--target\", 
type=str, default=None)\nargs = parser.parse_args()\n\nif os.path.exists(os.path.join(args.up_data_dir,'train.csv')) and os.path.exists(os.path.join(args.down_data_dir,'train.csv')):\n    mode = 'train'\n    up_dataset = pd.read_csv(os.path.join(args.up_data_dir,'train.csv'))\n    down_dataset = pd.read_csv(os.path.join(args.down_data_dir,'train.csv'))\nelif os.path.exists(os.path.join(args.up_data_dir,'val.csv')) and os.path.exists(os.path.join(args.down_data_dir,'val.csv')):\n    mode = 'val'\n    up_dataset = pd.read_csv(os.path.join(args.up_data_dir,'val.csv'))\n    down_dataset = pd.read_csv(os.path.join(args.down_data_dir,'val.csv'))\nelse:\n    raise IOError('Neither train.csv nor val.csv exists in the received directories')\n\nif len(up_dataset.columns) != len(down_dataset.columns):\n    raise ValueError('The two input files have different column structures, so their rows cannot be concatenated')\nelif (up_dataset.columns == down_dataset.columns).sum() != len(up_dataset.columns):\n    raise ValueError('The two input files have different column structures, so their rows cannot be concatenated')\n\nup_data = up_dataset\ndown_data = down_dataset\nprint('======================= before =======================')\nprint(up_data.head().append(up_data.tail()))\nprint(down_data.head().append(down_data.tail()))\n\nout_data = up_data.append(down_data)\nprint('======================= after =======================')\nprint(out_data.head().append(out_data.tail()))\n\nif not os.path.exists(args.output_data_dir):\n    os.makedirs(args.output_data_dir)\n\nout_data.to_csv(os.path.join(args.output_data_dir, '%s.csv' % mode), index=False)\n\n\nprint(\"Row concatenation finished; the result is stored in %s\" % (args.output_data_dir))\n", "sub_path": "datapreprocess/transform/rowconcat-neu.py", "file_name": "rowconcat-neu.py", "file_ext": "py", "file_size_in_byte": 2221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "116669081", "text": "from pyinotify import ProcessEvent\nfrom common import logger\nfrom os.path import isdir, exists\nfrom lib.common.filelib import filelist, modifiedlist\nfrom lib.common.config import config\nfrom threading import Thread\nimport pyinotify\n\n\nclass FileEvtHandler(ProcessEvent):\n def __init__(self, localpath, commander):\n self.localPath = localpath\n self.commander = commander\n self.__lastmovefrom = None\n self.__lastmovefrompath = None\n\n # this function should be called in various handlers\n # when a folder is moved from a under-watching folder to\n # an outsider position, this function is used to remove\n # the source path\n def checkmvsource(self):\n if self.__lastmovefrom is not None:\n self.__lastmovefrom = None\n self.deletehandler(self.__lastmovefrompath)\n\n def uploadhandler(self, event):\n self.checkmvsource()\n if isinstance(event, pyinotify.Event):\n localpath = event.pathname\n elif isinstance(event, str):\n localpath = event\n else:\n logger.critical('uploader cannot handle event type %s' % str(event))\n return\n\n if exists(localpath):\n if isdir(localpath):\n self.commander.push(\"update dir remote\", localpath, None)\n else:\n self.commander.push(\"update file remote\", localpath, None)\n else:\n logger.warning('try to upload from an invalid path %s' % localpath)\n\n def deletehandler(self, event):\n self.checkmvsource()\n localpath = event.pathname if isinstance(event, pyinotify.Event) else str(event)\n self.commander.push(\"rm remote\", localpath, None)\n\n def movehandler(self, event, type):\n \"\"\"\n **important** movehandler will not be able to handle a move operation between\n different monitoring folders\n :param event:\n :param type:\n :return:\n \"\"\"\n if type == 'from':\n self.__lastmovefrom = event.cookie\n self.__lastmovefrompath = event.pathname\n elif type == 'to':\n if event.cookie == self.__lastmovefrom:\n # the file is moved from the same monitoring folder\n self.commander.push(\"mv remote\", self.__lastmovefrompath, event.pathname)\n # reset the move cache\n self.__lastmovefrom = None\n else:\n self.checkmvsource()\n localpath = event.pathname\n if isdir(localpath):\n items = filelist(localpath)\n for item in items:\n self.uploadhandler(item)\n else:\n self.uploadhandler(localpath)\n\n # when we create a file, a CLOSE_WRITE event will always be raised\n # and hence the IN_CREATE may lead to redundant operations. 
such\n # operations may invalidate the list __justuploaded/__justdownloaded\n\n def process_IN_CREATE(self, event):\n logger.debug(\"IN_CREATE event:\" + event.pathname)\n # when copying a file, two signals will be raised\n # - a create signal when start copying this file\n # - a close_write signal when finish\n self.uploadhandler(event.pathname)\n\n # def process_IN_MODIFY(self, event):\n # logger.debug(\"IN_MODIFY event:\" + event.pathname)\n # self.uploadhandler(event)\n\n def process_IN_CLOSE_WRITE(self, event):\n logger.debug(\"IN_CLOSE_WRITE event:\" + event.pathname)\n self.uploadhandler(event)\n\n def process_IN_MOVED_FROM(self, event):\n logger.debug(\"IN_MOVED_FROM event:\" + event.pathname)\n self.movehandler(event, 'from')\n\n def process_IN_MOVED_TO(self, event):\n logger.debug(\"IN_MOVED_TO event:\" + event.pathname)\n self.movehandler(event, 'to')\n\n def process_IN_DELETE(self, event):\n logger.debug(\"IN_DELETE event:\" + event.pathname)\n self.deletehandler(event)\n\n\ndef local_monitor(path, commander):\n wm = pyinotify.WatchManager()\n wm.add_watch(path, pyinotify.ALL_EVENTS, rec=True, auto_add=True)\n hnd = FileEvtHandler(path, commander)\n\n # before start, we scan local changes\n files = modifiedlist(path, config('bsync', 'lastupdated'))\n for f in files:\n hnd.uploadhandler(f)\n\n notifier = pyinotify.Notifier(wm, hnd)\n proc = Thread(\n target=notifier.loop,\n )\n proc.start()\n logger.info(\"local monitor started on %s\" % path)\n return proc", "sub_path": "lib/monitor/FileEvtHandler.py", "file_name": "FileEvtHandler.py", "file_ext": "py", "file_size_in_byte": 4480, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pyinotify.ProcessEvent", "line_number": 10, "usage_type": "name"}, {"api_name": "pyinotify.Event", "line_number": 28, "usage_type": "attribute"}, {"api_name": "common.logger.critical", "line_number": 33, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 37, "usage_type": "call"}, {"api_name": "common.logger.warning", "line_number": 42, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 42, "usage_type": "name"}, {"api_name": "pyinotify.Event", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 69, "usage_type": "call"}, {"api_name": "lib.common.filelib.filelist", "line_number": 70, "usage_type": "call"}, {"api_name": "common.logger.debug", "line_number": 81, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 81, "usage_type": "name"}, {"api_name": "common.logger.debug", "line_number": 92, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 92, "usage_type": "name"}, {"api_name": "common.logger.debug", "line_number": 96, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "common.logger.debug", "line_number": 100, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 100, "usage_type": "name"}, {"api_name": "common.logger.debug", "line_number": 104, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 104, "usage_type": "name"}, {"api_name": "pyinotify.WatchManager", "line_number": 109, "usage_type": "call"}, {"api_name": "pyinotify.ALL_EVENTS", "line_number": 110, "usage_type": "attribute"}, {"api_name": "lib.common.filelib.modifiedlist", 
"line_number": 114, "usage_type": "call"}, {"api_name": "lib.common.config.config", "line_number": 114, "usage_type": "call"}, {"api_name": "pyinotify.Notifier", "line_number": 118, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 119, "usage_type": "call"}, {"api_name": "common.logger.info", "line_number": 123, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "274845383", "text": "from flask import Flask, request, jsonify\nimport numpy as np\nimport math\nnp.warnings.filterwarnings('ignore')\n\napp = Flask(__name__)\nglobal model_features\n\n# root\n@app.route(\"/\")\ndef index():\n \"\"\"\n this is a root dir of my server\n :return: str\n \"\"\"\n return \"This is root!!!!\"\n\n\n# GET\n@app.route('/users/')\ndef hello_user(user):\n \"\"\"\n this serves as a demo purpose\n :param user:\n :return: str\n \"\"\"\n return \"Hello %s!\" % user\n\n\n# POST\n@app.route('/api/post_comparing_mat', methods=['POST'])\ndef set_comparing_mat():\n \"\"\"\n predicts requested text whether it is ham or spam\n :return: json\n \"\"\"\n json = request.get_json()\n model_x_array = np.fromstring(json['model_x_array'], dtype=float, sep=' ')\n model_y_array = np.fromstring(json['model_y_array'], dtype=float, sep=' ')\n global model_features\n model_features = np.vstack((model_x_array, model_y_array)).T\n print(\"base x array = \" + \" \".join(map(str,model_features[:,0])))\n print(\"base y array = \" + \" \".join(map(str,model_features[:,1])))\n return jsonify({'msg': 'SUCCESS'})\n\n# POST\n@app.route('/api/post_some_data', methods=['POST'])\ndef get_text_prediction():\n \"\"\"\n predicts requested text whether it is ham or spam\n :return: json\n \"\"\"\n json = request.get_json()\n print(json)\n input_x_array = np.fromstring(json['input_x_array'], dtype=float, sep=' ')\n input_y_array = np.fromstring(json['input_y_array'], dtype=float, sep=' ')\n input_features = np.vstack((input_x_array, input_y_array)).T\n pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])\n unpad = lambda x: x[:, :-1]\n Y = pad(model_features)\n X = pad(input_features)\n A, res, rank, s = np.linalg.lstsq(X, Y)\n A[np.abs(A) < 1e-10] = 0 # set really small values to zero\n transform = lambda x: unpad(np.dot(pad(x), A))\n\n #Image of input pose onto model pose\n input_transform = transform(input_features)\n output_x_array = \" \".join(map(str,input_transform[:,0]))\n output_y_array = \" \".join(map(str,input_transform[:,1]))\n print(\"input_x_array = \"+ json['input_x_array'])\n print(\"input_x_array = \"+ json['input_y_array'])\n print(\"output_x_array = \" + output_x_array)\n print(\"output_y_array = \" + output_y_array)\n return jsonify({'output_x_array': output_x_array, 'output_y_array':output_y_array})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)", "sub_path": "app/src/main/res/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "numpy.warnings.filterwarnings", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.warnings", "line_number": 4, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.fromstring", "line_number": 38, "usage_type": "call"}, {"api_name": 
"numpy.fromstring", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.fromstring", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "97490495", "text": "# encoding=utf8\n\nimport os\nimport re\nimport scipy\nimport pickle\nimport numpy as np\nfrom sqlitedict import SqliteDict\n\n# Paths for all resources for the bot.\nRESOURCE_PATH = {\n 'INTENT_RECOGNIZER': 'models/intent_recognizer.pkl',\n 'TAG_CLASSIFIER': 'models/tag_classifier.pkl',\n 'TEXT_VECTORIZER': 'models/text_vectorizer.pkl',\n 'THREAD_EMBEDDINGS_FOLDER': 'data/thread_embeddings_by_tags',\n 'WORD_EMBEDDINGS': 'data/starspace_embeddings.sqlite',\n 'STOP_WORDS': 'data/stopwords.pkl',\n 'CHIT-CHAT_MODEL_WEIGHTS': 'models/chit-chat_model_weights.hdf5'\n}\n\nfor key, value in RESOURCE_PATH.items():\n RESOURCE_PATH[key] = os.path.join(os.path.dirname(__file__), value)\n\ndef text_prepare(text, stopwords_set):\n \"\"\"Performs tokenization and simple preprocessing.\"\"\"\n \n replace_by_space_re = re.compile('[/(){}\\[\\]|@,;]')\n bad_symbols_re = re.compile('[^0-9a-z #+_]')\n\n text = text.lower()\n text = replace_by_space_re.sub(' ', text)\n text = bad_symbols_re.sub('', text)\n text = ' '.join([x for x in text.split() if x and x not in stopwords_set])\n\n return text.strip()\n\ndef load_embeddings(embeddings_path):\n \"\"\"Loads pre-trained word embeddings from tsv file.\n\n Args:\n embeddings_path - path to the embeddings file.\n\n Returns:\n embeddings - dict mapping words to vectors;\n embeddings_dim - dimension of the vectors.\n \"\"\"\n embeddings = SqliteDict(RESOURCE_PATH['WORD_EMBEDDINGS'])\n embeddings_dim = embeddings[next(embeddings.keys())].shape[0]\n \n return embeddings, embeddings_dim\n\ndef question_to_vec(question, embeddings, dim):\n \"\"\"\n question: a string\n embeddings: dict where the key is a word and a value is its' embedding\n dim: size of the representation\n\n result: vector representation for the question\n \"\"\"\n result = np.zeros(dim)\n n = 0\n for word in question.split(' '):\n if word in embeddings:\n result += embeddings[word]\n n += 1\n \n return result / max(1, n)\n\ndef pickle_object(obj, filename, protocol=4):\n \"\"\"Pickles file to the file.\"\"\"\n with open(filename, 'wb') as f:\n pickle.dump(obj, f, protocol)\n\ndef unpickle_file(filename):\n \"\"\"Returns the result of unpickling the file content.\"\"\"\n with open(filename, 'rb') as f:\n return pickle.load(f)\n \ndef cos_cdist(matrix, vector):\n \"\"\"Computes the cosine distances between each row of matrix and vector.\"\"\"\n v = vector.reshape(1, -1)\n return 
scipy.spatial.distance.cdist(matrix, v, 'cosine').reshape(-1)\n\n# chit-chat utils\n\nMAX_LEN=32\n\nALPHABET = [' ', '!', '\"', '#', '$', '%', '&', \"'\", ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '=', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', ']', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '~', '\\x92', '\\x93', '\\x94', '\\x96', '\\x97', '£', '¹', 'Ç', 'Õ', 'à', 'ä', 'ç', 'è', 'é', 'ê', 'í', 'ñ', 'ó', 'ù', 'û']\n\nSTART_SYMBOL = 'START'\nEND_SYMBOL = 'END'\nPAD_SYMBOL = 'PAD'\nSPECIAL_CHARACHTERS = [PAD_SYMBOL, START_SYMBOL, END_SYMBOL]\n\nchar2id = {c:i for i, c in enumerate(SPECIAL_CHARACHTERS + ALPHABET)}\nid2char = {i:c for i, c in enumerate(SPECIAL_CHARACHTERS + ALPHABET)}\n\nLATENT_DIM = 384\nEMBEDDINGS_DIM = 16\nVOCAB_SIZE = len(char2id)\n\ndef text2seq(text, max_len):\n \"\"\"Converts sequence of chars to sequence of indices and preserves special characters.\"\"\"\n start = [char2id[START_SYMBOL]]\n chars_ids = [char2id.get(text[i], char2id[' ']) for i in range(min(max_len - 2, len(text)))]\n end = [char2id[END_SYMBOL]]\n padding = [char2id[PAD_SYMBOL]] * max(0, max_len - len(text) - 2) \n\n return start + chars_ids + end + padding\n\ndef seq2text(seq, remove_special=True):\n \"\"\"Converts sequence of indices to sequence of char and removes special characters.\"\"\"\n text = ''.join(map(id2char.get, seq))\n\n if remove_special:\n for spc in SPECIAL_CHARACHTERS:\n text = text.replace(spc, ' ')\n\n text = re.sub(r'\\s+', ' ', text).strip()\n \n return text\n\ndef GCA_response(encoder, decoder, context, max_steps=MAX_LEN):\n \"\"\"Uses keras encoder and decoder models for generating response using context.\"\"\"\n rnn_state = [np.zeros((1, LATENT_DIM))]\n\n context = np.array(text2seq(context, MAX_LEN)).reshape(1, -1)\n rnn_state = [encoder.predict([context] + rnn_state)]\n \n response_partial = np.full((1, MAX_LEN), char2id[PAD_SYMBOL])\n response_partial[0, 0] = char2id[START_SYMBOL]\n \n response = []\n for i in range(1, min(max_steps, MAX_LEN)):\n output_tokens, *rnn_state = decoder.predict([response_partial] + rnn_state)\n \n sampled_token_index = np.argmax(output_tokens[0, 0])\n if sampled_token_index == char2id[END_SYMBOL]: break\n \n response.append(sampled_token_index)\n response_partial[0, 0] = sampled_token_index\n \n text = seq2text(response, remove_special=False)\n\n return text\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5177, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlitedict.SqliteDict", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 72, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 77, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 82, "usage_type": "call"}, {"api_name": 
"scipy.spatial", "line_number": 82, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "496320467", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass co_train_classifier(nn.Module):\n def __init__(self):\n super(co_train_classifier, self).__init__()\n self.c1 = nn.Conv2d(3, 128, kernel_size=3, padding=1)\n self.b1 = nn.BatchNorm2d(128)\n self.r1 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.c2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n self.b2 = nn.BatchNorm2d(128)\n self.r2 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.c3 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n self.b3 = nn.BatchNorm2d(128)\n self.r3 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.m1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.d1 = nn.Dropout2d(p=0.5)\n\n self.c4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)\n self.b4 = nn.BatchNorm2d(256)\n self.r4 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.c5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.b5 = nn.BatchNorm2d(256)\n self.r5 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.c6 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.b6 = nn.BatchNorm2d(256)\n self.r6 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.m2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.d2 = nn.Dropout2d(p=0.5)\n\n self.c7 = nn.Conv2d(256, 512, kernel_size=3, padding=1)\n self.b7 = nn.BatchNorm2d(512)\n self.r7 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.c8 = nn.Conv2d(512, 256, kernel_size=3, padding=1)\n self.b8 = nn.BatchNorm2d(256)\n self.r8 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n self.c9 = nn.Conv2d(256, 128, kernel_size=3, padding=1)\n self.b9 = nn.BatchNorm2d(128)\n self.r9 = nn.LeakyReLU(negative_slope=0.1, inplace=False)\n\n self.fc = nn.Linear(128, 10)\n self.sf = nn.Softmax(dim = 1)\n\n def forward(self, x):\n x = self.c1(x)\n x = self.b1(x)\n x = self.r1(x)\n x = self.c2(x)\n x = self.b2(x)\n x = self.r2(x)\n x = self.c3(x)\n x = self.b3(x)\n x = self.r3(x)\n x = self.m1(x)\n x = self.d1(x)\n\n\n x = self.c4(x)\n x = self.b4(x)\n x = self.r4(x)\n x = self.c5(x)\n x = self.b5(x)\n x = self.r5(x)\n x = self.c6(x)\n x = self.b6(x)\n x = self.r6(x)\n x = self.m2(x)\n x = self.d2(x)\n\n x = self.c7(x)\n x = self.b7(x)\n x = self.r7(x)\n x = self.c8(x)\n x = self.b8(x)\n x = self.r8(x)\n x = self.c9(x)\n x = self.b9(x)\n x = self.r9(x)\n x = torch.mean(torch.mean(x, dim=3), dim=2)\n x_logit = self.fc(x)\n return x_logit", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 2804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 10, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": 
"torch.nn.LeakyReLU", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "469604828", "text": "import requests\nfrom bs4 import BeautifulSoup\n\ndef download_page(url):\n headers = {\"User-Agent\" : \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\"} #带头访问\n r = requests.get(url, headers=headers) #request方法获取url\n return r.text #返回下载的页面\n\ndef get_content(html, page):\n output = \"\"\"第{}页 作者:{} 性别:{} 年龄:{} 点赞:{} 评论:{}\\n{}\\n------------\\n\"\"\" #输出,{}占位,format 填入\n soup = BeautifulSoup(html , 'html.parser') #bautifulsoup 读取已下载的页面(字符串),指定解析器 html.parser\n con = soup.find(id='content-left') #find 主体范围\n con_list = con.find_all('div', class_=\"article\") #找到文章列表\n for i in con_list:\n author = i.find('h2').string #作者名称\n content = i.find('div', class_='content').find('span').get_text() #获取内容 \n stats = i.find('div', class_='stats') \n vote = stats.find('span', class_='stats-vote').find('i', class_='number').string #评论\n comment = stats.find('span', class_='stats-comments').find('i', class_='number').string #点赞数\n author_info = i.find('div', class_='articleGender') #获取作者年龄\n if author_info is not None:\n calss_list = author_info['class']\n if 'womenIcon' in calss_list:\n gender = '女'\n elif 'manIcon' in calss_list:\n gender = '男'\n else:\n gender = ''\n age = author_info.string \n else:\n gender = ''\n age = ''\n\n save_text(output.format(page, author, gender, age, vote, comment, content))\n\ndef save_text(*args): #保存文件到txt\n for i in args:\n with open('qiubai.txt', 'a', encoding='utf-8') as f:\n f.write(i)\n\ndef main():\n for i in range(1, 14):\n url = 'https://qiushibaike.com/text/page/{}'.format(i) \n html = download_page(url)\n get_content(html, i)\n\nif __name__ == '__main__':\n main()", "sub_path": "Download/DownBaike.py", "file_name": "DownBaike.py", "file_ext": "py", "file_size_in_byte": 2351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "537904016", "text": "#!/usr/bin/env python\n\nimport re\nimport string\nfrom urlparse import urlparse, urljoin\nfrom functools import wraps\nfrom threading import Thread\n\nfrom app import db\nfrom app.models import Annotation\n\nfrom flask import request, url_for, current_app\n\n\ndef land_url():\n return url_for(\"main.land\")\n\n\ndef home_url():\n return url_for(\"main.dashboard\")\n\n\ndef is_safe_url(target):\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in (\"http\", \"https\") 
and ref_url.netloc == test_url.netloc\n\n\ndef async_threaded(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n thread = Thread(target=func, args=args, kwargs=kwargs)\n thread.start()\n return wrapper\n\n\nclass ContextThread(Thread):\n def __init__(self, *args, **kwargs):\n super(ContextThread, self).__init__(*args, **kwargs)\n\n self.app = current_app._get_current_object()\n\n def run(self):\n with self.app.app_context():\n super(ContextThread, self).run()\n\n\nclass AsyncImport(object):\n\n # WIPASYNC\n\n def __init__(self, context):\n\n self.app = context\n\n @async_threaded\n def refresh(self, annotations):\n\n with self.app.app_context():\n\n for annotation in annotations:\n\n if not annotation[\"id\"]:\n annotation[\"id\"] = None\n\n existing = Annotation.query_by_id(annotation[\"id\"])\n\n if existing:\n\n if existing.is_protected:\n\n continue\n\n elif not existing.is_protected:\n\n db.session.delete(existing)\n db.session.commit()\n\n importing = Annotation()\n importing.deserialize(annotation)\n\n try:\n db.session.add(importing)\n db.session.commit()\n\n except:\n db.session.rollback()\n\n @async_threaded\n def add(self, annotations):\n\n with self.app.app_context():\n\n for annotation in annotations:\n\n if not annotation[\"id\"]:\n annotation[\"id\"] = None\n\n existing = Annotation.query_by_id(annotation[\"id\"])\n\n if existing:\n\n continue\n\n elif not existing:\n\n importing = Annotation()\n importing.deserialize(annotation)\n\n try:\n db.session.add(importing)\n db.session.commit()\n\n except:\n db.session.rollback()\n\n\nclass SortIt(object):\n\n @staticmethod\n def build_alphabetized_index(results,\n indexing_key=\"name\",\n ignored_articles=[\"the\", \"an\", \"a\"]):\n \"\"\" Builds an alphabetized index i.e.\n {\n 'A':\n [{'name': u'Apple'}],\n 'B':\n [{'name': u'Banana'}],\n 'Z':\n [{'name': u'Zucchini'}],\n '0':\n [{'name': u'007'}],\n '9':\n [{'name': u'9000'}]\n }\n results: List[dict] - Must be a list of dictionaries.\n \"\"\"\n\n index_characters = string.ascii_uppercase + string.digits\n\n raw_punctuation = string.punctuation\n re_punctuation = \"[{0}]\".format(re.escape(raw_punctuation))\n re_compiled_punctuation = re.compile(re_punctuation)\n\n raw_articles = ignored_articles\n re_articles = \"|\".join([r\"\\b^{0}\\b\".format(a) for a in raw_articles])\n re_compiled_articles = re.compile(re_articles, re.IGNORECASE)\n\n index = {}\n\n for character in index_characters:\n\n index[character] = []\n\n for item in results:\n\n # Restrive sorting name from current dictionary\n sorting_name = item[indexing_key]\n\n # Normalize sorting_name\n sorting_name = re_compiled_punctuation.sub(\"\", sorting_name)\n sorting_name = re_compiled_articles.sub(\"\", sorting_name)\n sorting_name = sorting_name.strip().upper()\n\n if sorting_name.startswith(character):\n\n index[character].append(item)\n\n # Remove empty index items\n filtered_index = dict((k, v) for k, v in index.iteritems() if v)\n\n return filtered_index\n\n @staticmethod\n def by_name(results, sort_key=\"name\"):\n \"\"\" results: List[dict] - Must be a list of dictionaries.\n \"\"\"\n return sorted(results, key=lambda k: k[sort_key], reverse=False)\n\n @staticmethod\n def by_frequency(results, sort_key=\"frequency\"):\n \"\"\" results: List[dict] - Must be a list of dictionaries.\n \"\"\"\n return sorted(results, key=lambda k: k[sort_key], reverse=True)\n", "sub_path": "app/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 4886, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "flask.url_for", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 20, "usage_type": "call"}, {"api_name": "urlparse.urlparse", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.host_url", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "urlparse.urlparse", "line_number": 25, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.host_url", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 32, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 30, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.current_app._get_current_object", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 41, "usage_type": "name"}, {"api_name": "app.models.Annotation.query_by_id", "line_number": 66, "usage_type": "call"}, {"api_name": "app.models.Annotation", "line_number": 66, "usage_type": "name"}, {"api_name": "app.db.session.delete", "line_number": 76, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 76, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 76, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 77, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 77, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 77, "usage_type": "name"}, {"api_name": "app.models.Annotation", "line_number": 79, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 83, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 83, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 83, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 84, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 84, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 84, "usage_type": "name"}, {"api_name": "app.db.session.rollback", "line_number": 87, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 87, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 87, "usage_type": "name"}, {"api_name": "app.models.Annotation.query_by_id", "line_number": 99, "usage_type": "call"}, {"api_name": "app.models.Annotation", "line_number": 99, "usage_type": "name"}, {"api_name": "app.models.Annotation", "line_number": 107, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 111, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 111, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 111, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 112, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 112, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 112, "usage_type": "name"}, {"api_name": "app.db.session.rollback", "line_number": 115, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 115, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 115, "usage_type": "name"}, {"api_name": "string.ascii_uppercase", "line_number": 140, "usage_type": "attribute"}, {"api_name": "string.digits", 
"line_number": 140, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 142, "usage_type": "attribute"}, {"api_name": "re.escape", "line_number": 143, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 144, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 148, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 148, "usage_type": "attribute"}]} +{"seq_id": "399693823", "text": "\nimport re\nfrom collections import namedtuple\n\nMp3 = namedtuple(\"Mp3\", \"artist title track album\")\n\nar = re.compile(r\"^(.*?(@|On) )?(.*?)( \\(.*\\))?$\")\n\nrepltmpl = [\n \"%artist% $iflonger($2,0,$2,-) $3\",\n \"$if(%track%,$fmtNum(%track%,2),) - $caps(%artist%)\"\n]\n\n\ndef printmatches(ml):\n print(\"\\n\".join(\"{0:>4} = {1!r}\".format(\"$\" + str(i), v) for i, v in enumerate(ml)))\n\ndef getmatches(track, regex=ar):\n #track = mp3s[id]\n #print(\"track: {0}\".format(track))\n m = re.search(regex, track.title)\n if not m:\n print(\"no match!\")\n return None\n matches = [m.string] + list(m.groups())\n print(\"getmatches():\")\n printmatches(matches)\n print\n return matches\n\nsplitre = re.compile(r\"([(),]|%[\\w_]+%|\\$\\d+|\\$[\\w_]+)\")\nrepre = re.compile(r\"%(\\w+)%|\\$(\\d+)|\\$(\\w+)\")\n\noutfmts = ['field(\"{0}\")+', 'match({0})+', 'func(\"{0}\")']\nlfilt = lambda l: [outfmts[i].format(v) for i, v in enumerate(l) if v][0]\n\ndef process(s):\n if s == \"(\":\n return '('\n elif s == \",\":\n return '\"\",'\n elif s ==\")\":\n return '\"\")+'\n m = repre.match(s)\n if not m:\n return '\"' + s + '\"+'\n res = lfilt(m.groups())\n print(\"process('{0}') => '{1}'\".format(s, res))\n return res\n\ndef parserepl(tmpl):\n #tmpl = repltmpl[id]\n #print(\"template: {0}\".format(tmpl))\n\n bits = splitre.split(tmpl)\n printmatches(bits)\n print\n\n obits = [process(v) for v in bits if v]\n printmatches(obits)\n print\n\n res = \"\".join(obits).strip(\"+\")\n print(\"res = {0}\".format(res))\n\n return res\n\nmp3s = [\n Mp3(\"Crookers\", \"Animals - Micromix 3\", \"1\", \"\"),\n Mp3(\"Crookers\", \"Live @ Flash in Punk (08.08.2009)\", \"\", \"\"),\n Mp3(\"Crookers\", \"Live On JJJ Mixup (01.03.2007)\", \"\", \"\")\n]\nregexes = [\n (r\"^(.*?(@|On) )?(.*?)( \\(.*\\))?$\", \"%artist% $iflonger($2,0,$2,-) $3 - js\"),\n (r\"^(.*?(@|On) )?(.*?)( \\(.*\\))?$\", \"$if(%track%,$fmtNum(%track%,2),) - $caps(%artist%) = $caps(%title%)\")\n]\n\n_funcs = {\n \"if\": lambda c, t, f: t if c else f,\n \"iflonger\": lambda s, l, t, f: t if len(s) > l else f,\n \"caps\": lambda s: s.capitalize(),\n \"fmtNum\": lambda s, w: (s if s.isdigit() else \"0\").zfill(int(w))\n\n}\n\ndef fn(track, ml):\n\n def execfn(fstr):\n def field(key):\n res = getattr(track, key, \"\")\n print(\"field(key='{0}') => {1}\".format(key, res))\n return res\n\n def match(index):\n res = ml[index] if index < len(ml) else \"\"\n if res is None:\n res = \"\"\n print(\"match(index={0}) => {1}\".format(index, res))\n return res\n\n def func(name):\n if name not in _funcs:\n raise NameError(\"No function {0}!\".format(name))\n res = _funcs[name]\n print(\"func(name='{0}') => {1}\".format(name, res))\n return res\n\n return eval(fstr)\n return execfn\n\ndef context(mp3id, reid):\n track = mp3s[mp3id]\n inre, rtmpl = regexes[reid]\n print\n print(\"track: {0}\".format(track))\n print(\"inre: {0}\".format(inre))\n print(\"rtmpl: {0}\".format(rtmpl))\n print\n\n repfunc = parserepl(rtmpl)\n ml = getmatches(track, inre)\n #printmatches(ml)\n #print\n\n exfn = fn(track, ml)\n\n res = 
exfn(repfunc)\n print(\"** wooo **, res = '{0}'\".format(res))\n print\n\n\ncontext(0, 1)\n#m = getmatches(id=0)\n#m = getmatches(id=1)\n#m = getmatches(id=2)\n", "sub_path": "mypy/spiralx/fileproc/regex-mp3tag.py", "file_name": "regex-mp3tag.py", "file_ext": "py", "file_size_in_byte": 3147, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "collections.namedtuple", "line_number": 5, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "re.search", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "363286779", "text": "#!/usr/bin/env python3\n\nimport connexion\nimport neo4j\n\nfrom flask import g, render_template\n\nfrom improving_agent import encoder\nfrom improving_agent.src import config\nfrom improving_agent.src.spoke_constants import BIOLINK_SPOKE_NODE_MAPPINGS\nfrom improving_agent.util import get_evidara_logger\n\ndriver = neo4j.GraphDatabase.driver(\n config.NEO4J_URI,\n auth=(config.NEO4J_USER, config.NEO4J_PASS),\n max_connection_lifetime=200,\n)\nlogger = get_evidara_logger(__name__)\n\n\ndef get_db():\n \"\"\"Returns a neo4j driver.session object connected to the SPOKE\n database\n\n Parameters\n ----------\n None\n\n Returns\n -------\n g.db (driver.session): active neo4j database session\n \"\"\"\n if not hasattr(g, \"db\"):\n g.db = driver.session()\n return g.db\n\n\nfrom improving_agent.src import core # noqa: #E402, E401 \n\napp = connexion.App(__name__, specification_dir=\"./openapi/\")\napp.app.json_encoder = encoder.JSONEncoder\napp.add_api(\n \"openapi.yaml\",\n arguments={\"title\": \"imProving Agent - a query (im)proving Autonomous Relay Agent\"},\n pythonic_params=True,\n)\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"returns welcome home page\"\"\"\n node_types = list(BIOLINK_SPOKE_NODE_MAPPINGS.keys())\n return render_template(\"home.html\", node_types=node_types)\n\n\n@app.app.teardown_appcontext\ndef close_db(error):\n if hasattr(g, \"db\"):\n g.db.close()\n\n\ndef main():\n logger.info(\"starting improving agent!\")\n app.run(port=8080)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "app/improving_agent/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "neo4j.GraphDatabase.driver", "line_number": 13, "usage_type": "call"}, {"api_name": "neo4j.GraphDatabase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "improving_agent.src.config.NEO4J_URI", "line_number": 14, "usage_type": "attribute"}, {"api_name": "improving_agent.src.config", "line_number": 14, "usage_type": "name"}, {"api_name": "improving_agent.src.config.NEO4J_USER", "line_number": 15, "usage_type": "attribute"}, {"api_name": "improving_agent.src.config", "line_number": 15, "usage_type": "name"}, {"api_name": "improving_agent.src.config.NEO4J_PASS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "improving_agent.util.get_evidara_logger", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.g", "line_number": 33, "usage_type": "argument"}, {"api_name": "flask.g.db", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.g.db", "line_number": 35, "usage_type": 
"attribute"}, {"api_name": "flask.g", "line_number": 35, "usage_type": "name"}, {"api_name": "connexion.App", "line_number": 40, "usage_type": "call"}, {"api_name": "improving_agent.encoder.JSONEncoder", "line_number": 41, "usage_type": "attribute"}, {"api_name": "improving_agent.encoder", "line_number": 41, "usage_type": "name"}, {"api_name": "improving_agent.src.spoke_constants.BIOLINK_SPOKE_NODE_MAPPINGS.keys", "line_number": 52, "usage_type": "call"}, {"api_name": "improving_agent.src.spoke_constants.BIOLINK_SPOKE_NODE_MAPPINGS", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.g", "line_number": 58, "usage_type": "argument"}, {"api_name": "flask.g.db.close", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "422403152", "text": "import camelot\r\nimport spacy\r\nimport pandas as pd \r\nnlp = spacy.load('en_core_web_sm')\r\nimport fitz\r\nimport re\r\nfrom spacy.matcher import Matcher\r\nimport datefinder\r\nimport urllib.request\r\n\r\n\r\ndef mutual_funds(doc):\r\n \r\n \r\n tables = camelot.read_pdf(doc,pages='all')\r\n document = fitz.open(doc)\r\n\r\n page1 = document.loadPage(0)\r\n page1text = page1.getText(\"text\")\r\n text_extracted = nlp(page1text)\r\n \r\n matcher_rDate = Matcher(nlp.vocab)\r\n matcher_mDate = Matcher(nlp.vocab)\r\n \r\n pattern1 = [{'LOWER': 'record'},{'LOWER': 'date'}]\r\n pattern2 = [{'LOWER': 'book'},{'LOWER': 'closure'}]\r\n matcher_rDate.add('RecordDate', None, pattern1, pattern2)\r\n\r\n pattern1 = [{'LOWER': 'maturity'},{'LOWER': 'date'}]\r\n pattern2 = [{'LOWER': 'maturation'},{'LOWER': 'date'}]\r\n matcher_mDate.add('MaturityDate', None, pattern1, pattern2)\r\n \r\n found_matches_rDate = matcher_rDate(text_extracted)\r\n\r\n found_matches_mDate = matcher_mDate(text_extracted)\r\n \r\n sents = [sent for sent in text_extracted.sents]\r\n \r\n if(len(tables)==1):\r\n #df = pd.DataFrame()\r\n d=tables[0].df\r\n new_header = d.iloc[0] #grab the first row for the header\r\n d = d[1:] #take the data less the header row\r\n d.columns = new_header #set the header row as the df header\r\n for col in d.columns:\r\n if(bool(re.search(r\"ISIN\", col))):\r\n isin=d[col]\r\n data = {'ISIN': isin}\r\n df = pd.DataFrame(data)\r\n \r\n \r\n for col in d.columns:\r\n if(bool(re.search(r\"Record\\D\", col))):\r\n #record_date=d.iloc[:,4]\r\n df['Record_Date']=d[col]\r\n \r\n for col in d.columns:\r\n if(bool(re.search(r\"Matur\\D\", col))):\r\n #mature_date=d.iloc[:,6]\r\n df['Maturity_Date']=d[col]\r\n \r\n if not 'Record_Date' in df.columns: \r\n if(len(found_matches_rDate)==1):\r\n for match_id, start, end in found_matches_rDate:\r\n string_id = nlp.vocab.strings[match_id] # get string representation\r\n span = text_extracted[start:end] \r\n if(string_id=='RecordDate'):\r\n for sent in text_extracted.sents:\r\n if (found_matches_rDate[0][1] < sent.end and found_matches_rDate[0][1] > sent.start) : # this is the fifth match, that starts at doc3[673]\r\n matches = list(datefinder.find_dates(sent.text))\r\n if(len(matches)==1):\r\n r_date=matches[0].date().strftime('%m/%d/%Y')\r\n df['Record_Date']=r_date\r\n \r\n if not 'Maturity_Date' in df.columns:\r\n if(len(found_matches_mDate)==1):\r\n for match_id, start, end in found_matches_mDate:\r\n string_id = nlp.vocab.strings[match_id] # get string representation\r\n span = 
text_extracted[start:end]\r\n if(string_id=='MaturityDate'):\r\n for sent in text_extracted.sents:\r\n if (found_matches_mDate[0][1] < sent.end and found_matches_mDate[0][1] > sent.start) : # this is the fifth match, that starts at doc3[673]\r\n matches = list(datefinder.find_dates(sent.text))\r\n if(len(matches)==1):\r\n m_date=matches[0].date().strftime('%m/%d/%Y')\r\n df['Maturity_Date']=m_date\r\n \r\n \r\n #out = df.to_dict(orient='records')\r\n \r\n out=df.to_dict(orient='records') \r\n document.close()\r\n \r\n else:\r\n out=({'false'})\r\n document.close()\r\n return out\r\n ", "sub_path": "Client Side/file_upload/mutual_funds.py", "file_name": "mutual_funds.py", "file_ext": "py", "file_size_in_byte": 3837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "spacy.load", "line_number": 4, "usage_type": "call"}, {"api_name": "camelot.read_pdf", "line_number": 15, "usage_type": "call"}, {"api_name": "fitz.open", "line_number": 16, "usage_type": "call"}, {"api_name": "spacy.matcher.Matcher", "line_number": 22, "usage_type": "call"}, {"api_name": "spacy.matcher.Matcher", "line_number": 23, "usage_type": "call"}, {"api_name": "re.search", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "re.search", "line_number": 53, "usage_type": "call"}, {"api_name": "re.search", "line_number": 58, "usage_type": "call"}, {"api_name": "datefinder.find_dates", "line_number": 70, "usage_type": "call"}, {"api_name": "datefinder.find_dates", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "537687983", "text": "\"\"\"\r\nInteractive selectors and viewers?\r\n\r\n\"\"\"\r\n\r\nimport time\r\n\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph import QtCore as qc, QtGui as qg\r\nfrom pyqtgraph import console\r\n\r\nfrom . import stat\r\n\r\n# from . 
import util\r\n\r\n\r\nclass CloudPicker(pg.PlotWidget):\r\n \"\"\"\r\n Widget to display a points in 2D and a ROI to select points of interest.\r\n\r\n - A ScatterItem shows points in 2D\r\n - A mouse-draggable circle selects points\r\n - If selection changes, sigMaskChanged is emitted with the mask\r\n\r\n \"\"\"\r\n\r\n sigMaskChanged = qc.Signal(object)\r\n\r\n def __init__(self, xy, labels=None, hlabel=None, parent=None):\r\n \"Create CloudPicker from (n_obs, 2+) size array of points\"\r\n\r\n pg.PlotWidget.__init__(self, parent=parent)\r\n\r\n # unpack two dimensions of input data to x & y\r\n self.x, self.y = self.xy = xy[:2]\r\n\r\n # class labels optional\r\n # hlabel will be highlighted, rest not\r\n if labels is not None and hlabel is not None:\r\n _ = labels.copy()\r\n _[labels == hlabel] = 0\r\n _[labels != hlabel] = 1\r\n labels = _\r\n if labels is None:\r\n labels = np.zeros(self.x.size, np.int32)\r\n self.labels = labels\r\n self.ulabels = np.unique(labels)\r\n\r\n self.setup_plot()\r\n self.setup_roi()\r\n\r\n def setup_plot(self):\r\n self.plot = self # .plot()\r\n self.showGrid(x=True, y=True)\r\n\r\n # don't show the default left & bottom axes\r\n [self.plot.showAxis(o, False) for o in (\"left\", \"bottom\")]\r\n\r\n # create & add scatter item to plot\r\n self.scatters = []\r\n for i, ul in enumerate(self.ulabels):\r\n mask = self.labels == ul\r\n brush = pg.intColor(i, hues=len(self.ulabels), alpha=125)\r\n spi = pg.ScatterPlotItem(\r\n x=self.x[mask], y=self.y[mask], pen=None, brush=brush\r\n )\r\n self.scatters.append(spi)\r\n\r\n list(map(self.plot.addItem, self.scatters))\r\n\r\n def setup_roi(self):\r\n self.roi = pg.CircleROI([0, 0], [1, 1])\r\n self.plot.addItem(self.roi)\r\n self.roi.sigRegionChanged.connect(self.check_mask_change)\r\n\r\n _old_mask = None\r\n\r\n def check_mask_change(self, roi):\r\n \"See if mask change, if so, emit sigMaskChanged\"\r\n new = self.compute_mask()\r\n if self._old_mask is not None and (new == self._old_mask).all():\r\n return\r\n self._old_mask = new\r\n self.sigMaskChanged.emit(new)\r\n\r\n def compute_mask(self):\r\n \"Compute & return bool mask selecting points inside roi\"\r\n (cx, cy), (r, _) = self.roi.pos(), self.roi.size()\r\n return np.sqrt((self.x - cx) ** 2 + (self.y - cy) ** 2) <= r\r\n\r\n\r\nclass CloudStats(pg.PlotWidget):\r\n \"\"\"\r\n Widget to display epoch stats (interactively)\r\n\r\n - Holds a 2/3D array of epoch data\r\n - Plot with 2D image item\r\n - On update, stats across epochs calculated\r\n - Interactive update is rate limited, does partial then full update\r\n\r\n If epochs are 2D, assume we're showing a density plot\r\n\r\n \"\"\"\r\n\r\n alpha = 0.05\r\n n_ep_fast = 200\r\n\r\n def __init__(self, fs, ep, labels, parent=None):\r\n pg.PlotWidget.__init__(self, parent=None)\r\n self.fs = fs\r\n self.ep = ep\r\n self.labels = labels\r\n self.setup_lut()\r\n self.setup_plot()\r\n\r\n def setup_lut(self):\r\n \"Create typical blue neg, black zero, red positive color map\"\r\n self.lut = np.r_[np.zeros(128), np.r_[:128]] * 2\r\n self.lut = np.c_[self.lut, self.lut * 0.0, self.lut[::-1]].astype(np.ubyte)\r\n\r\n def setup_plot(self):\r\n \"Scaffold plot and image item\"\r\n self.plot = self # .plot()\r\n self.showGrid(x=True, y=True)\r\n\r\n if self.ep.ndim == 3:\r\n self.plot.getAxis(\"left\").setTicks(\r\n [[(i + 0.5, l) for i, l in enumerate(self.labels)]]\r\n )\r\n self.plot.getAxis(\"left\").setWidth(100)\r\n\r\n wnsamp = self.ep.shape[1]\r\n winsz = wnsamp * 1.0 / self.fs\r\n xts = [\r\n 
(int(wnsamp * ph), \"%0.3f\" % ((ph - 0.5) * winsz,))\r\n for ph in [0.0, 0.5, 1.0]\r\n ]\r\n self.plot.getAxis(\"bottom\").setTicks([xts])\r\n\r\n # self.plot.getAxis('left').setTicks(util.seeg_major_minor_ch_labels(self.labels))\r\n\r\n self.image = pg.ImageItem()\r\n if self.ep.ndim == 3:\r\n self.image.setLookupTable(self.lut)\r\n self.plot.addItem(self.image)\r\n\r\n def select_ep(self, mask, skip=True):\r\n \"Select epochs on mask, maybe skipping some if too many\"\r\n if skip:\r\n mask = np.r_[: mask.size][mask][:: (mask.sum() / self.n_ep_fast + 1)]\r\n return self.ep[mask]\r\n\r\n def compute_map(self, epi):\r\n \"Compute stat map for display from mask\"\r\n _, P = stats.ttest_1samp(epi, 0.0, axis=0)\r\n PT, _ = stat.fdr(P, alpha=self.alpha)\r\n return (P < PT) * epi.mean(axis=0)\r\n\r\n _level_scale = 1.0\r\n\r\n def update_image(self, mask, skip=True):\r\n \"Update stat map on displayed image based on mask\"\r\n epi = self.select_ep(mask, skip=skip)\r\n if epi.ndim == 3:\r\n std = epi.std() / self._level_scale\r\n self.image.setLevels([std, -std])\r\n self.image.setImage(self.compute_map(epi), autoLevels=False)\r\n elif epi.ndim == 2:\r\n T = np.tile(np.r_[: epi.shape[1]], (epi.shape[0], 1)).ravel()\r\n Y = epi.ravel()\r\n q5, q95 = np.percentile(Y, [0.1, 99.9])\r\n H, _, _ = np.histogram2d(T, np.clip(Y, q5, q95), (epi.shape[1], 200))\r\n self.image.setImage(np.log(H + 1))\r\n else:\r\n raise ValueError(\"ep must have 2 or 3 dim, found %d\" % (epi.ndim,))\r\n\r\n # help with interactive update\r\n _full_timer = None\r\n _last_update = 0.0\r\n\r\n def interactive_update(self, mask):\r\n \"Update map, first quickly, then with full stats\"\r\n\r\n # rate limit interactive updates to 5 hz\r\n now = time.time()\r\n if now - self._last_update < 0.2:\r\n return\r\n self._last_update = now\r\n\r\n # cancel pending full update timer\r\n if self._full_timer is not None:\r\n self._full_timer.cancel()\r\n\r\n # partial update\r\n self.update_image(mask)\r\n\r\n # schedule full update after 200 ms\r\n self._full_timer = qc.QTimer.singleShot(\r\n 200, lambda: self.update_image(mask, skip=False)\r\n )\r\n\r\n\r\nclass ClassAtlas(qg.QMainWindow):\r\n def __init__(self, fs, events, ep, xy, labels, ch_labels, parent=None):\r\n\r\n qg.QMainWindow.__init__(self, parent=parent)\r\n\r\n self.fs = fs\r\n\r\n self.split_main = qg.QSplitter(qc.Qt.Horizontal)\r\n self.setCentralWidget(self.split_main)\r\n self.setWindowTitle(\"Class Atlas\")\r\n\r\n self.split_ctrl = qg.QSplitter(qc.Qt.Vertical)\r\n \"\"\"\r\n self.cli = console.ConsoleWidget(namespace={'ca':self}, editor='gvim {fileName} +{lineNum}')\r\n self.split_ctrl.addWidget(self.cli)\r\n \"\"\"\r\n self.b_add_class = qg.QPushButton(\"Add class\")\r\n self.b_add_class.clicked.connect(self._b_add_class_cb)\r\n self.lay_ctrl = qg.QVBoxLayout()\r\n self.split_ctrl.setLayout(self.lay_ctrl)\r\n self.lay_ctrl.addWidget(self.b_add_class)\r\n self.lay_ctrl.insertStretch(-1)\r\n self.split_main.addWidget(self.split_ctrl)\r\n\r\n self.xy = xy\r\n self.events = events\r\n self.ep = ep\r\n self.labels = labels\r\n self.ch_labels = ch_labels\r\n\r\n self.split_views = qg.QSplitter(qc.Qt.Vertical)\r\n self.split_main.addWidget(self.split_views)\r\n\r\n self.lay_pickers = qg.QHBoxLayout()\r\n self.lay_stats = qg.QHBoxLayout()\r\n self.pw_timeline = pg.PlotWidget()\r\n\r\n self.w_lay_pickers = qg.QWidget()\r\n self.w_lay_pickers.setLayout(self.lay_pickers)\r\n self.w_lay_stats = qg.QWidget()\r\n self.w_lay_stats.setLayout(self.lay_stats)\r\n\r\n 
self.split_views.addWidget(self.w_lay_pickers)\r\n self.split_views.addWidget(self.w_lay_stats)\r\n self.split_views.setSizes([150, 700])\r\n # self.split_views.addWidget(self.pw_timeline)\r\n\r\n self.classes = []\r\n for i in np.unique(labels):\r\n self.add_class(hlabel=i)\r\n self.classes[-1][\"stats\"].interactive_update(labels == i)\r\n\r\n def _b_add_class_cb(self, *args):\r\n self.add_class()\r\n\r\n def add_class(self, hlabel=None):\r\n\r\n class_id = len(self.classes)\r\n\r\n # setup picker and dock\r\n picker = CloudPicker(self.xy, self.labels, hlabel=hlabel)\r\n self.lay_pickers.addWidget(picker)\r\n\r\n # setup stats and dock\r\n stats = CloudStats(self.fs, self.ep, self.ch_labels)\r\n self.lay_stats.addWidget(stats)\r\n\r\n # connect pick updates to stat display\r\n picker.sigMaskChanged.connect(stats.interactive_update)\r\n\r\n # store all this\r\n self.classes.append(locals())\r\n\r\n\r\nclass ScatterStat(pg.QtGui.QWidget):\r\n def __init__(self, *args, **kwds):\r\n pg.QtGui.QWidget.__init__(self)\r\n self.lay = pg.QtGui.QVBoxLayout()\r\n self.setLayout(self.lay)\r\n self.gw = pg.GraphicsWindow() # .__init__(self, *args, **kwds)\r\n self.lay.addWidget(self.gw)\r\n self.rows = []\r\n self.lut = np.r_[np.zeros(128), np.r_[:128]] * 2\r\n self.lut = np.c_[self.lut, self.lut * 0.0, self.lut[::-1]].astype(np.ubyte)\r\n self.b_add_row = pg.QtGui.QPushButton(\"Add row with same data\")\r\n self.b_add_row.clicked.connect(self._add_row_same_data)\r\n self.lay.addWidget(self.b_add_row)\r\n self.add_row(*args)\r\n self.n_ep_fast = kwds.pop(\"n_ep_fast\", 50)\r\n self.alpha = kwds.pop(\"alpha\", 0.05)\r\n self.show()\r\n\r\n def _add_row_same_data(self):\r\n self.add_row(*[self.rows[-1][k] for k in \"xi labels ep ch_labels\".split()])\r\n\r\n def add_row(self, xi, labels, ep, ch_labels):\r\n\r\n row_idx = len(self.rows)\r\n\r\n # prep data\r\n x, y = xi\r\n ulabels = np.unique(labels)\r\n emin, emax = ep.min(), ep.max()\r\n\r\n # plot embedded data, classes in different colors\r\n p_xi = self.gw.addPlot()\r\n [p_xi.showAxis(o, False) for o in (\"left\", \"bottom\")]\r\n s_xi = pg.ScatterPlotItem()\r\n for i, ulab in enumerate(ulabels):\r\n mask = labels == ulab\r\n s_xi.addPoints(\r\n x[mask], y[mask], pen=None, symbol=\"+\"\r\n ) # , brush=(i, len(ulabels)))\r\n p_xi.addItem(s_xi)\r\n\r\n # setup roi\r\n r_xi = pg.CircleROI([0, 0], [1, 1])\r\n p_xi.addItem(r_xi)\r\n\r\n # setup stat plot\r\n p_stat = self.gw.addPlot()\r\n p_stat.getAxis(\"left\").setTicks(\r\n [[], [(i + 0.5, l) for i, l in enumerate(ch_labels)]]\r\n )\r\n i_stat = pg.ImageItem()\r\n i_stat.setLookupTable(self.lut)\r\n p_stat.addItem(i_stat)\r\n\r\n # setup callback to update i_stat on r_xi selection of s_xi points\r\n tic = [time.time(), None]\r\n\r\n def update_stats_of_roi(roi, noskip=False):\r\n if time.time() - tic[0] < 0.1:\r\n return\r\n else:\r\n tic[0] = time.time()\r\n cx, cy = roi.pos()\r\n r, _ = roi.size()\r\n mask = np.sqrt((x - cx) ** 2 + (y - cy) ** 2) <= r\r\n skip = 1 if noskip else mask.sum() / self.n_ep_fast + 1\r\n maski = np.r_[: mask.shape[0]][mask]\r\n epi = ep[mask[::skip]]\r\n std = epi.std()\r\n i_stat.setLevels([std, -std])\r\n try:\r\n _, P = stats.ttest_1samp(epi, 0.0, axis=0)\r\n PT, _ = stat.fdr(P, alpha=self.alpha)\r\n i_stat.setImage((P < PT) * epi.mean(axis=0), autoLevels=False)\r\n except Exception as e:\r\n print(\r\n \"%r row[%d] update_stats_of_roi failed with %r\" % (self, row_idx, e)\r\n )\r\n\r\n # if we skipped data in fast interactive, wait 500 ms and update with full 
stats\r\n if skip > 1:\r\n\r\n def fullupdate():\r\n update_stats_of_roi(r_xi, noskip=True)\r\n\r\n if tic[1] is not None: # TODO REFACTOR\r\n tic[1].stop()\r\n tic[1] = pg.QtCore.QTimer.singleShot(200, fullupdate)\r\n\r\n r_xi.sigRegionChanged.connect(update_stats_of_roi)\r\n\r\n self.rows.append(locals())\r\n self.gw.nextRow()\r\n return locals()\r\n", "sub_path": "icdc/atlas.py", "file_name": "atlas.py", "file_ext": "py", "file_size_in_byte": 12560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pyqtgraph.PlotWidget", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore.Signal", "line_number": 30, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 30, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotWidget.__init__", "line_number": 35, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 50, "usage_type": "call"}, {"api_name": "pyqtgraph.intColor", "line_number": 66, "usage_type": "call"}, {"api_name": "pyqtgraph.ScatterPlotItem", "line_number": 67, "usage_type": "call"}, {"api_name": "pyqtgraph.CircleROI", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pyqtgraph.PlotWidget.__init__", "line_number": 112, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.ubyte", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ImageItem", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 153, "usage_type": "attribute"}, {"api_name": "scipy.stats.ttest_1samp", "line_number": 158, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.tile", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.percentile", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.histogram2d", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 176, "usage_type": "call"}, {"api_name": "time.time", "line_number": 188, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore.QTimer.singleShot", "line_number": 201, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore.QTimer", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 201, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QMainWindow", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtGui", "line_number": 206, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QMainWindow.__init__", "line_number": 209, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui.QMainWindow", "line_number": 209, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtGui", "line_number": 209, "usage_type": "name"}, 
{"api_name": "pyqtgraph.QtGui.QSplitter", "line_number": 213, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 213, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.Qt", "line_number": 213, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 213, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QSplitter", "line_number": 217, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 217, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.Qt", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 217, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QPushButton", "line_number": 222, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 222, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QVBoxLayout", "line_number": 224, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 224, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QSplitter", "line_number": 236, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 236, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.Qt", "line_number": 236, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 236, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QHBoxLayout", "line_number": 239, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 239, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QHBoxLayout", "line_number": 240, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 240, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 241, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui.QWidget", "line_number": 243, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 243, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QWidget", "line_number": 245, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 245, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 254, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 270, "usage_type": "name"}, {"api_name": "scipy.stats", "line_number": 271, "usage_type": "argument"}, {"api_name": "scipy.stats.interactive_update", "line_number": 274, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 274, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui", "line_number": 280, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtGui.QWidget.__init__", "line_number": 282, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 282, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtGui.QVBoxLayout", "line_number": 283, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 283, "usage_type": "attribute"}, {"api_name": "pyqtgraph.GraphicsWindow", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 288, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 289, "usage_type": "attribute"}, {"api_name": "numpy.ubyte", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtGui.QPushButton", "line_number": 290, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 290, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 307, "usage_type": "call"}, {"api_name": "pyqtgraph.ScatterPlotItem", "line_number": 313, "usage_type": "call"}, {"api_name": "pyqtgraph.CircleROI", 
"line_number": 322, "usage_type": "call"}, {"api_name": "pyqtgraph.ImageItem", "line_number": 330, "usage_type": "call"}, {"api_name": "time.time", "line_number": 335, "usage_type": "call"}, {"api_name": "time.time", "line_number": 338, "usage_type": "call"}, {"api_name": "time.time", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 346, "usage_type": "attribute"}, {"api_name": "scipy.stats.ttest_1samp", "line_number": 351, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 351, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.QTimer.singleShot", "line_number": 367, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 367, "usage_type": "attribute"}]} +{"seq_id": "551135094", "text": "from __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport bottleneck as bn\nimport sys\nfrom pysnptools.snpreader import Bed\nfrom pysnptools.standardizer import Unit\n#from IPython import embed\nfrom time import time\n\ncompliment = {'A':'T','T':'A','G':'C','C':'G',\n 'a':'t','t':'a','g':'c','c':'g',\n 1:1,2:2}\n\nclass covariance_scores_1_pop(object):\n '''\n Class for storing covariance score objects and computing covariance scores\n Paramters\n ---------\n bfile: bed file name\n block_size: block size for computing scores\n block_type: type of block - SNP or KBP\n\n Attributes\n ----------\n blocks: block boundaries\n block_type: type of block used to create the object\n N: sample size of panel used to determine scores\n M: number of SNPs\n scores: the covariance scores\n\n Methods\n -------\n get_blocks: determine the block boundaries\n compute: compute the covariance scores\n '''\n def __init__(self,args):\n if args.window_type not in ['KBP','SNP']:\n raise ValueError('Window type not supported')\n bed_1 = Bed(args.bfile,count_A1=False) #\n af1 = self.get_allele_frequency(bed_1,args) #\n print(len(af1), \"SNPs in file 1\")\n snps_1 = (af1>args.maf)&(af1<1-args.maf) #\n print(np.sum(snps_1), \"SNPs in file 1 after MAF filter\")\n if (args.from_bp is not None) and (args.to_bp is not None):\n k = (bed_1.pos[:,2]>args.from_bp)&(bed_1.pos[:,2]ws:\n j+=1\n wl.append(j)\n j=0\n for i in xrange(self.M):\n while j=args.from_bp))[0][0]\n k1 = np.where((bed.pos[:,2]<=args.to_bp))[0][-1]\n X = bed[:,k0:k1].read().val\n af[k0:k1] = bn.nanmean(X,0)/2.0\n var[k0:k1] = self._fast_var(X,2*af[k0:k1])\n else:\n for i in xrange(int(np.ceil(bed.sid_count/s))):\n X = bed[:,i*s:(i+1)*s].read().val\n af[i*s:(i+1)*s] = bn.nanmean(X,0)/2.0\n var[i*s:(i+1)*s] = self._fast_var(X,2*af[i*s:(i+1)*s])\n af[var==0]=0\n return af\n\n # replace missing genotypes with mean\n def _norm_data(self,X):\n m = bn.nanmean(X,0)\n inds = np.where(np.isnan(X))\n X[inds]=np.take(m,inds[1])\n np.subtract(X,m,out=X)\n v = X.var(0)\n np.divide(X,np.sqrt(v),out=X)\n return X\n\n def _condition_ancestry(self,X,a):\n return sm.OLS(X,sm.add_constant(a)).fit().resid\n\n def compute(self,bed_1,bed_1_index,af,a1,args):\n N = bed_1.iid_count\n if args.gen_effect:\n v1m = np.mean(2*af*(1-af))\n def func(a,i,j):\n af1 = af[i:j]\n v = (2*af1*(1-af1))/v1m\n v1j = 2*(af1[-1]*(1-af1[-1]))/v1m\n c = a**2\n return (v*c).sum(1), (v1j*(c)).sum(0)[0:-1]\n else:\n def func(a,i,j):\n c = a**2\n return c.sum(1), c.sum(0)[0:-1]\n t=time()\n scores = np.zeros((self.M))\n li,ri = self.windows[0]\n A1 = 
self._norm_data(bed_1[:,bed_1_index[li:ri]].read().val)\n if a1 is not None:\n A1 = self._condition_ancestry(A1,a1['theta'].values)\n R1 = np.dot(A1.T,A1/N)\n scores[li:ri] += func(R1,li,ri)[0]\n nstr = np.max((1000,ri-li))\n #nstr = ri-li\n offset = 0\n #out1 = np.zeros((1,nstr-1))\n for i in xrange(ri,self.M,nstr):\n sys.stdout.write(\"SNP: %d, %f\\r\" % (i, time()-t))\n sys.stdout.flush()\n X1n= self._norm_data(bed_1[:,bed_1_index[i:(i+nstr)]].read().val)\n A1 = np.hstack((A1,X1n))\n if a1 is not None:\n A1 = self._condition_ancestry(A1,a1['theta'].values)\n for j in xrange(i,np.min((i+nstr,self.M))):\n lb,rb = self.windows[j]\n lbp = lb-offset\n jp = j-offset\n out1 = np.dot(np.atleast_2d(A1[:,jp]/N),A1[:,lbp:jp])\n # np.dot(np.atleast_2d(A1[:,jp]/N),A1[:,lbp:jp],out=out1)\n _out1 = np.hstack((out1,[[1]]))\n func_ret = func(_out1,lb,j+1)\n try:\n scores[lb:j] += func_ret[1]\n except ValueError:\n print(\"Error when setting scores.\"\n \" Block width may exceed number of SNPs being stored\"\n \" Try increasing --SNPs_to_store\")\n scores[j] += func_ret[0]\n if A1.shape[1] > args.SNPs_to_store:\n A1 = A1[:,nstr:]\n offset += nstr\n print(time()-t)\n return scores\n\n def write(self,args):\n f = open(args.out,'w')\n f.write('# M = '+str(self.M)+'\\n')\n for l in zip(self.chr,self.pos,self.id,self.A1,self.A2,self.af,\n self.scores):\n f.write('\\t'.join(map(str,l))+'\\n')\n\nclass covariance_scores_2_pop(covariance_scores_1_pop):\n def __init__(self,args):\n if args.window_type not in ['KBP','SNP']:\n raise ValueError('Window type not supported')\n bed_1 = Bed(args.bfile1,count_A1=False) #\n bed_2 = Bed(args.bfile2,count_A1=False)\n af1 = self.get_allele_frequency(bed_1,args) #\n af2 = self.get_allele_frequency(bed_2,args)\n print(len(af1), \"SNPs in file 1\")\n print(len(af2), \"SNPs in file 2\")\n snps_1 = (af1>args.maf)&(af1<1-args.maf) #\n snps_2 = (af2>args.maf)&(af2<1-args.maf)\n print(np.sum(snps_1), \"SNPs in file 1 after MAF filter\")\n print(np.sum(snps_2), \"SNPs in file 2 after MAF filter\")\n if (args.from_bp is not None) and (args.to_bp is not None):\n k1 = (bed_1.pos[:,2]>args.from_bp)&(bed_1.pos[:,2]args.from_bp)&(bed_2.pos[:,2] args.SNPs_to_store:\n A1 = A1[:,nstr:]\n A2 = A2[:,nstr:]\n offset += nstr\n print(time()-t)\n return scores\n", "sub_path": "popcorn/compute.py", "file_name": "compute.py", "file_ext": "py", "file_size_in_byte": 16262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "pysnptools.snpreader.Bed", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 105, "usage_type": "call"}, {"api_name": "bottleneck.nanmean", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 110, "usage_type": "call"}, {"api_name": 
"bottleneck.nanmean", "line_number": 112, "usage_type": "call"}, {"api_name": "bottleneck.nanmean", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.take", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 124, "usage_type": "call"}, {"api_name": "statsmodels.api.OLS", "line_number": 128, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 128, "usage_type": "name"}, {"api_name": "statsmodels.api.add_constant", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 157, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 157, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 157, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 158, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 169, "usage_type": "call"}, {"api_name": "time.time", "line_number": 181, "usage_type": "call"}, {"api_name": "pysnptools.snpreader.Bed", "line_number": 195, "usage_type": "call"}, {"api_name": "pysnptools.snpreader.Bed", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 223, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 263, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 266, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 270, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 342, 
"usage_type": "call"}, {"api_name": "time.time", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 348, "usage_type": "call"}, {"api_name": "pysnptools.standardizer.Unit", "line_number": 350, "usage_type": "call"}, {"api_name": "pysnptools.standardizer.Unit", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 360, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 366, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 366, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 366, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 367, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 367, "usage_type": "attribute"}, {"api_name": "pysnptools.standardizer.Unit", "line_number": 368, "usage_type": "call"}, {"api_name": "pysnptools.standardizer.Unit", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 386, "usage_type": "call"}, {"api_name": "time.time", "line_number": 394, "usage_type": "call"}]} +{"seq_id": "11882818", "text": "import glob\nimport logging\nimport os\nimport shutil\nimport time\nfrom datetime import datetime, timedelta\nfrom test.support import EnvironmentVarGuard # Python >=3\nfrom unittest import skip\nfrom unittest.mock import patch\n\nfrom django.test import TransactionTestCase, tag\nfrom django.utils import timezone\n\nimport vcr\n\nfrom data_refinery_common.job_lookup import ProcessorPipeline\nfrom data_refinery_common.message_queue import send_job\nfrom data_refinery_common.models import (\n ComputationalResult,\n ComputationalResultAnnotation,\n ComputedFile,\n DownloaderJob,\n DownloaderJobOriginalFileAssociation,\n Experiment,\n ExperimentAnnotation,\n ExperimentSampleAssociation,\n Organism,\n OriginalFile,\n OriginalFileSampleAssociation,\n ProcessorJob,\n ProcessorJobOriginalFileAssociation,\n Sample,\n SampleAnnotation,\n SampleComputedFileAssociation,\n SampleResultAssociation,\n)\nfrom data_refinery_common.utils import get_env_variable\nfrom data_refinery_foreman.foreman.main import retry_lost_downloader_jobs\nfrom data_refinery_foreman.surveyor import surveyor\nfrom data_refinery_foreman.surveyor.management.commands.unsurvey import purge_experiment\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogging.getLogger(\"vcr\").setLevel(logging.WARN)\nLOCAL_ROOT_DIR = get_env_variable(\"LOCAL_ROOT_DIR\", \"/home/user/data_store\")\nCASSETTES_DIR = \"/home/user/data_store/cassettes/\"\nLOOP_TIME = 5 # seconds\nMAX_WAIT_TIME = timedelta(minutes=60)\n\n# We don't want our end-to-end tests to be dependent 
upon external\n# services, so we host the files we would download fromt hem on S3. We\n# then have to replace the source_urls of downloader jobs that are\n# generated for end-to-end tests to pull from S3 instead of the\n# original location. The following constants and function wait until\n# the original files have been created and then swap out their\n# source_urls based on the mapping below.\n# We have to save references to the actual surveyor classes before\n# they get overwritten with mocks.\nORIGINAL_ARRAY_EXPRESS_SURVEYOR = surveyor.ArrayExpressSurveyor\nORIGINAL_SRA_SURVEYOR = surveyor.SraSurveyor\nORIGINAL_TRANSCRIPTOME_SURVEYOR = surveyor.TranscriptomeIndexSurveyor\nORIGINAL_GEO_SURVEYOR = surveyor.GeoSurveyor\n\nEXTERNAL_FILE_URL_MAPPING = {\n # Transcriptome:\n \"ftp://ftp.ensembl.org/pub/release-99/gtf/caenorhabditis_elegans/Caenorhabditis_elegans.WBcel235.99.gtf.gz\": \"https://data-refinery-test-assets.s3.amazonaws.com/end_to_end_downloads/Caenorhabditis_elegans.WBcel235.99.gtf.gz\", # noqa\n \"ftp://ftp.ensembl.org/pub/release-99/fasta/caenorhabditis_elegans/dna/Caenorhabditis_elegans.WBcel235.dna.toplevel.fa.gz\": \"https://data-refinery-test-assets.s3.amazonaws.com/end_to_end_downloads/Caenorhabditis_elegans.WBcel235.dna.toplevel.fa.gz\", # noqa\n # No Op:\n \"ftp://ftp.ebi.ac.uk/pub/databases/microarray/data/experiment/GEOD/E-GEOD-3303/E-GEOD-3303.processed.1.zip\": \"https://data-refinery-test-assets.s3.amazonaws.com/end_to_end_downloads/E-GEOD-3303.processed.1.zip\", # noqa\n # GEO:\n \"ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE102nnn/GSE102571/miniml/GSE102571_family.xml.tgz\": \"https://data-refinery-test-assets.s3.amazonaws.com/end_to_end_downloads/GSE102571_family.xml.tgz\", # noqa\n}\n\n\ndef build_surveyor_init_mock(source_type):\n if source_type == \"ARRAY_EXPRESS\":\n original_surveyor = ORIGINAL_ARRAY_EXPRESS_SURVEYOR\n if source_type == \"SRA\":\n original_surveyor = ORIGINAL_SRA_SURVEYOR\n if source_type == \"TRANSCRIPTOME_INDEX\":\n original_surveyor = ORIGINAL_TRANSCRIPTOME_SURVEYOR\n if source_type == \"GEO\":\n original_surveyor = ORIGINAL_GEO_SURVEYOR\n\n def mock_init_surveyor(survey_job):\n ret_value = original_surveyor(survey_job)\n\n def mock_queue_downloader_job_for_original_files(\n original_files, experiment_accession_code: str = None, is_transcriptome: bool = False,\n ):\n for original_file in original_files:\n original_file.source_url = EXTERNAL_FILE_URL_MAPPING[original_file.source_url]\n original_file.save()\n\n return original_surveyor.queue_downloader_job_for_original_files(\n ret_value, original_files, experiment_accession_code, is_transcriptome\n )\n\n def mock_queue_downloader_jobs(experiment, samples):\n # We don't want to change the same file's url more than\n # once and sometimes multiple samples are associated with\n # the same file.\n original_file_ids = set()\n for sample in samples:\n for original_file in sample.original_files.all():\n if original_file.id not in original_file_ids:\n try:\n original_file.source_url = EXTERNAL_FILE_URL_MAPPING[\n original_file.source_url\n ]\n except KeyError as e:\n log_message = (\n \"The tests attempted to access a URL that is not in\"\n \" EXTERNAL_FILE_URL_MAPPING. This is most likely because you've\"\n \" added a test that mocks the surveyor so that the DownloaderJobs\"\n \" download from S3 instead of the external service. 
To fix this\"\n \" you should download the file, upload it to S3, and then add a\"\n \" mapping from its original URL to its URL in S3.\"\n )\n logger.warn(log_message)\n raise e\n\n original_file.save()\n original_file_ids.add(original_file.id)\n\n return original_surveyor.queue_downloader_jobs(ret_value, experiment, samples)\n\n ret_value.queue_downloader_job_for_original_files = (\n mock_queue_downloader_job_for_original_files\n )\n ret_value.queue_downloader_jobs = mock_queue_downloader_jobs\n return ret_value\n\n return mock_init_surveyor\n\n\ndef wait_for_job(job, job_class: type, start_time: datetime, loop_time: int = None):\n \"\"\"Monitors the `job_class` table for when `job` is done.\"\"\"\n loop_time = loop_time if loop_time else LOOP_TIME\n last_log_time = timezone.now()\n while job.success is None and timezone.now() - start_time < MAX_WAIT_TIME:\n time.sleep(loop_time)\n\n # Don't log statuses more often than every 5 seconds.\n if timezone.now() - last_log_time > timedelta(seconds=5):\n logger.info(\"Still polling the %s with ID %s.\", job_class.__name__, job.nomad_job_id)\n last_log_time = timezone.now()\n\n job.refresh_from_db()\n\n if timezone.now() - start_time > MAX_WAIT_TIME:\n logger.error(\"%s job timed out!\", job_class.__name__)\n\n return job\n\n\n# TransactionTestCase makes database calls complete before the test\n# ends. Otherwise the workers wouldn't actually be able to find the\n# job in the database because it'd be stuck in a transaction.\n#\n# Unfortunately, it's more unreliable and tends to leave things in the\n# database, so let's manually clear it before every test\nclass EndToEndTestCase(TransactionTestCase):\n def setUp(self):\n Experiment.objects.all().delete()\n ExperimentAnnotation.objects.all().delete()\n ExperimentSampleAssociation.objects.all().delete()\n Sample.objects.all().delete()\n SampleAnnotation.objects.all().delete()\n OriginalFile.objects.all().delete()\n OriginalFileSampleAssociation.objects.all().delete()\n SampleResultAssociation.objects.all().delete()\n ComputationalResult.objects.all().delete()\n ComputationalResultAnnotation.objects.all().delete()\n SampleComputedFileAssociation.objects.all().delete()\n ComputedFile.objects.all().delete()\n DownloaderJob.objects.all().delete()\n DownloaderJobOriginalFileAssociation.objects.all().delete()\n ProcessorJob.objects.all().delete()\n ProcessorJobOriginalFileAssociation.objects.all().delete()\n\n\nclass NoOpEndToEndTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @vcr.use_cassette(\n os.path.join(CASSETTES_DIR, \"surveyor.test_end_to_end.no_op.yaml\"), ignore_hosts=[\"nomad\"],\n )\n @patch(\"data_refinery_foreman.surveyor.surveyor.ArrayExpressSurveyor\")\n def test_no_op(self, mock_surveyor):\n \"\"\"Survey, download, then process an experiment we know is NO_OP.\"\"\"\n\n mock_surveyor.side_effect = build_surveyor_init_mock(\"ARRAY_EXPRESS\")\n\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # Prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"HOMO_SAPIENS\", taxonomy_id=9606, is_scientific_name=True)\n organism.save()\n\n accession_code = \"E-GEOD-3303\"\n survey_job = surveyor.survey_experiment(accession_code, \"ARRAY_EXPRESS\")\n\n self.assertTrue(survey_job.success)\n\n downloader_jobs = DownloaderJob.objects.all()\n 
self.assertGreater(downloader_jobs.count(), 0)\n\n logger.info(\"Survey Job finished, waiting for Downloader Jobs to complete.\")\n start_time = timezone.now()\n for downloader_job in downloader_jobs:\n downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time)\n self.assertTrue(downloader_job.success)\n\n processor_jobs = ProcessorJob.objects.all().exclude(\n abort=True\n ) # exclude aborted processor jobs\n self.assertGreater(processor_jobs.count(), 0)\n\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n start_time = timezone.now()\n for processor_job in processor_jobs:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n if not processor_job.success:\n logger.error(processor_job.failure_reason)\n self.assertTrue(processor_job.success)\n\n # Test that the unsurveyor deletes all objects related to the experiment\n purge_experiment(accession_code)\n\n self.assertEqual(Experiment.objects.all().count(), 0)\n self.assertEqual(ExperimentAnnotation.objects.all().count(), 0)\n self.assertEqual(ExperimentSampleAssociation.objects.all().count(), 0)\n self.assertEqual(Sample.objects.all().count(), 0)\n self.assertEqual(SampleAnnotation.objects.all().count(), 0)\n self.assertEqual(OriginalFile.objects.all().count(), 0)\n self.assertEqual(OriginalFileSampleAssociation.objects.all().count(), 0)\n self.assertEqual(SampleResultAssociation.objects.all().count(), 0)\n self.assertEqual(ComputationalResult.objects.all().count(), 0)\n self.assertEqual(ComputationalResultAnnotation.objects.all().count(), 0)\n self.assertEqual(SampleComputedFileAssociation.objects.all().count(), 0)\n self.assertEqual(ComputedFile.objects.all().count(), 0)\n self.assertEqual(DownloaderJob.objects.all().count(), 0)\n self.assertEqual(DownloaderJobOriginalFileAssociation.objects.all().count(), 0)\n self.assertEqual(ProcessorJob.objects.all().count(), 0)\n self.assertEqual(ProcessorJobOriginalFileAssociation.objects.all().count(), 0)\n\n\nclass ArrayexpressRedownloadingTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @vcr.use_cassette(\n os.path.join(CASSETTES_DIR, \"surveyor.test_end_to_end.array_express_redownloading.yaml\"),\n ignore_hosts=[\"nomad\"],\n )\n @patch(\"data_refinery_foreman.surveyor.surveyor.ArrayExpressSurveyor\")\n def test_array_express_redownloading(self, mock_surveyor):\n \"\"\"Survey, download, then process an experiment we know is NO_OP.\"\"\"\n\n mock_surveyor.side_effect = build_surveyor_init_mock(\"ARRAY_EXPRESS\")\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # Prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"HOMO_SAPIENS\", taxonomy_id=9606, is_scientific_name=True)\n organism.save()\n\n NUM_SAMPLES_IN_EXPERIMENT = 12\n accession_code = \"E-GEOD-3303\"\n survey_job = surveyor.survey_experiment(accession_code, \"ARRAY_EXPRESS\")\n\n self.assertTrue(survey_job.success)\n\n # All of this experiment's samples are contained in the\n # same archive, so only one job is needed.\n downloader_jobs = DownloaderJob.objects.all()\n self.assertEqual(downloader_jobs.count(), 1)\n\n logger.info(\"Survey Job finished, waiting for Downloader Jobs to complete.\")\n start_time = timezone.now()\n # We want to try and delete the file as quickly as\n # possible, so pass a short loop time and 
let the waiting\n # loop spin really fast so we lose as little time as\n # possible.\n downloader_job = wait_for_job(downloader_jobs[0], DownloaderJob, start_time, 0.1)\n self.assertTrue(downloader_job.success)\n\n # Now we're going to delete one of the extracted files but not the other.\n deleted_file = OriginalFile.objects.filter(is_archive=False).first()\n self.assertIsNotNone(deleted_file)\n deleted_file.delete_local_file()\n\n # The one downloader job should have extracted all the files\n # and created as many processor jobs.\n processor_jobs = ProcessorJob.objects.all()\n self.assertEqual(processor_jobs.count(), NUM_SAMPLES_IN_EXPERIMENT)\n\n doomed_processor_job = deleted_file.processor_jobs.all()[0]\n logger.info(\n \"Waiting on processor Nomad job %s to fail because it realized it is missing a file.\",\n doomed_processor_job.nomad_job_id,\n )\n\n start_time = timezone.now()\n doomed_processor_job = wait_for_job(doomed_processor_job, ProcessorJob, start_time)\n self.assertTrue(doomed_processor_job.abort)\n\n # The processor job that had a missing file will have\n # recreated its DownloaderJob, which means there should now be two.\n downloader_jobs = DownloaderJob.objects.all().order_by(\"-id\")\n self.assertEqual(downloader_jobs.count(), 2)\n\n # However DownloaderJobs don't get queued immediately, so\n # we have to run a foreman function to make it happen:\n retry_lost_downloader_jobs()\n\n # And we can check that the most recently created\n # DownloaderJob was successful as well:\n recreated_job = downloader_jobs[0]\n recreated_job.refresh_from_db()\n logger.info(\"Waiting on downloader Nomad job %s\", recreated_job.nomad_job_id)\n recreated_job = wait_for_job(recreated_job, DownloaderJob, start_time)\n self.assertTrue(recreated_job.success)\n\n # Once the Downloader job succeeds, it should create one\n # and only one processor job, after which the total goes back up\n # to NUM_SAMPLES_IN_EXPERIMENT:\n processor_jobs = ProcessorJob.objects.all().exclude(\n abort=True\n ) # exclude aborted processor jobs\n logger.error(processor_jobs)\n self.assertEqual(processor_jobs.count(), NUM_SAMPLES_IN_EXPERIMENT)\n\n # And finally we can make sure that all of the\n # processor jobs were successful, including the one that\n # got recreated.\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n for processor_job in processor_jobs:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n self.assertTrue(processor_job.success)\n\n\nclass GeoArchiveRedownloadingTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @vcr.use_cassette(\n os.path.join(CASSETTES_DIR, \"surveyor.test_end_to_end.geo_archive_redownloading.yaml\"),\n ignore_hosts=[\"nomad\"],\n )\n def test_geo_archive_redownloading(self):\n \"\"\"Survey, download, then process an experiment we know is NO_OP.\n\n All the data for the experiment are in the same archive, which\n is one of ways we expect GEO data to come.\n\n This is another test which uses Aspera so it unfortunately\n cannot be made to run without relying on NCBI's aspera server.\n \"\"\"\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # Prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"HOMO_SAPIENS\", taxonomy_id=9606, is_scientific_name=True)\n organism.save()\n\n 
accession_code = \"GSE102571\"\n survey_job = surveyor.survey_experiment(accession_code, \"GEO\")\n\n self.assertTrue(survey_job.success)\n\n # This experiment has multiple samples that are contained in the\n # same archive, so only one job is needed.\n downloader_jobs = DownloaderJob.objects.all()\n self.assertEqual(downloader_jobs.count(), 1)\n\n logger.info(\n \"Survey Job finished, waiting for Downloader Job with Nomad ID %s to complete.\",\n downloader_jobs[0].nomad_job_id,\n )\n\n # We're going to spin as fast as we can so we can delete\n # the file in between when the downloader job finishes and\n # the processor job starts.\n start_time = timezone.now()\n file_deleted = False\n while not file_deleted and timezone.now() - start_time < MAX_WAIT_TIME:\n non_archive_files = OriginalFile.objects.filter(is_archive=False)\n for original_file in non_archive_files:\n if original_file.absolute_file_path and os.path.exists(\n original_file.absolute_file_path\n ):\n os.remove(original_file.absolute_file_path)\n file_deleted = True\n break\n\n downloader_job = wait_for_job(downloader_jobs[0], DownloaderJob, start_time)\n self.assertTrue(downloader_job.success)\n\n try:\n doomed_processor_job = original_file.processor_jobs.all()[0]\n except Exception:\n # The doomed job may be aborted before we can get\n # it. This is fine, we just can't look at it.\n doomed_processor_job = None\n\n if doomed_processor_job:\n logger.info(\n \"Waiting on processor Nomad job %s to fail because it realized it is missing a file.\",\n doomed_processor_job.nomad_job_id,\n )\n\n start_time = timezone.now()\n doomed_processor_job = wait_for_job(doomed_processor_job, ProcessorJob, start_time)\n self.assertTrue(doomed_processor_job.abort)\n\n # The processor job that had a missing file will have\n # recreated its DownloaderJob, which means there should now be two.\n downloader_jobs = DownloaderJob.objects.all().order_by(\"-id\")\n self.assertEqual(downloader_jobs.count(), 2)\n\n # However DownloaderJobs don't get queued immediately, so\n # we have to run a foreman function to make it happen:\n retry_lost_downloader_jobs()\n\n # And we can check that the most recently created\n # DownloaderJob was successful as well:\n recreated_job = downloader_jobs[0]\n recreated_job.refresh_from_db()\n logger.info(\"Waiting on downloader Nomad job %s\", recreated_job.nomad_job_id)\n recreated_job = wait_for_job(recreated_job, DownloaderJob, start_time)\n self.assertTrue(recreated_job.success)\n\n # And finally we can make sure that all of the\n # processor jobs were successful, including the one that\n # got recreated.\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n processor_jobs = ProcessorJob.objects.all().exclude(\n abort=True\n ) # exclude aborted processor jobs\n for processor_job in processor_jobs:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n if not processor_job.success:\n logger.error(processor_job.failure_reason)\n self.assertTrue(processor_job.success)\n\n # Apparently this experiment has a variable number of\n # files because GEO processed experiments sometimes do...\n # However this is okay because there's at least one file\n # per sample, so each sample will get processed at least\n # once and it's the best we can do with the state of GEO.\n # Anyway, all of that is an explanation for why we count\n # how many samples there are rather than just expecting\n # how many we know the experiment has.\n self.assertEqual(processor_jobs.count(), 
Sample.objects.all().count())\n\n\nclass GeoCelgzRedownloadingTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @tag(\"affymetrix\")\n @vcr.use_cassette(\n os.path.join(CASSETTES_DIR, \"surveyor.test_end_to_end.geo_celgz_redownloading.yaml\"),\n ignore_hosts=[\"nomad\"],\n )\n def test_geo_celgz_redownloading(self):\n \"\"\"Survey, download, then process an experiment we know is Affymetrix.\n\n Each of the experiment's samples are in their own .cel.gz\n file, which is another way we expect GEO data to come.\n\n This is another test which uses Aspera so it unfortunately\n cannot be made to run without relying on NCBI's aspera server.\n \"\"\"\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n # Clear out pre-existing work dirs so there's no conflicts:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # Prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"MUS_MUSCULUS\", taxonomy_id=10090, is_scientific_name=True)\n organism.save()\n\n accession_code = \"GSE100388\"\n survey_job = surveyor.survey_experiment(accession_code, \"GEO\")\n\n SAMPLES_IN_EXPERIMENT = 15\n\n self.assertTrue(survey_job.success)\n\n # This experiment's samples each have their own file so\n # they each get their own downloader job.\n downloader_jobs = DownloaderJob.objects.all()\n self.assertEqual(downloader_jobs.count(), SAMPLES_IN_EXPERIMENT)\n\n logger.info(\"Survey Job finished, waiting for Downloader Jobs to complete.\")\n\n # We're going to spin as fast as we can so we can delete\n # the file in between when the downloader jobs finishes and\n # the processor job starts.\n start_time = timezone.now()\n file_deleted = False\n while not file_deleted and timezone.now() - start_time < MAX_WAIT_TIME:\n non_archive_files = OriginalFile.objects.filter(is_archive=False)\n for original_file in non_archive_files:\n if original_file.absolute_file_path and os.path.exists(\n original_file.absolute_file_path\n ):\n os.remove(original_file.absolute_file_path)\n file_deleted = True\n break\n\n # Wait for each of the DownloaderJobs to finish\n for downloader_job in downloader_jobs:\n downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time)\n self.assertTrue(downloader_job.success)\n\n try:\n doomed_processor_job = original_file.processor_jobs.all()[0]\n except Exception:\n # The doomed job may be aborted before we can get\n # it. 
This is fine, we just can't look at it.\n doomed_processor_job = None\n\n if doomed_processor_job:\n logger.info(\n \"Waiting on processor Nomad job %s to fail because it realized it is missing a file.\",\n doomed_processor_job.nomad_job_id,\n )\n\n start_time = timezone.now()\n doomed_processor_job = wait_for_job(doomed_processor_job, ProcessorJob, start_time)\n self.assertTrue(doomed_processor_job.abort)\n\n # The processor job that had a missing file will have\n # recreated its DownloaderJob, which means there should\n # now be SAMPLES_IN_EXPERIMENT + 1 downloader jobs.\n downloader_jobs = DownloaderJob.objects.all().order_by(\"-id\")\n self.assertEqual(downloader_jobs.count(), SAMPLES_IN_EXPERIMENT + 1)\n\n # However DownloaderJobs don't get queued immediately, so\n # we have to run a foreman function to make it happen:\n retry_lost_downloader_jobs()\n\n # And we can check that the most recently created\n # DownloaderJob was successful as well:\n recreated_job = downloader_jobs[0]\n recreated_job.refresh_from_db()\n logger.info(\"Waiting on downloader Nomad job %s\", recreated_job.nomad_job_id)\n recreated_job = wait_for_job(recreated_job, DownloaderJob, start_time)\n self.assertTrue(recreated_job.success)\n\n # And finally we can make sure that all of the processor\n # jobs were successful, including the one that got\n # recreated. The processor job that recreated that job has\n # abort=True\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n processor_jobs = ProcessorJob.objects.all().exclude(abort=True) # exclude aborted jobs\n for processor_job in processor_jobs:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n self.assertTrue(processor_job.success)\n\n self.assertEqual(processor_jobs.count(), SAMPLES_IN_EXPERIMENT)\n\n\nclass TranscriptomeRedownloadingTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @tag(\"transcriptome\")\n @vcr.use_cassette(\n os.path.join(CASSETTES_DIR, \"surveyor.test_end_to_end.transcriptome_redownloading.yaml\"),\n ignore_hosts=[\"nomad\"],\n )\n @patch(\"data_refinery_foreman.surveyor.surveyor.TranscriptomeIndexSurveyor\")\n def test_transcriptome_redownloading(self, mock_surveyor):\n \"\"\"Survey, download, then process a transcriptome index. \"\"\"\n\n mock_surveyor.side_effect = build_surveyor_init_mock(\"TRANSCRIPTOME_INDEX\")\n\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n # I'm not sure why, but sometimes there are already downloader jobs\n # in the database from previous tests even though they should be\n # removed, so pause a bit\n time.sleep(10)\n downloader_jobs = DownloaderJob.objects.all()\n for job in downloader_jobs:\n print(job)\n print(job.accession_code)\n self.assertEqual(downloader_jobs.count(), 0)\n\n for length in [\"LONG\", \"SHORT\"]:\n work_dir_glob = (\n LOCAL_ROOT_DIR + \"/Caenorhabditis_elegans/\" + length + \"/processor_job_*\"\n )\n for work_dir in glob.glob(work_dir_glob):\n shutil.rmtree(work_dir)\n\n # Prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(\n name=\"CAENORHABDITIS_ELEGANS\", taxonomy_id=6239, is_scientific_name=True\n )\n organism.save()\n\n # Make sure that we can delete the file before the processors begin\n # by preventing the downloaders from sending the processors\n # automatically. 
We send the jobs manually later\n no_dispatch = EnvironmentVarGuard()\n no_dispatch.set(\"AUTO_DISPATCH_NOMAD_JOBS\", \"False\")\n with no_dispatch:\n survey_job = surveyor.survey_transcriptome_index(\n \"Caenorhabditis elegans\", \"Ensembl\"\n )\n\n self.assertTrue(survey_job.success)\n\n downloader_jobs = DownloaderJob.objects.all()\n self.assertEqual(downloader_jobs.count(), 1)\n\n logger.info(\n \"Survey Job finished, waiting for Downloader Job with Nomad ID %s to complete.\",\n downloader_jobs[0].nomad_job_id,\n )\n\n downloader_job = wait_for_job(downloader_jobs[0], DownloaderJob, timezone.now())\n self.assertTrue(downloader_job.success)\n\n og_file_to_delete = OriginalFile.objects.all()[0]\n os.remove(og_file_to_delete.absolute_file_path)\n\n processor_jobs = ProcessorJob.objects.all()\n for processor_job in processor_jobs:\n # FIXME: we run these in serial because of\n # https://github.com/AlexsLemonade/refinebio/issues/2321\n send_job(\n ProcessorPipeline[processor_job.pipeline_applied],\n job=processor_job,\n is_dispatch=True,\n )\n try:\n wait_for_job(processor_job, ProcessorJob, timezone.now())\n except Exception:\n pass\n\n # The processor job that had a missing file will have\n # recreated its DownloaderJob, which means there should now be two.\n downloader_jobs = DownloaderJob.objects.all().order_by(\"-id\")\n self.assertEqual(downloader_jobs.count(), 2)\n\n # However DownloaderJobs don't get queued immediately, so\n # we have to run a foreman function to make it happen:\n retry_lost_downloader_jobs()\n\n # And we can check that the most recently created\n # DownloaderJob was successful as well:\n recreated_job = downloader_jobs[0]\n recreated_job.refresh_from_db()\n logger.info(\"Waiting on downloader Nomad job %s\", recreated_job.nomad_job_id)\n recreated_job = wait_for_job(recreated_job, DownloaderJob, timezone.now())\n self.assertTrue(recreated_job.success)\n\n # Once the Downloader job succeeds, it should create two\n # processor jobs, one for long and one for short indices:\n processor_jobs = ProcessorJob.objects.all()\n self.assertEqual(processor_jobs.count(), 4)\n\n # Wait for the processor jobs to be dispatched\n time.sleep(15)\n\n # And finally we can make sure that both of the\n # processor jobs were successful, including the one that\n # got recreated.\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n successful_processor_jobs = []\n for processor_job in processor_jobs:\n processor_job.refresh_from_db()\n # One of the calls to wait_for_job will fail if the\n # job aborts before we have selected all the\n # processor jobs.\n processor_job = wait_for_job(processor_job, ProcessorJob, timezone.now())\n if processor_job.success:\n successful_processor_jobs.append(processor_job)\n\n # While one of the original ProcessorJobs will be aborted,\n # it is hard to be sure of what will happen\n # to the other because of the racing that happens between\n # processor jobs getting started and us deleting the files\n # they need.\n # Therefore, we're just going to verify that one processor\n # job completed successfully for each length, since that\n # is the main thing we need.\n has_long = False\n has_short = False\n for processor_job in successful_processor_jobs:\n if processor_job.pipeline_applied == \"TRANSCRIPTOME_INDEX_LONG\":\n has_long = True\n elif processor_job.pipeline_applied == \"TRANSCRIPTOME_INDEX_SHORT\":\n has_short = True\n\n self.assertTrue(has_long)\n self.assertTrue(has_short)\n\n\nclass SraRedownloadingTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @tag(\"salmon\")\n @skip(\"This test is timing out I think.\")\n def test_sra_redownloading(self):\n \"\"\"Survey, download, then process an experiment we know is SRA.\"\"\"\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"HOMO_SAPIENS\", taxonomy_id=9606, is_scientific_name=True)\n organism.save()\n\n survey_job = surveyor.survey_experiment(\"SRP040623\", \"SRA\")\n\n self.assertTrue(survey_job.success)\n\n # This experiment has 4 samples that each need a downloader job.\n downloader_jobs = DownloaderJob.objects.all()\n self.assertEqual(downloader_jobs.count(), 4)\n\n # We want one ProcessorJob to fail because it doesn't have\n # the file it was expecting, so we need to wait until one\n # DownloaderJob finishes, delete a file that is\n # downloaded, and then not delete any more.\n logger.info(\"Survey Job finished, waiting for Downloader Jobs to complete.\")\n start_time = timezone.now()\n file_deleted = False\n for downloader_job in downloader_jobs:\n # We want to try and delete the file as quickly as\n # possible, so pass a short loop time and let the waiting\n # loop spin really fast so we lose as little time as\n # possible.\n downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time, 0.1)\n self.assertTrue(downloader_job.success)\n if not file_deleted:\n for original_file in OriginalFile.objects.filter(is_downloaded=True):\n if not original_file.is_archive:\n original_file.delete_local_file()\n file_deleted = True\n\n # And then to make sure that we can handle\n # cases where the downloader job is missing:\n downloader_job.delete()\n break\n\n # There's a chance that the processor job with a missing\n # file is aborted before the last downloader job\n # completes, therefore just check that there's at least 3\n # processor jobs.\n processor_jobs = ProcessorJob.objects.all()\n self.assertGreater(processor_jobs.count(), 2)\n\n doomed_processor_job = original_file.processor_jobs.all()[0]\n logger.info(\n \"Waiting on processor Nomad job %s to fail because it realized it is missing a file.\",\n doomed_processor_job.nomad_job_id,\n )\n\n start_time = timezone.now()\n wait_for_job(doomed_processor_job, ProcessorJob, start_time)\n\n # The processor job that had a missing file will have\n # recreated its DownloaderJob, which means there should\n # now be 5, but we also deleted one on purpose so there's 4.\n downloader_jobs = DownloaderJob.objects.all().order_by(\"-id\")\n self.assertEqual(downloader_jobs.count(), 4)\n\n # However DownloaderJobs don't get queued immediately, so\n # we have to run a foreman function to make it happen:\n retry_lost_downloader_jobs()\n\n # And we can check that the most recently created\n # DownloaderJob was successful as well:\n recreated_job = downloader_jobs[0]\n recreated_job.refresh_from_db()\n logger.info(\"Waiting on downloader Nomad job %s\", recreated_job.nomad_job_id)\n recreated_job = wait_for_job(recreated_job, DownloaderJob, start_time)\n self.assertTrue(recreated_job.success)\n\n # Once the Downloader job succeeds, it should create one\n # and only one processor job, which brings the total back up to 4:\n self.assertEqual(ProcessorJob.objects.all().count(), 4)\n\n # 
And finally we can make sure that all of the processor\n # jobs got started correctly, including the one that got\n # recreated. However in order to save time when running\n # tests, we don't actually want to run the full salmon\n # processor. Therefore we don't have the transcriptome\n # index that is needed for this organism so the jobs will\n # fail, but that failure happens past the point that we're\n # testing.\n # So we're gonna check for the correct failure_reason.\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n good_failure_reason = \"Missing transcriptome index.\"\n successful_processor_jobs = []\n for processor_job in processor_jobs:\n # One of the two calls to wait_for_job will fail\n # because the job is going to abort when it\n # finds that the file it wants to process is missing.\n try:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n if not processor_job.success and processor_job.failure_reason.startswith(\n good_failure_reason\n ):\n successful_processor_jobs.append(processor_job)\n except Exception:\n pass\n\n self.assertEqual(len(successful_processor_jobs), 4)\n\n\nclass EnaFallbackTestCase(EndToEndTestCase):\n @tag(\"slow\")\n @tag(\"salmon\")\n @vcr.use_cassette(\n os.path.join(CASSETTES_DIR, \"surveyor.test_end_to_end.unmated_reads.yaml\"),\n ignore_hosts=[\"nomad\"],\n )\n def test_unmated_reads(self):\n \"\"\"Survey, download, then process a sample we know is SRA and has unmated reads.\n\n This test uses VCR to remove the dependence upon NCBI's\n servers, but the downloader job hits ENA's FTP and aspera\n servers. Unfortunately there's not much that can be done to\n avoid that behavior from here because the downloader jobs\n always check ENA's FTP server to see if the file has an\n unmated read. For now we'll just have to be content with the\n fact that NCBI going down won't affect this test.\n \"\"\"\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"HOMO_SAPIENS\", taxonomy_id=9606, is_scientific_name=True)\n organism.save()\n\n # Survey just a single run to make things faster!\n # This sample has unmated reads!\n survey_job = surveyor.survey_experiment(\"SRR1603661\", \"SRA\")\n\n self.assertTrue(survey_job.success)\n\n # Let's give the downloader a little bit to get started\n # and to update the OriginalFiles' source_urls.\n time.sleep(60)\n\n downloader_jobs = DownloaderJob.objects.all()\n self.assertEqual(downloader_jobs.count(), 1)\n downloader_job = downloader_jobs.first()\n\n self.assertIsNotNone(downloader_job.start_time)\n\n for original_file in downloader_job.original_files.all():\n self.assertTrue(\".fastq.gz\" in original_file.source_url)\n\n # The downloader job will take a while to complete. 
Let's not wait.\n print(downloader_job.kill_nomad_job())\n\n\n# This test uses the special tag \"manual\" because it should only be run from the \"test_survey.sh\" script\nclass SurveyTestCase(EndToEndTestCase):\n @tag(\"manual\")\n def test_survey(self):\n \"\"\"Survey the given sample\"\"\"\n\n # Clear out pre-existing work dirs so there's no conflicts:\n self.env = EnvironmentVarGuard()\n self.env.set(\"RUNING_IN_CLOUD\", \"False\")\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n survey_job = surveyor.survey_experiment(\n get_env_variable(\"ACCESSION\"), get_env_variable(\"SURVEYOR\")\n )\n\n self.assertTrue(survey_job.success)\n\n downloader_jobs = DownloaderJob.objects.all()\n self.assertGreater(downloader_jobs.count(), 0)\n\n logger.info(\"Survey Job finished, waiting for Downloader Jobs to complete.\")\n start_time = timezone.now()\n for downloader_job in downloader_jobs:\n downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time)\n self.assertTrue(downloader_job.success)\n\n processor_jobs = ProcessorJob.objects.all().exclude(\n abort=True\n ) # exclude aborted processor jobs\n self.assertGreater(processor_jobs.count(), 0)\n\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n start_time = timezone.now()\n for processor_job in processor_jobs:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n if not processor_job.success:\n logger.error(processor_job.failure_reason)\n self.assertTrue(processor_job.success)\n", "sub_path": "foreman/data_refinery_foreman/surveyor/test_end_to_end.py", "file_name": "test_end_to_end.py", "file_ext": "py", "file_size_in_byte": 43545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "logging.basicConfig", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 44, "usage_type": "attribute"}, {"api_name": "data_refinery_common.utils.get_env_variable", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 48, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.ArrayExpressSurveyor", "line_number": 59, "usage_type": "attribute"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 59, "usage_type": "name"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.SraSurveyor", "line_number": 60, "usage_type": "attribute"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 60, "usage_type": "name"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.TranscriptomeIndexSurveyor", "line_number": 61, "usage_type": "attribute"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 61, "usage_type": "name"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.GeoSurveyor", "line_number": 62, "usage_type": "attribute"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 140, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 140, "usage_type": "name"}, {"api_name": 
"django.utils.timezone.now", "line_number": 141, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 141, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 142, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 145, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 145, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 147, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 147, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 151, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 151, "usage_type": "name"}, {"api_name": "django.test.TransactionTestCase", "line_number": 163, "usage_type": "name"}, {"api_name": "data_refinery_common.models.Experiment.objects.all", "line_number": 165, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Experiment.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Experiment", "line_number": 165, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ExperimentAnnotation.objects.all", "line_number": 166, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ExperimentAnnotation.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ExperimentAnnotation", "line_number": 166, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ExperimentSampleAssociation.objects.all", "line_number": 167, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ExperimentSampleAssociation.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ExperimentSampleAssociation", "line_number": 167, "usage_type": "name"}, {"api_name": "data_refinery_common.models.Sample.objects.all", "line_number": 168, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Sample.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Sample", "line_number": 168, "usage_type": "name"}, {"api_name": "data_refinery_common.models.SampleAnnotation.objects.all", "line_number": 169, "usage_type": "call"}, {"api_name": "data_refinery_common.models.SampleAnnotation.objects", "line_number": 169, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.SampleAnnotation", "line_number": 169, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.all", "line_number": 170, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 170, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFileSampleAssociation.objects.all", "line_number": 171, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFileSampleAssociation.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFileSampleAssociation", "line_number": 171, "usage_type": "name"}, {"api_name": "data_refinery_common.models.SampleResultAssociation.objects.all", "line_number": 172, "usage_type": "call"}, {"api_name": "data_refinery_common.models.SampleResultAssociation.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": 
"data_refinery_common.models.SampleResultAssociation", "line_number": 172, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ComputationalResult.objects.all", "line_number": 173, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ComputationalResult.objects", "line_number": 173, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ComputationalResult", "line_number": 173, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ComputationalResultAnnotation.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ComputationalResultAnnotation.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ComputationalResultAnnotation", "line_number": 174, "usage_type": "name"}, {"api_name": "data_refinery_common.models.SampleComputedFileAssociation.objects.all", "line_number": 175, "usage_type": "call"}, {"api_name": "data_refinery_common.models.SampleComputedFileAssociation.objects", "line_number": 175, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.SampleComputedFileAssociation", "line_number": 175, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ComputedFile.objects.all", "line_number": 176, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ComputedFile.objects", "line_number": 176, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ComputedFile", "line_number": 176, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 177, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 177, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJobOriginalFileAssociation.objects.all", "line_number": 178, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJobOriginalFileAssociation.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJobOriginalFileAssociation", "line_number": 178, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 179, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 179, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJobOriginalFileAssociation.objects.all", "line_number": 180, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJobOriginalFileAssociation.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJobOriginalFileAssociation", "line_number": 180, "usage_type": "name"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 195, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 198, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 199, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 203, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 207, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 207, "usage_type": "name"}, {"api_name": 
"data_refinery_common.models.DownloaderJob.objects.all", "line_number": 211, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 211, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 215, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 215, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 217, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 220, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 220, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 220, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 226, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 226, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 228, "usage_type": "argument"}, {"api_name": "data_refinery_foreman.surveyor.management.commands.unsurvey.purge_experiment", "line_number": 234, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Experiment.objects.all", "line_number": 236, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Experiment.objects", "line_number": 236, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Experiment", "line_number": 236, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ExperimentAnnotation.objects.all", "line_number": 237, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ExperimentAnnotation.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ExperimentAnnotation", "line_number": 237, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ExperimentSampleAssociation.objects.all", "line_number": 238, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ExperimentSampleAssociation.objects", "line_number": 238, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ExperimentSampleAssociation", "line_number": 238, "usage_type": "name"}, {"api_name": "data_refinery_common.models.Sample.objects.all", "line_number": 239, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Sample.objects", "line_number": 239, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Sample", "line_number": 239, "usage_type": "name"}, {"api_name": "data_refinery_common.models.SampleAnnotation.objects.all", "line_number": 240, "usage_type": "call"}, {"api_name": "data_refinery_common.models.SampleAnnotation.objects", "line_number": 240, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.SampleAnnotation", "line_number": 240, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.all", "line_number": 241, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 241, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 241, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFileSampleAssociation.objects.all", "line_number": 242, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFileSampleAssociation.objects", "line_number": 242, 
"usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFileSampleAssociation", "line_number": 242, "usage_type": "name"}, {"api_name": "data_refinery_common.models.SampleResultAssociation.objects.all", "line_number": 243, "usage_type": "call"}, {"api_name": "data_refinery_common.models.SampleResultAssociation.objects", "line_number": 243, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.SampleResultAssociation", "line_number": 243, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ComputationalResult.objects.all", "line_number": 244, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ComputationalResult.objects", "line_number": 244, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ComputationalResult", "line_number": 244, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ComputationalResultAnnotation.objects.all", "line_number": 245, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ComputationalResultAnnotation.objects", "line_number": 245, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ComputationalResultAnnotation", "line_number": 245, "usage_type": "name"}, {"api_name": "data_refinery_common.models.SampleComputedFileAssociation.objects.all", "line_number": 246, "usage_type": "call"}, {"api_name": "data_refinery_common.models.SampleComputedFileAssociation.objects", "line_number": 246, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.SampleComputedFileAssociation", "line_number": 246, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ComputedFile.objects.all", "line_number": 247, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ComputedFile.objects", "line_number": 247, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ComputedFile", "line_number": 247, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 248, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 248, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 248, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJobOriginalFileAssociation.objects.all", "line_number": 249, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJobOriginalFileAssociation.objects", "line_number": 249, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJobOriginalFileAssociation", "line_number": 249, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 250, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 250, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 250, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJobOriginalFileAssociation.objects.all", "line_number": 251, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJobOriginalFileAssociation.objects", "line_number": 251, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJobOriginalFileAssociation", "line_number": 251, "usage_type": "name"}, {"api_name": "django.test.tag", "line_number": 184, "usage_type": "call"}, {"api_name": "vcr.use_cassette", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 188, "usage_type": "call"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 266, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 269, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 270, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 274, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 279, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 279, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 285, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 285, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 289, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 289, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 294, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.filter", "line_number": 298, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 298, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 298, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 304, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 304, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 304, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 313, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 313, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 314, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 319, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 319, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 319, "usage_type": "name"}, {"api_name": "data_refinery_foreman.foreman.main.retry_lost_downloader_jobs", "line_number": 324, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 331, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 337, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 337, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 337, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 348, "usage_type": "argument"}, {"api_name": "django.test.tag", "line_number": 255, "usage_type": "call"}, {"api_name": "vcr.use_cassette", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": 
"unittest.mock.patch", "line_number": 260, "usage_type": "call"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 368, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 371, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 372, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 376, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 380, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 380, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 386, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 386, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 386, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 397, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 397, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 399, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 399, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.filter", "line_number": 400, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 400, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 400, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 402, "usage_type": "call"}, {"api_name": "os.path", "line_number": 402, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 405, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 409, "usage_type": "argument"}, {"api_name": "django.utils.timezone.now", "line_number": 425, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 425, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 426, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 431, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 431, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 431, "usage_type": "name"}, {"api_name": "data_refinery_foreman.foreman.main.retry_lost_downloader_jobs", "line_number": 436, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 443, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 450, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 450, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 450, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 454, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.Sample.objects.all", "line_number": 467, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Sample.objects", "line_number": 467, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Sample", "line_number": 467, "usage_type": "name"}, {"api_name": "django.test.tag", "line_number": 353, "usage_type": "call"}, 
{"api_name": "vcr.use_cassette", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path", "line_number": 355, "usage_type": "attribute"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 486, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 490, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 491, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 495, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 499, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 499, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 507, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 507, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 507, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 515, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 515, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 517, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 517, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.filter", "line_number": 518, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 518, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 518, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 520, "usage_type": "call"}, {"api_name": "os.path", "line_number": 520, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 523, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 529, "usage_type": "argument"}, {"api_name": "django.utils.timezone.now", "line_number": 545, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 545, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 546, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 552, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 552, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 552, "usage_type": "name"}, {"api_name": "data_refinery_foreman.foreman.main.retry_lost_downloader_jobs", "line_number": 557, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 564, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 572, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 572, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 572, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 574, "usage_type": "argument"}, {"api_name": "django.test.tag", "line_number": 471, "usage_type": "call"}, {"api_name": "django.test.tag", "line_number": 472, "usage_type": "call"}, {"api_name": "vcr.use_cassette", "line_number": 473, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 474, "usage_type": "call"}, {"api_name": "os.path", "line_number": 474, "usage_type": "attribute"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 594, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 600, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 601, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 601, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 601, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 611, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 612, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 616, "usage_type": "call"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 624, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_transcriptome_index", "line_number": 627, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 627, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 633, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 633, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 633, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 641, "usage_type": "argument"}, {"api_name": "django.utils.timezone.now", "line_number": 641, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 641, "usage_type": "name"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.all", "line_number": 644, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 644, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 644, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 645, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 647, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 647, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 647, "usage_type": "name"}, {"api_name": "data_refinery_common.message_queue.send_job", "line_number": 651, "usage_type": "call"}, {"api_name": "data_refinery_common.job_lookup.ProcessorPipeline", "line_number": 652, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 657, "usage_type": "argument"}, {"api_name": "django.utils.timezone.now", "line_number": 657, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 657, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 663, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 663, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 663, "usage_type": "name"}, {"api_name": "data_refinery_foreman.foreman.main.retry_lost_downloader_jobs", "line_number": 668, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 675, "usage_type": "argument"}, 
{"api_name": "django.utils.timezone.now", "line_number": 675, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 675, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 680, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 680, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 680, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 684, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 696, "usage_type": "argument"}, {"api_name": "django.utils.timezone.now", "line_number": 696, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 696, "usage_type": "name"}, {"api_name": "django.test.tag", "line_number": 581, "usage_type": "call"}, {"api_name": "django.test.tag", "line_number": 582, "usage_type": "call"}, {"api_name": "vcr.use_cassette", "line_number": 583, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 584, "usage_type": "call"}, {"api_name": "os.path", "line_number": 584, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 587, "usage_type": "call"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 727, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 730, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 731, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 735, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 738, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 738, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 743, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 743, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 743, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 751, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 751, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 758, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.OriginalFile.objects.filter", "line_number": 761, "usage_type": "call"}, {"api_name": "data_refinery_common.models.OriginalFile.objects", "line_number": 761, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.OriginalFile", "line_number": 761, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 775, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 775, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 775, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 784, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 784, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 785, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 790, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 790, 
"usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 790, "usage_type": "name"}, {"api_name": "data_refinery_foreman.foreman.main.retry_lost_downloader_jobs", "line_number": 795, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 802, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", "line_number": 807, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 807, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 807, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 826, "usage_type": "argument"}, {"api_name": "django.test.tag", "line_number": 721, "usage_type": "call"}, {"api_name": "django.test.tag", "line_number": 722, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 723, "usage_type": "call"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 856, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 859, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 860, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 864, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 869, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 869, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 875, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 877, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 877, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 877, "usage_type": "name"}, {"api_name": "django.test.tag", "line_number": 838, "usage_type": "call"}, {"api_name": "django.test.tag", "line_number": 839, "usage_type": "call"}, {"api_name": "vcr.use_cassette", "line_number": 840, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 841, "usage_type": "call"}, {"api_name": "os.path", "line_number": 841, "usage_type": "attribute"}, {"api_name": "test.support.EnvironmentVarGuard", "line_number": 897, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 900, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 901, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor.survey_experiment", "line_number": 903, "usage_type": "call"}, {"api_name": "data_refinery_foreman.surveyor.surveyor", "line_number": 903, "usage_type": "name"}, {"api_name": "data_refinery_common.utils.get_env_variable", "line_number": 904, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects.all", "line_number": 909, "usage_type": "call"}, {"api_name": "data_refinery_common.models.DownloaderJob.objects", "line_number": 909, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 909, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 913, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 913, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 915, "usage_type": "argument"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects.all", 
"line_number": 918, "usage_type": "call"}, {"api_name": "data_refinery_common.models.ProcessorJob.objects", "line_number": 918, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 918, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 924, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 924, "usage_type": "name"}, {"api_name": "data_refinery_common.models.ProcessorJob", "line_number": 926, "usage_type": "argument"}, {"api_name": "django.test.tag", "line_number": 892, "usage_type": "call"}]} +{"seq_id": "466058274", "text": "# -*- coding: utf-8 -*-\nimport datetime\n\nfrom constance import config\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.mail import send_mail\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\nfrom django.core.paginator import Paginator\nfrom constance import config\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.http import JsonResponse\nfrom django.views.generic import TemplateView, View\nfrom rest_framework.renderers import JSONRenderer\nimport json\n\nfrom main.events import take_presents_to_user\nfrom main.models import Script, Table, TableLinksColl, LinkCategory, Link, ScriptAccess, ScriptData, \\\n PageVideoInstruction\nfrom main.serializers.instruction import PageVideoInstructionSerializer\nfrom main.serializers.link import LinkCategorySerializer, LinkSerializer\nfrom main.serializers.script import ScriptSerializer\nfrom main.serializers.table import TableSerializer, TableLinksCollSerializer\nfrom main.utils import create_active_user, get_empty_table, get_empty_coll, get_empty_category, get_empty_link, clone_table\nfrom payment.models import UserScriptDelegationAccess, UserOfflineScriptExportAccess\nfrom payment.serializers import UserScriptDelegationAccessSerializer, UserOfflineScriptExportAccessSerializer\nfrom scripts.settings import DEBUG, YANDEX_SHOPID, YANDEX_SCID\nfrom scripts.tasks import clone_script_with_relations\nfrom users.models import CustomUser, UserAccess\nfrom users.serializers import UserSerializer, UserAccessSerializer\n\n\nclass MainView(TemplateView):\n template_name = 'base.html'\n\n def get(self, *args, **kwargs):\n if self.request.user.is_authenticated():\n return super(MainView, self).get(*args, **kwargs)\n # return HttpResponseRedirect('http://lp.scriptogenerator.ru/')\n return HttpResponseRedirect('http://getproff.ru/scriptogenerator')\n\n def get_context_data(self, **kwargs):\n context = super(MainView, self).get_context_data(**kwargs)\n context['DEBUG'] = DEBUG\n return context\n\n\nclass JSONResponse(HttpResponse):\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\nclass ScriptsView(View):\n http_method_names = ['get', 'post', 'put', 'delete']\n\n def user_accessable_scripts_ids(self, request):\n access_scripts_ids = []\n for access in ScriptAccess.objects.filter(user=request.user):\n if access.script.owner.positive_balance():\n access_scripts_ids.append(access.script.pk)\n return access_scripts_ids\n\n def get(self, request, *args, **kwargs):\n if not request.GET.get('available_scripts'):\n scripts = ScriptSerializer(Script.objects.filter(owner=request.user), many=True, 
empty_data=True).data\n else:\n scripts = ScriptSerializer(Script.objects.filter(pk__in=self.user_accessable_scripts_ids(request)), many=True).data\n\n paginator = Paginator(scripts, 20)\n page = request.GET.get('page')\n\n try:\n scripts = paginator.page(page)\n except PageNotAnInteger:\n scripts = paginator.page(1)\n except EmptyPage:\n scripts = paginator.page(paginator.num_pages)\n\n return JSONResponse({\n 'scripts': scripts.object_list,\n 'page': page,\n 'next_page': scripts.has_next()\n })\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = ScriptSerializer(data=data)\n if script.is_valid():\n script.create(data)\n return JSONResponse({\n 'scripts': ScriptSerializer(Script.objects.filter(owner=request.user), many=True, empty_data=True).data,\n 'session_user': UserSerializer(CustomUser.objects.get(pk=request.user.pk)).data\n })\n return JSONResponse(script.errors, status=400)\n\n def put(self, request, *args, **kwargs):\n data = json.loads(request.body)\n current_script = Script.objects.get(pk=int(data['id']))\n script = ScriptSerializer(current_script, data=data)\n if script.is_valid():\n script.update(current_script, data)\n return JSONResponse({\n 'script': ScriptSerializer(current_script).data\n })\n return JSONResponse(script.errors, status=400)\n\n def delete(self, request, *args, **kwargs):\n data = json.loads(request.body)\n try:\n script = Script.objects.get(pk=int(data['id']))\n script.delete_script()\n return JSONResponse({\n 'scripts': ScriptSerializer(Script.objects.filter(owner=request.user), many=True, empty_data=True).data\n })\n except ObjectDoesNotExist:\n return JSONResponse({'error': 'Object does not exist.'}, status=400)\n\n\nclass ScriptView(ScriptsView):\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n script = Script.objects.get(pk=int(request.GET['script']))\n if not script.owner == request.user:\n if int(request.GET['script']) in self.user_accessable_scripts_ids(request):\n return JSONResponse({\n 'script': ScriptSerializer(script).data\n })\n else:\n return JSONResponse({'error': 'This scripts is not available for you.'}, status=403)\n else:\n return JSONResponse({\n 'script': ScriptSerializer(script).data\n })\n\n\nclass ScriptDelegationView(View):\n def get(self, request, *args, **kwargs):\n return JSONResponse({\n 'script_delegation_accesses': UserScriptDelegationAccessSerializer(\n UserScriptDelegationAccess.objects.filter(user=request.user, delegated=False), many=True).data\n })\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n active_user_script_delegation_access = UserScriptDelegationAccess.objects.get_active_script_delegation_access(request.user)\n if active_user_script_delegation_access:\n new_owner_email = data['email']\n try:\n to_user = CustomUser.objects.get(username=new_owner_email)\n script = Script.objects.get(pk=int(data['script_id']))\n script.owner = to_user\n script.save()\n\n active_user_script_delegation_access.delegate(to_user, script)\n\n for access in script.accesses():\n access.delete()\n\n return JSONResponse({\n 'scripts': ScriptSerializer(Script.objects.filter(owner=request.user), many=True, empty_data=True).data\n })\n except ObjectDoesNotExist:\n return JSONResponse({'message': 'Script does not exist.'}, status=404)\n else:\n return JSONResponse({'message': 'User cant\\' delegate script.'}, status=500)\n\n\nclass ScriptExportingView(View):\n def get(self, request, *args, **kwargs):\n return JSONResponse({\n 
'script_exporting_accesses': UserOfflineScriptExportAccessSerializer(\n UserOfflineScriptExportAccess.objects.get_actual_user_offline_script_export_accesses(request.user), many=True).data,\n 'script_exporting_unlim_access_is_active': True if UserOfflineScriptExportAccess.objects.get_actual_user_unlim_offline_script_export_access_date(request.user) else False\n })\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n actual_user_offline_script_export_access = UserOfflineScriptExportAccess.objects.get_actual_user_offline_script_export_access(request.user)\n if actual_user_offline_script_export_access:\n try:\n script = Script.objects.get(pk=int(data['script_id']))\n script_access = actual_user_offline_script_export_access.create_offline_scripts_data(script)\n return JSONResponse({\n 'script_access': UserOfflineScriptExportAccessSerializer(script_access).data\n })\n except ObjectDoesNotExist:\n return JSONResponse({'message': 'Script does not exist.'}, status=404)\n else:\n return JSONResponse({'message': 'User cant\\' export scripts.'}, status=500)\n\n\nclass TablesView(View):\n def get(self, request, *args, **kwargs):\n return JSONResponse({\n 'tables': TableSerializer(Table.objects.filter(script__pk=int(request.GET.get('id'))), many=True).data\n })\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n script_data_object, created = ScriptData.objects.get_or_create(script=script)\n script_data = json.loads(script_data_object.data)\n script_data.append(get_empty_table())\n script_data_object.data = json.dumps(script_data)\n script_data_object.save()\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n\n def put(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n current_table = script.tables(table_id=data['table']['id'])\n new_table = TableSerializer(data=data['table'])\n if new_table.is_valid():\n script.replace_table(data['table'], current_table['index'])\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n else:\n return JSONResponse(new_table.errors, status=500)\n\n def delete(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n script_data_object = ScriptData.objects.get(script=script)\n script_data = json.loads(script_data_object.data)\n script_data.remove(script.tables(table_id=data['table'])['data'])\n script_data_object.data = json.dumps(script_data)\n script_data_object.save()\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n\n\nclass CollsView(View):\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table_data = script.tables(table_id=data['table'])\n new_coll = get_empty_coll()\n table_data['data']['colls'].append(new_coll)\n script.replace_table(table_data['data'], table_data['index'])\n return JSONResponse({\n 'data': json.loads(script.data()),\n 'new_coll': new_coll\n })\n\n def put(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table = script.tables(table_id=data['table'])\n current_coll = script.colls(table_id=data['table'], coll_id=data['coll']['id'])\n new_coll = TableLinksCollSerializer(data=data['coll'])\n if new_coll.is_valid():\n table['data']['colls'][current_coll['index']] = new_coll.validated_data\n 
script.replace_table(table['data'], table['index'])\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n return JSONResponse(new_coll.errors, status=400)\n\n def delete(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table_data = script.tables(table_id=data['table'])\n coll_data = script.colls(table_id=data['table'], coll_id=data['coll'])\n if coll_data:\n table_data['data']['colls'].remove(coll_data['data'])\n script.replace_table(table_data['data'], table_data['index'])\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n return JSONResponse({'error': 'Coll doesn\\'t exist'}, status=400)\n\n\nclass LinkCategoriesView(View):\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table_data = script.tables(table_id=data['table'])\n coll_data = script.colls(table_id=data['table'], coll_id=data['coll'])\n new_category = get_empty_category(data['hidden'])\n coll_data['data']['categories'].append(new_category)\n table_data['data']['colls'][coll_data['index']] = coll_data['data']\n script.replace_table(table_data['data'], table_data['index'])\n return JSONResponse({\n 'category': new_category\n })\n\n def put(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table = script.tables(table_id=data['table'])\n coll = script.colls(table_id=data['table'], coll_id=data['coll'])\n current_category = script.categories(table_id=data['table'], coll_id=data['coll'], category_id=data['category']['id'])\n new_category = LinkCategorySerializer(data=data['category'])\n if new_category.is_valid():\n table['data']['colls'][coll['index']]['categories'][current_category['index']] = new_category.validated_data\n script.replace_table(table['data'], table['index'])\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n return JSONResponse(new_category.errors, status=400)\n\n def delete(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table = script.tables(table_id=data['table'])\n coll = script.colls(table_id=data['table'], coll_id=data['coll'])\n current_category = script.categories(table_id=data['table'], coll_id=data['coll'], category_id=data['category'])\n if current_category:\n table['data']['colls'][coll['index']]['categories'].remove(current_category['data'])\n script.replace_table(table['data'], table['index'])\n return JSONResponse({\n 'status': 'success'\n })\n return JSONResponse({'error': 'Category doesn\\'t exist'}, status=400)\n\n\nclass LinkView(View):\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table_data = script.tables(table_id=data['table'])\n coll_data = script.colls(table_id=data['table'], coll_id=data['coll'])\n category_data = script.categories(table_id=data['table'], coll_id=data['coll'], category_id=data['category'])\n new_link = get_empty_link(to_link=data['to_link'])\n category_data['data']['links'].append(new_link)\n coll_data['data']['categories'][category_data['index']] = category_data['data']\n table_data['data']['colls'][coll_data['index']] = coll_data['data']\n script.replace_table(table_data['data'], table_data['index'])\n return JSONResponse({\n 'link': new_link\n })\n\n def put(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = 
Script.objects.get(pk=int(data['script']))\n table = script.tables(table_id=data['table'])\n coll = script.colls(table_id=data['table'], coll_id=data['coll'])\n category = script.categories(table_id=data['table'], coll_id=data['coll'], category_id=data['category'])\n current_link = script.links(table_id=data['table'], coll_id=data['coll'], category_id=data['category'], link_id=data['link']['id'])\n new_link = LinkSerializer(data=data['link'])\n if new_link.is_valid():\n table['data']['colls'][coll['index']]['categories'][category['index']]['links'][current_link['index']] = new_link.validated_data\n script.replace_table(table['data'], table['index'])\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n return JSONResponse(new_link.errors, status=400)\n\n def delete(self, request, *args, **kwargs):\n data = json.loads(request.body)\n script = Script.objects.get(pk=int(data['script']))\n table = script.tables(table_id=data['table'])\n coll = script.colls(table_id=data['table'], coll_id=data['coll'])\n category = script.categories(table_id=data['table'], coll_id=data['coll'], category_id=data['category'])\n current_link = script.links(table_id=data['table'], coll_id=data['coll'], category_id=data['category'], link_id=data['link'])\n if current_link:\n table['data']['colls'][coll['index']]['categories'][category['index']]['links'].remove(current_link['data'])\n script.replace_table(table['data'], table['index'])\n return JSONResponse({\n 'status': 'success'\n })\n return JSONResponse({'error': 'Link doesn\\'t exist'}, status=400)\n\n\nclass ScriptAccessView(View):\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n accesses = data['accesses']\n script = Script.objects.get(pk=int(data['script_id']))\n\n def delete_accesses(accesses_for_deleting):\n for access in accesses_for_deleting:\n access.delete()\n\n if accesses:\n created_accesses = []\n for access in accesses:\n user = CustomUser.objects.get(pk=int(access['user_id']))\n try:\n created_access = ScriptAccess.objects.get(user=user, script=script)\n created_access.edit = access['edit']\n except ObjectDoesNotExist:\n created_access = ScriptAccess.objects.create(user=user, script=script, edit=access['edit'])\n created_access.save()\n created_accesses.append(created_access.pk)\n if created_accesses:\n delete_accesses(script.accesses().exclude(pk__in=created_accesses))\n else:\n delete_accesses(script.accesses())\n return JSONResponse({\n 'data': json.loads(script.data())\n })\n\n\nclass CloneScriptView(View):\n def post(self, request, *args, **kwargs):\n current_script = Script.objects.get(pk=int(request.POST.get('script')))\n clone_script_with_relations(current_script.pk, [('name', current_script.name + u' (копия)')])\n return JSONResponse({\n 'scripts': ScriptSerializer(Script.objects.filter(owner=request.user), many=True, empty_data=True).data\n })\n\n\nclass CloneTableView(View):\n def post(self, request, *args, **kwargs):\n current_script = Script.objects.get(pk=int(request.POST.get('current_script_id')))\n to_script = Script.objects.get(pk=int(request.POST.get('to_script_id')))\n current_table = current_script.tables(table_id=request.POST.get('table_id'))['data']\n\n new_table = clone_table(current_table)\n to_script.append_new_table(new_table)\n return JSONResponse({\n 'data': json.loads(current_script.data())\n })\n\n\nclass InitView(ScriptsView):\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n available_scripts = 
Script.objects.filter(pk__in=self.user_accessable_scripts_ids(request))\n return JSONResponse({\n 'scripts': ScriptSerializer(Script.objects.filter(owner=request.user), many=True, empty_data=True).data,\n 'template_scripts': ScriptSerializer(Script.objects.filter(is_template=True), many=True, empty_data=True).data,\n 'available_scripts': ScriptSerializer(available_scripts, many=True).data,\n 'session_user': UserSerializer(request.user).data,\n 'shopId': YANDEX_SHOPID,\n 'scid': YANDEX_SCID,\n 'advertisment': {'title': config.ADVERTISING_TITLE, 'url': config.ADVERTISING_URL} if config.ADVERTISING_TITLE and config.ADVERTISING_URL else None,\n 'video_instructions': PageVideoInstructionSerializer(PageVideoInstruction.objects.all(), many=True).data\n }, status=200)\n\n\nclass ExternalRegisterView(View):\n def get(self, request, *args, **kwargs):\n email = request.GET.get('email')\n if email:\n active_user = create_active_user(request=request, email=email, first_name=request.GET.get('first_name'), phone=request.GET.get('phone'))\n if not active_user:\n return HttpResponseRedirect('/')\n user = active_user['user']\n password = active_user['password']\n if user:\n if request.GET.get('balance') == '1':\n take_presents_to_user(user)\n\n login(request, authenticate(\n username=user.username,\n password=password\n ))\n\n if request.GET.get('type') == 'ext':\n return HttpResponseRedirect('/')\n\n return JsonResponse({'success': 200}, status=200)\n return JsonResponse({'error': 500, 'message': u'User with same email already exist.'}, status=500)\n\n def post(self, request, *args, **kwargs):\n return JsonResponse({'error': 'Method doesn\\'t supports.'}, status=500)\n\n\nEXT_PAYMENT_TITLES = {\n 'SG_PAY_1000': 1000.0,\n 'SG_PAY_3000': 3000.0,\n 'SG_PAY_5000': 7000.0,\n 'SG_PAY_YEAR': 15000.0\n}\n\n\nclass ExternalPaymentView(View):\n def get(self, request, *args, **kwargs):\n send_mail('ExternalPaymentView.get', str(dict(request.GET)), 'info@scriptogenerator.ru', ['skyliffer@gmail.com', 'aliestarten@gmail.com'])\n email = request.GET.get('email')\n if email:\n try:\n user = CustomUser.objects.get(username=email)\n product_title = request.GET.get('product_title')\n if product_title:\n try:\n take_presents_to_user(\n user,\n EXT_PAYMENT_TITLES[product_title],\n u'Оплата пакета: ' + product_title,\n present_script=False,\n promotion=True if product_title == 'SG_PAY_YEAR' else False\n )\n if request.GET.get('type') == 'ext':\n return HttpResponseRedirect('/')\n except KeyError:\n return JsonResponse({'error': 500, 'message': u'Package does not exist.'}, status=500)\n else:\n return JsonResponse({'error': 500, 'message': u'Argument project_title does not exist.'}, status=500)\n except ObjectDoesNotExist:\n return JsonResponse({'error': 500, 'message': u'User with same email does not exist.'}, status=500)\n return JsonResponse({'error': 500, 'message': u'User with same email already exist.'}, status=500)\n\n def post(self, request, *args, **kwargs):\n return JsonResponse({'error': 'Method doesn\\'t supports.'}, status=500)\n", "sub_path": "main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 23262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "10", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 36, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 43, "usage_type": "call"}, {"api_name": "scripts.settings.DEBUG", "line_number": 47, "usage_type": "name"}, {"api_name": 
"django.http.HttpResponse", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.renderers.JSONRenderer", "line_number": 53, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 58, "usage_type": "name"}, {"api_name": "main.models.ScriptAccess.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "main.models.ScriptAccess.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "main.models.ScriptAccess", "line_number": 63, "usage_type": "name"}, {"api_name": "scripts.settings", "line_number": 70, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 70, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 70, "usage_type": "name"}, {"api_name": "scripts.settings", "line_number": 72, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 72, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 72, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 74, "usage_type": "call"}, {"api_name": "scripts.settings", "line_number": 74, "usage_type": "argument"}, {"api_name": "scripts.settings", "line_number": 78, "usage_type": "name"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 79, "usage_type": "name"}, {"api_name": "scripts.settings", "line_number": 80, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 81, "usage_type": "name"}, {"api_name": "scripts.settings", "line_number": 82, "usage_type": "name"}, {"api_name": "scripts.settings.object_list", "line_number": 85, "usage_type": "attribute"}, {"api_name": "scripts.settings", "line_number": 85, "usage_type": "name"}, {"api_name": "scripts.settings.has_next", "line_number": 87, "usage_type": "call"}, {"api_name": "scripts.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 92, "usage_type": "call"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 96, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 96, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializer", "line_number": 97, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 97, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 102, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 103, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 103, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 103, "usage_type": "name"}, {"api_name": 
"main.serializers.script.ScriptSerializer", "line_number": 104, "usage_type": "call"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 108, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 113, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 115, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 115, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 118, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 118, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 118, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 120, "usage_type": "name"}, {"api_name": "main.models.Script.objects.get", "line_number": 128, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 128, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 128, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 132, "usage_type": "call"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 138, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 142, "usage_type": "name"}, {"api_name": "payment.serializers.UserScriptDelegationAccessSerializer", "line_number": 145, "usage_type": "call"}, {"api_name": "payment.models.UserScriptDelegationAccess.objects.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "payment.models.UserScriptDelegationAccess.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "payment.models.UserScriptDelegationAccess", "line_number": 146, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 150, "usage_type": "call"}, {"api_name": "payment.models.UserScriptDelegationAccess.objects.get_active_script_delegation_access", "line_number": 151, "usage_type": "call"}, {"api_name": "payment.models.UserScriptDelegationAccess.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "payment.models.UserScriptDelegationAccess", "line_number": 151, "usage_type": "name"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 155, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 155, "usage_type": "name"}, {"api_name": "main.models.Script.objects.get", "line_number": 156, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 156, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 166, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 166, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 166, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 168, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 174, "usage_type": "name"}, {"api_name": 
"payment.serializers.UserOfflineScriptExportAccessSerializer", "line_number": 177, "usage_type": "call"}, {"api_name": "payment.models.UserOfflineScriptExportAccess.objects.get_actual_user_offline_script_export_accesses", "line_number": 178, "usage_type": "call"}, {"api_name": "payment.models.UserOfflineScriptExportAccess.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "payment.models.UserOfflineScriptExportAccess", "line_number": 178, "usage_type": "name"}, {"api_name": "payment.models.UserOfflineScriptExportAccess.objects.get_actual_user_unlim_offline_script_export_access_date", "line_number": 179, "usage_type": "call"}, {"api_name": "payment.models.UserOfflineScriptExportAccess.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "payment.models.UserOfflineScriptExportAccess", "line_number": 179, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 183, "usage_type": "call"}, {"api_name": "payment.models.UserOfflineScriptExportAccess.objects.get_actual_user_offline_script_export_access", "line_number": 184, "usage_type": "call"}, {"api_name": "payment.models.UserOfflineScriptExportAccess.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "payment.models.UserOfflineScriptExportAccess", "line_number": 184, "usage_type": "name"}, {"api_name": "main.models.Script.objects.get", "line_number": 187, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 187, "usage_type": "name"}, {"api_name": "payment.serializers.UserOfflineScriptExportAccessSerializer", "line_number": 190, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 192, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 198, "usage_type": "name"}, {"api_name": "main.serializers.table.TableSerializer", "line_number": 201, "usage_type": "call"}, {"api_name": "main.models.Table.objects.filter", "line_number": 201, "usage_type": "call"}, {"api_name": "main.models.Table.objects", "line_number": 201, "usage_type": "attribute"}, {"api_name": "main.models.Table", "line_number": 201, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 205, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 206, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 206, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 206, "usage_type": "name"}, {"api_name": "main.models.ScriptData.objects.get_or_create", "line_number": 207, "usage_type": "call"}, {"api_name": "main.models.ScriptData.objects", "line_number": 207, "usage_type": "attribute"}, {"api_name": "main.models.ScriptData", "line_number": 207, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 208, "usage_type": "call"}, {"api_name": "main.utils.get_empty_table", "line_number": 209, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 210, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 213, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 217, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 218, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 218, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 218, "usage_type": "name"}, {"api_name": "main.serializers.table.TableSerializer", "line_number": 220, 
"usage_type": "call"}, {"api_name": "json.loads", "line_number": 224, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 230, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 231, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 231, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 231, "usage_type": "name"}, {"api_name": "main.models.ScriptData.objects.get", "line_number": 232, "usage_type": "call"}, {"api_name": "main.models.ScriptData.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "main.models.ScriptData", "line_number": 232, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 233, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 235, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 238, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 242, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 244, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 245, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 245, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 245, "usage_type": "name"}, {"api_name": "main.utils.get_empty_coll", "line_number": 247, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 251, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 256, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 257, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 257, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 257, "usage_type": "name"}, {"api_name": "main.serializers.table.TableLinksCollSerializer", "line_number": 260, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 265, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 270, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 271, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 271, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 271, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 278, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 283, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 285, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 286, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 286, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 286, "usage_type": "name"}, {"api_name": "main.utils.get_empty_category", "line_number": 289, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 298, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 299, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 299, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 299, "usage_type": "name"}, {"api_name": "main.serializers.link.LinkCategorySerializer", "line_number": 303, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 308, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 313, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 314, "usage_type": "call"}, 
{"api_name": "main.models.Script.objects", "line_number": 314, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 314, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 327, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 329, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 330, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 330, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 330, "usage_type": "name"}, {"api_name": "main.utils.get_empty_link", "line_number": 334, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 344, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 345, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 345, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 345, "usage_type": "name"}, {"api_name": "main.serializers.link.LinkSerializer", "line_number": 350, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 355, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 360, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 361, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 361, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 361, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 375, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 377, "usage_type": "call"}, {"api_name": "main.models.Script.objects.get", "line_number": 379, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 379, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 379, "usage_type": "name"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 388, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 388, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 388, "usage_type": "name"}, {"api_name": "main.models.ScriptAccess.objects.get", "line_number": 390, "usage_type": "call"}, {"api_name": "main.models.ScriptAccess.objects", "line_number": 390, "usage_type": "attribute"}, {"api_name": "main.models.ScriptAccess", "line_number": 390, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 392, "usage_type": "name"}, {"api_name": "main.models.ScriptAccess.objects.create", "line_number": 393, "usage_type": "call"}, {"api_name": "main.models.ScriptAccess.objects", "line_number": 393, "usage_type": "attribute"}, {"api_name": "main.models.ScriptAccess", "line_number": 393, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 401, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 405, "usage_type": "name"}, {"api_name": "main.models.Script.objects.get", "line_number": 407, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 407, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 407, "usage_type": "name"}, {"api_name": "scripts.tasks.clone_script_with_relations", "line_number": 408, "usage_type": "call"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 410, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 410, "usage_type": "call"}, 
{"api_name": "main.models.Script.objects", "line_number": 410, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 410, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 414, "usage_type": "name"}, {"api_name": "main.models.Script.objects.get", "line_number": 416, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 416, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 416, "usage_type": "name"}, {"api_name": "main.models.Script.objects.get", "line_number": 417, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 417, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 417, "usage_type": "name"}, {"api_name": "main.utils.clone_table", "line_number": 420, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 423, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 431, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 431, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 431, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 433, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 433, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 433, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 433, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 434, "usage_type": "call"}, {"api_name": "main.models.Script.objects.filter", "line_number": 434, "usage_type": "call"}, {"api_name": "main.models.Script.objects", "line_number": 434, "usage_type": "attribute"}, {"api_name": "main.models.Script", "line_number": 434, "usage_type": "name"}, {"api_name": "main.serializers.script.ScriptSerializer", "line_number": 435, "usage_type": "call"}, {"api_name": "users.serializers.UserSerializer", "line_number": 436, "usage_type": "call"}, {"api_name": "scripts.settings.YANDEX_SHOPID", "line_number": 437, "usage_type": "name"}, {"api_name": "scripts.settings.YANDEX_SCID", "line_number": 438, "usage_type": "name"}, {"api_name": "constance.config.ADVERTISING_TITLE", "line_number": 439, "usage_type": "attribute"}, {"api_name": "constance.config", "line_number": 439, "usage_type": "name"}, {"api_name": "constance.config.ADVERTISING_URL", "line_number": 439, "usage_type": "attribute"}, {"api_name": "main.serializers.instruction.PageVideoInstructionSerializer", "line_number": 440, "usage_type": "call"}, {"api_name": "main.models.PageVideoInstruction.objects.all", "line_number": 440, "usage_type": "call"}, {"api_name": "main.models.PageVideoInstruction.objects", "line_number": 440, "usage_type": "attribute"}, {"api_name": "main.models.PageVideoInstruction", "line_number": 440, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 444, "usage_type": "name"}, {"api_name": "main.utils.create_active_user", "line_number": 448, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 450, "usage_type": "call"}, {"api_name": "main.events.take_presents_to_user", "line_number": 455, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 457, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 457, "usage_type": "call"}, {"api_name": 
"django.http.HttpResponseRedirect", "line_number": 463, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 465, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 466, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 469, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 480, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 482, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 486, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 486, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 486, "usage_type": "name"}, {"api_name": "main.events.take_presents_to_user", "line_number": 490, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 498, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 500, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 502, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 503, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 504, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 505, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 508, "usage_type": "call"}]}